From edb0ae3fae6618c1b69a1f4e8baa7a6774950139 Mon Sep 17 00:00:00 2001 From: Kirk Byers Date: Sun, 16 Dec 2018 20:50:34 -0800 Subject: [PATCH 1/3] Black formatting --- docs/conf.py | 146 +- docs/tutorials/sample_scripts/load_replace.py | 30 +- napalm/_SUPPORTED_DRIVERS.py | 10 +- napalm/__init__.py | 13 +- napalm/base/__init__.py | 24 +- napalm/base/base.py | 93 +- napalm/base/canonical_map.py | 200 +- napalm/base/clitools/cl_napalm.py | 203 +- napalm/base/clitools/cl_napalm_configure.py | 51 +- napalm/base/clitools/cl_napalm_test.py | 28 +- napalm/base/clitools/cl_napalm_validate.py | 25 +- napalm/base/clitools/helpers.py | 121 +- napalm/base/constants.py | 86 +- napalm/base/exceptions.py | 40 +- napalm/base/helpers.py | 104 +- napalm/base/mock.py | 19 +- napalm/base/netmiko_helpers.py | 4 +- napalm/base/test/base.py | 179 +- napalm/base/test/conftest.py | 30 +- napalm/base/test/double.py | 25 +- napalm/base/test/getters.py | 114 +- napalm/base/test/helpers.py | 23 +- napalm/base/test/models.py | 465 ++-- napalm/base/utils/jinja_filters.py | 23 +- napalm/base/utils/string_parsers.py | 69 +- napalm/base/validate.py | 38 +- napalm/eos/__init__.py | 2 +- napalm/eos/eos.py | 1680 +++++++------ napalm/ios/__init__.py | 2 +- napalm/ios/ios.py | 1809 ++++++++------ napalm/iosxr/__init__.py | 4 +- napalm/iosxr/iosxr.py | 2154 ++++++++++------- napalm/junos/constants.py | 14 +- napalm/junos/junos.py | 1723 +++++++------ napalm/junos/utils/junos_views.py | 2 +- napalm/nxos/__init__.py | 4 +- napalm/nxos/nxos.py | 1082 +++++---- napalm/nxos_ssh/__init__.py | 4 +- napalm/nxos_ssh/nxos_ssh.py | 606 +++-- setup.py | 42 +- test/base/test_get_network_driver.py | 2 +- test/base/test_helpers.py | 586 +++-- test/base/test_mock_driver.py | 57 +- test/base/test_napalm_test_framework.py | 2 + test/base/validate/test_unit.py | 503 ++-- test/base/validate/test_validate.py | 34 +- test/eos/TestEOSDriver.py | 18 +- test/eos/conftest.py | 16 +- test/eos/test_heredoc.py | 36 +- test/ios/TestIOSDriver.py | 74 +- test/ios/conftest.py | 14 +- test/iosxr/TestIOSXRDriver.py | 57 +- test/iosxr/conftest.py | 18 +- test/junos/TestJunOSDriver.py | 102 +- test/junos/conftest.py | 81 +- test/nxos/TestDriver.py | 11 +- test/nxos/conftest.py | 15 +- test/nxos/test_getters.py | 3 +- test/nxos_ssh/TestDriver.py | 11 +- test/nxos_ssh/conftest.py | 16 +- test/nxos_ssh/test_getters.py | 3 +- vagrant/provision.py | 44 +- 62 files changed, 7286 insertions(+), 5708 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 7d4c33839..d703a2882 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -34,40 +34,37 @@ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' -autoclass_content = 'both' +autoclass_content = "both" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.napoleon', -] +extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix of source filenames. -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. 
-project = u'NAPALM' -copyright = u'2016, David Barroso' +project = u"NAPALM" +copyright = u"2016, David Barroso" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '0' +version = "0" # The full version, including alpha/beta/rc tags. -release = '1' +release = "1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -81,7 +78,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', 'napalm_ansible_repo'] +exclude_patterns = ["_build", "napalm_ansible_repo"] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -99,7 +96,7 @@ # show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -112,14 +109,15 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' +on_rtd = os.environ.get("READTHEDOCS", None) == "True" if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' + + html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] else: - html_theme = 'default' + html_theme = "default" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -148,7 +146,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -197,7 +195,7 @@ # html_file_suffix = None # Output file base name for HTML help builder. -htmlhelp_basename = 'napalmdoc' +htmlhelp_basename = "napalmdoc" # -- Options for LaTeX output --------------------------------------------- @@ -205,10 +203,8 @@ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # 'preamble': '', } @@ -217,8 +213,7 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ('index', 'napalm.tex', u'NAPALM Documentation', - u'David Barroso', 'manual'), + ("index", "napalm.tex", u"NAPALM Documentation", u"David Barroso", "manual") ] # The name of an image file (relative to this directory) to place at the top of @@ -246,10 +241,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'napalm', u'NAPALM Documentation', - [u'David Barroso'], 1) -] +man_pages = [("index", "napalm", u"NAPALM Documentation", [u"David Barroso"], 1)] # If true, show URL addresses after external links. 
# man_show_urls = False @@ -261,9 +253,15 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - ('index', 'napalm', u'NAPALM Documentation', - u'David Barroso', 'napalm', 'One line description of project.', - 'Miscellaneous'), + ( + "index", + "napalm", + u"NAPALM Documentation", + u"David Barroso", + "napalm", + "One line description of project.", + "Miscellaneous", + ) ] # Documents to append as an appendix to all manuals. @@ -282,26 +280,35 @@ enable_pdf_build = False enable_epub_build = False -EXCLUDE_METHODS = ('cli', 'close', 'commit_config', 'compare_config', - 'discard_config', 'load_merge_candidate', - 'load_replace_candidate', 'load_template', 'open', - 'rollback', 'compliance_report', 'connection_tests', - 'post_connection_tests', 'pre_connection_tests') +EXCLUDE_METHODS = ( + "cli", + "close", + "commit_config", + "compare_config", + "discard_config", + "load_merge_candidate", + "load_replace_candidate", + "load_template", + "open", + "rollback", + "compliance_report", + "connection_tests", + "post_connection_tests", + "pre_connection_tests", +) -EXCLUDE_IN_REPORT = ('test_method_signatures') +EXCLUDE_IN_REPORT = "test_method_signatures" -METHOD_ALIASES = { - 'get_config_filtered': 'get_config', -} +METHOD_ALIASES = {"get_config_filtered": "get_config"} def _merge_results(last, intermediate): - if intermediate == 'failed': - return 'failed' - elif intermediate == 'skipped': - return 'failed' if last == 'failed' else 'skipped' - elif intermediate == 'passed': - return 'ok' if last == 'ok' else last + if intermediate == "failed": + return "failed" + elif intermediate == "skipped": + return "failed" if last == "failed" else "skipped" + elif intermediate == "passed": + return "ok" if last == "ok" else last else: return last @@ -310,7 +317,9 @@ def build_napalm_ansible_module_docs(app): """Create documentation for Ansible modules.""" # Add script to clone napalm-ansible repo - status = subprocess.call("./build-ansible-module-docs.sh", stdout=sys.stdout, stderr=sys.stderr) + status = subprocess.call( + "./build-ansible-module-docs.sh", stdout=sys.stdout, stderr=sys.stderr + ) if status != 0: print("Something bad happened when processing the Ansible modules.") @@ -318,15 +327,15 @@ def build_napalm_ansible_module_docs(app): env = Environment(loader=FileSystemLoader(".")) - modules_dir = './integrations/ansible/modules/source' - module_files = glob('{0}/*.json'.format(modules_dir)) + modules_dir = "./integrations/ansible/modules/source" + module_files = glob("{0}/*.json".format(modules_dir)) for module_file in module_files: - with open(module_file, 'r') as f: - module = module_file.split('/')[-1].split('.')[0] + with open(module_file, "r") as f: + module = module_file.split("/")[-1].split(".")[0] data = json.loads(f.read()) - data['name'] = module + data["name"] = module - module_dir = './integrations/ansible/modules/{0}'.format(module) + module_dir = "./integrations/ansible/modules/{0}".format(module) try: os.stat(module_dir) @@ -336,7 +345,7 @@ def build_napalm_ansible_module_docs(app): template_file = env.get_template("ansible-module.j2") rendered_template = template_file.render(**data) - with open('{0}/index.rst'.format(module_dir), 'w') as f: + with open("{0}/index.rst".format(module_dir), "w") as f: f.write(rendered_template) @@ -349,26 +358,28 @@ def build_getters_support_matrix(app): sys.exit(-1) drivers = set() - matrix = {m: defaultdict(dict) for m in dir(NetworkDriver) - if not (m.startswith('_') or - m in 
EXCLUDE_METHODS)} + matrix = { + m: defaultdict(dict) + for m in dir(NetworkDriver) + if not (m.startswith("_") or m in EXCLUDE_METHODS) + } regex_name = re.compile(r"(?P\w+)\/.*::test_(?P\w+)") - filename = './support/tests/report.json' - with open(filename, 'r') as f: + filename = "./support/tests/report.json" + with open(filename, "r") as f: data = json.loads(f.read()) for test in data["report"]["tests"]: - match = regex_name.search(test['name']) + match = regex_name.search(test["name"]) if match: - driver = match.group('driver') + driver = match.group("driver") drivers.add(driver) - method = match.group('getter') + method = match.group("getter") else: continue if method in EXCLUDE_IN_REPORT: continue - result = test['outcome'] + result = test["outcome"] if method in METHOD_ALIASES.keys(): method = METHOD_ALIASES[method] @@ -380,17 +391,18 @@ def build_getters_support_matrix(app): drivers = sorted(drivers) env = Environment(loader=FileSystemLoader(".")) template_file = env.get_template("matrix.j2") - rendered_template = template_file.render(matrix=matrix, drivers=drivers, - sorted_methods=sorted_methods) + rendered_template = template_file.render( + matrix=matrix, drivers=drivers, sorted_methods=sorted_methods + ) - with open('support/matrix.rst', 'w') as f: + with open("support/matrix.rst", "w") as f: f.write(rendered_template) def setup(app): """Map methods to states of the documentation build.""" - app.connect('builder-inited', build_getters_support_matrix) - app.connect('builder-inited', build_napalm_ansible_module_docs) + app.connect("builder-inited", build_getters_support_matrix) + app.connect("builder-inited", build_napalm_ansible_module_docs) build_getters_support_matrix(None) diff --git a/docs/tutorials/sample_scripts/load_replace.py b/docs/tutorials/sample_scripts/load_replace.py index 9de22b631..856bfafa7 100644 --- a/docs/tutorials/sample_scripts/load_replace.py +++ b/docs/tutorials/sample_scripts/load_replace.py @@ -15,27 +15,31 @@ def main(config_file): """Load a config for the device.""" if not (os.path.exists(config_file) and os.path.isfile(config_file)): - msg = 'Missing or invalid config file {0}'.format(config_file) + msg = "Missing or invalid config file {0}".format(config_file) raise ValueError(msg) - print('Loading config file {0}.'.format(config_file)) + print("Loading config file {0}.".format(config_file)) # Use the appropriate network driver to connect to the device: - driver = napalm.get_network_driver('eos') + driver = napalm.get_network_driver("eos") # Connect: - device = driver(hostname='127.0.0.1', username='vagrant', - password='vagrant', optional_args={'port': 12443}) + device = driver( + hostname="127.0.0.1", + username="vagrant", + password="vagrant", + optional_args={"port": 12443}, + ) - print('Opening ...') + print("Opening ...") device.open() - print('Loading replacement candidate ...') + print("Loading replacement candidate ...") device.load_replace_candidate(filename=config_file) # Note that the changes have not been applied yet. Before applying # the configuration you can check the changes: - print('\nDiff:') + print("\nDiff:") print(device.compare_config()) # You can commit or discard the candidate changes. @@ -43,19 +47,19 @@ def main(config_file): choice = raw_input("\nWould you like to commit these changes? [yN]: ") except NameError: choice = input("\nWould you like to commit these changes? 
[yN]: ") - if choice == 'y': - print('Committing ...') + if choice == "y": + print("Committing ...") device.commit_config() else: - print('Discarding ...') + print("Discarding ...") device.discard_config() # close the session with the device. device.close() - print('Done.') + print("Done.") -if __name__ == '__main__': +if __name__ == "__main__": if len(sys.argv) < 2: print('Please supply the full path to "new_good.conf"') sys.exit(1) diff --git a/napalm/_SUPPORTED_DRIVERS.py b/napalm/_SUPPORTED_DRIVERS.py index 6e1cef508..bd0937da8 100644 --- a/napalm/_SUPPORTED_DRIVERS.py +++ b/napalm/_SUPPORTED_DRIVERS.py @@ -1,9 +1 @@ -SUPPORTED_DRIVERS = [ - "base", - "eos", - "ios", - "iosxr", - "junos", - "nxos", - "nxos_ssh", -] +SUPPORTED_DRIVERS = ["base", "eos", "ios", "iosxr", "junos", "nxos", "nxos_ssh"] diff --git a/napalm/__init__.py b/napalm/__init__.py index 9036e13d0..22d46d32d 100644 --- a/napalm/__init__.py +++ b/napalm/__init__.py @@ -5,15 +5,16 @@ # Verify Python Version that is running try: - if not(sys.version_info.major == 2 and sys.version_info.minor == 7) and \ - not(sys.version_info.major == 3): - raise RuntimeError('NAPALM requires Python 2.7 or Python3') + if not (sys.version_info.major == 2 and sys.version_info.minor == 7) and not ( + sys.version_info.major == 3 + ): + raise RuntimeError("NAPALM requires Python 2.7 or Python3") except AttributeError: - raise RuntimeError('NAPALM requires Python 2.7 or Python3') + raise RuntimeError("NAPALM requires Python 2.7 or Python3") try: - __version__ = pkg_resources.get_distribution('napalm').version + __version__ = pkg_resources.get_distribution("napalm").version except pkg_resources.DistributionNotFound: __version__ = "Not installed" -__all__ = ('get_network_driver', 'SUPPORTED_DRIVERS') +__all__ = ("get_network_driver", "SUPPORTED_DRIVERS") diff --git a/napalm/base/__init__.py b/napalm/base/__init__.py index cad8d08a1..6133cf4a8 100644 --- a/napalm/base/__init__.py +++ b/napalm/base/__init__.py @@ -30,8 +30,8 @@ from napalm.base.utils import py23_compat __all__ = [ - 'get_network_driver', # export the function - 'NetworkDriver' # also export the base class + "get_network_driver", # export the function + "NetworkDriver", # also export the base class ] @@ -71,19 +71,23 @@ def get_network_driver(name, prepend=True): return MockDriver if not (isinstance(name, py23_compat.string_types) and len(name) > 0): - raise ModuleImportError('Please provide a valid driver name.') + raise ModuleImportError("Please provide a valid driver name.") # Only lowercase allowed name = name.lower() # Try to not raise error when users requests IOS-XR for e.g. 
- module_install_name = name.replace('-', '') + module_install_name = name.replace("-", "") community_install_name = "napalm_{name}".format(name=module_install_name) custom_install_name = "custom_napalm.{name}".format(name=module_install_name) # Can also request using napalm_[SOMETHING] - if 'napalm' not in module_install_name and prepend is True: - module_install_name = 'napalm.{name}'.format(name=module_install_name) + if "napalm" not in module_install_name and prepend is True: + module_install_name = "napalm.{name}".format(name=module_install_name) # Order is custom_napalm_os (local only) -> napalm.os (core) -> napalm_os (community) - for module_name in [custom_install_name, module_install_name, community_install_name]: + for module_name in [ + custom_install_name, + module_install_name, + community_install_name, + ]: try: module = importlib.import_module(module_name) break @@ -108,5 +112,7 @@ def get_network_driver(name, prepend=True): # looks like you don't have any Driver class in your module... raise ModuleImportError( - 'No class inheriting "napalm.base.base.NetworkDriver" found in "{install_name}".' - .format(install_name=module_install_name)) + 'No class inheriting "napalm.base.base.NetworkDriver" found in "{install_name}".'.format( + install_name=module_install_name + ) + ) diff --git a/napalm/base/base.py b/napalm/base/base.py index c9e4e7102..7e8397fe5 100644 --- a/napalm/base/base.py +++ b/napalm/base/base.py @@ -50,11 +50,14 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, exc_traceback): self.close() if exc_type is not None and ( - exc_type.__name__ not in dir(napalm.base.exceptions) and - exc_type.__name__ not in __builtins__.keys()): - epilog = ("NAPALM didn't catch this exception. Please, fill a bugfix on " - "https://github.com/napalm-automation/napalm/issues\n" - "Don't forget to include this traceback.") + exc_type.__name__ not in dir(napalm.base.exceptions) + and exc_type.__name__ not in __builtins__.keys() + ): + epilog = ( + "NAPALM didn't catch this exception. Please, fill a bugfix on " + "https://github.com/napalm-automation/napalm/issues\n" + "Don't forget to include this traceback." + ) print(epilog) return False @@ -75,14 +78,16 @@ def _netmiko_open(self, device_type, netmiko_optional_args=None): if netmiko_optional_args is None: netmiko_optional_args = {} try: - self._netmiko_device = ConnectHandler(device_type=device_type, - host=self.hostname, - username=self.username, - password=self.password, - timeout=self.timeout, - **netmiko_optional_args) + self._netmiko_device = ConnectHandler( + device_type=device_type, + host=self.hostname, + username=self.username, + password=self.password, + timeout=self.timeout, + **netmiko_optional_args + ) except NetMikoTimeoutException: - raise ConnectionException('Cannot connect to {}'.format(self.hostname)) + raise ConnectionException("Cannot connect to {}".format(self.hostname)) # ensure in enable mode self._netmiko_device.enable() @@ -140,8 +145,9 @@ def post_connection_tests(self): """ raise NotImplementedError - def load_template(self, template_name, template_source=None, - template_path=None, **template_vars): + def load_template( + self, template_name, template_source=None, template_path=None, **template_vars + ): """ Will load a templated configuration on the device. @@ -157,11 +163,13 @@ def load_template(self, template_name, template_source=None, source does not have the right format, either the arguments in `template_vars` are not \ properly specified. 
""" - return napalm.base.helpers.load_template(self, - template_name, - template_source=template_source, - template_path=template_path, - **template_vars) + return napalm.base.helpers.load_template( + self, + template_name, + template_source=template_source, + template_path=template_path, + **template_vars + ) def load_replace_candidate(self, filename=None, config=None): """ @@ -497,7 +505,7 @@ def get_interfaces_counters(self): """ raise NotImplementedError - def get_lldp_neighbors_detail(self, interface=''): + def get_lldp_neighbors_detail(self, interface=""): """ Returns a detailed view of the LLDP neighbors as a dictionary containing lists of dictionaries for each interface. @@ -534,7 +542,7 @@ def get_lldp_neighbors_detail(self, interface=''): """ raise NotImplementedError - def get_bgp_config(self, group='', neighbor=''): + def get_bgp_config(self, group="", neighbor=""): """ Returns a dictionary containing the BGP configuration. Can return either the whole config, either the config only for a group or neighbor. @@ -661,7 +669,7 @@ def cli(self, commands): """ raise NotImplementedError - def get_bgp_neighbors_detail(self, neighbor_address=''): + def get_bgp_neighbors_detail(self, neighbor_address=""): """ Returns a detailed view of the BGP neighbors as a dictionary of lists. @@ -967,7 +975,7 @@ def get_mac_address_table(self): """ raise NotImplementedError - def get_route_to(self, destination='', protocol=''): + def get_route_to(self, destination="", protocol=""): """ Returns a dictionary of dictionaries containing details of all available routes to a @@ -1190,8 +1198,16 @@ def get_probes_results(self): """ raise NotImplementedError - def ping(self, destination, source=c.PING_SOURCE, ttl=c.PING_TTL, timeout=c.PING_TIMEOUT, - size=c.PING_SIZE, count=c.PING_COUNT, vrf=c.PING_VRF): + def ping( + self, + destination, + source=c.PING_SOURCE, + ttl=c.PING_TTL, + timeout=c.PING_TIMEOUT, + size=c.PING_SIZE, + count=c.PING_COUNT, + vrf=c.PING_VRF, + ): """ Executes ping on the device and returns a dictionary with the result @@ -1254,12 +1270,14 @@ def ping(self, destination, source=c.PING_SOURCE, ttl=c.PING_TTL, timeout=c.PING """ raise NotImplementedError - def traceroute(self, - destination, - source=c.TRACEROUTE_SOURCE, - ttl=c.TRACEROUTE_TTL, - timeout=c.TRACEROUTE_TIMEOUT, - vrf=c.TRACEROUTE_VRF): + def traceroute( + self, + destination, + source=c.TRACEROUTE_SOURCE, + ttl=c.TRACEROUTE_TTL, + timeout=c.TRACEROUTE_TIMEOUT, + vrf=c.TRACEROUTE_VRF, + ): """ Executes traceroute on the device and returns a dictionary with the result. @@ -1462,7 +1480,7 @@ def get_optics(self): """ raise NotImplementedError - def get_config(self, retrieve='all'): + def get_config(self, retrieve="all"): """ Return the configuration of a device. @@ -1483,7 +1501,7 @@ def get_config(self, retrieve='all'): """ raise NotImplementedError - def get_network_instances(self, name=''): + def get_network_instances(self, name=""): """ Return a dictionary of network instances (VRFs) configured, including default/global @@ -1621,12 +1639,15 @@ def compliance_report(self, validation_file=None, validation_source=None): :raise ValidationException: File is not valid. :raise NotImplementedError: Method not implemented. 
""" - return validate.compliance_report(self, validation_file=validation_file, - validation_source=validation_source) + return validate.compliance_report( + self, validation_file=validation_file, validation_source=validation_source + ) def _canonical_int(self, interface): """Expose the helper function within this class.""" if self.use_canonical_interface is True: - return napalm.base.helpers.canonical_interface_name(interface, addl_name_map=None) + return napalm.base.helpers.canonical_interface_name( + interface, addl_name_map=None + ) else: return interface diff --git a/napalm/base/canonical_map.py b/napalm/base/canonical_map.py index ea529740c..3a9697ccc 100644 --- a/napalm/base/canonical_map.py +++ b/napalm/base/canonical_map.py @@ -3,107 +3,107 @@ base_interfaces = { - "ATM": "ATM", - "AT": "ATM", - "B": "Bdi", - "Bd": "Bdi", - "Bdi": "Bdi", - "EOBC": "EOBC", - "EO": "EOBC", - "Ethernet": "Ethernet", - "Eth": "Ethernet", - "Et": "Ethernet", - "FastEthernet": "FastEthernet", - "FastEth": "FastEthernet", - "FastE": "FastEthernet", - "Fast": "FastEthernet", - "Fas": "FastEthernet", - "FE": "FastEthernet", - "Fa": "FastEthernet", - "Fddi": "Fddi", - "FD": "Fddi", - "FortyGigabitEthernet": "FortyGigabitEthernet", - "FortyGigEthernet": "FortyGigabitEthernet", - "FortyGigEth": "FortyGigabitEthernet", - "FortyGigE": "FortyGigabitEthernet", - "FortyGig": "FortyGigabitEthernet", - "FGE": "FortyGigabitEthernet", - "FO": "FortyGigabitEthernet", - "Fo": "FortyGigabitEthernet", - "GigabitEthernet": "GigabitEthernet", - "GigEthernet": "GigabitEthernet", - "GigEth": "GigabitEthernet", - "GigE": "GigabitEthernet", - "Gig": "GigabitEthernet", - "GE": "GigabitEthernet", - "Gi": "GigabitEthernet", - "HundredGigabitEthernet": "HundredGigabitEthernet", - "HundredGigEthernet": "HundredGigabitEthernet", - "HundredGigEth": "HundredGigabitEthernet", - "HundredGigE": "HundredGigabitEthernet", - "HundredGig": "HundredGigabitEthernet", - "Hu": "HundredGigabitEthernet", - "Loopback": "Loopback", - "Lo": "Loopback", - "lo": "Loopback", - "Management": "Management", - "Mgmt": "Management", - "Ma": "Management", - "Management_short": "Ma", - "MFR": "MFR", - "Multilink": "Multilink", - "Mu": "Multilink", - "n": "nve", - "nv": "nve", - "nve": "nve", - "PortChannel": "Port-channel", - "Port-channel": "Port-channel", - "Port-Channel": "Port-channel", - "Po": "Port-channel", - "POS": "POS", - "PO": "POS", - "Serial": "Serial", - "Se": "Serial", - "S": "Serial", - "TenGigabitEthernet": "TenGigabitEthernet", - "TenGigEthernet": "TenGigabitEthernet", - "TenGigEth": "TenGigabitEthernet", - "TenGig": "TenGigabitEthernet", - "TeGig": "TenGigabitEthernet", - "Ten": "TenGigabitEthernet", - "T": "TenGigabitEthernet", - "Te": "TenGigabitEthernet", - "Tunnel": "Tunnel", - "Tun": "Tunnel", - "Tu": "Tunnel", - "Virtual-Access": "Virtual-Access", - "Vi": "Virtual-Access", - "Virtual-Template": "Virtual-Template", - "Vt": "Virtual-Template", - "VLAN": "VLAN", - "V": "VLAN", - "Vl": "VLAN" + "ATM": "ATM", + "AT": "ATM", + "B": "Bdi", + "Bd": "Bdi", + "Bdi": "Bdi", + "EOBC": "EOBC", + "EO": "EOBC", + "Ethernet": "Ethernet", + "Eth": "Ethernet", + "Et": "Ethernet", + "FastEthernet": "FastEthernet", + "FastEth": "FastEthernet", + "FastE": "FastEthernet", + "Fast": "FastEthernet", + "Fas": "FastEthernet", + "FE": "FastEthernet", + "Fa": "FastEthernet", + "Fddi": "Fddi", + "FD": "Fddi", + "FortyGigabitEthernet": "FortyGigabitEthernet", + "FortyGigEthernet": "FortyGigabitEthernet", + "FortyGigEth": "FortyGigabitEthernet", + "FortyGigE": 
"FortyGigabitEthernet", + "FortyGig": "FortyGigabitEthernet", + "FGE": "FortyGigabitEthernet", + "FO": "FortyGigabitEthernet", + "Fo": "FortyGigabitEthernet", + "GigabitEthernet": "GigabitEthernet", + "GigEthernet": "GigabitEthernet", + "GigEth": "GigabitEthernet", + "GigE": "GigabitEthernet", + "Gig": "GigabitEthernet", + "GE": "GigabitEthernet", + "Gi": "GigabitEthernet", + "HundredGigabitEthernet": "HundredGigabitEthernet", + "HundredGigEthernet": "HundredGigabitEthernet", + "HundredGigEth": "HundredGigabitEthernet", + "HundredGigE": "HundredGigabitEthernet", + "HundredGig": "HundredGigabitEthernet", + "Hu": "HundredGigabitEthernet", + "Loopback": "Loopback", + "Lo": "Loopback", + "lo": "Loopback", + "Management": "Management", + "Mgmt": "Management", + "Ma": "Management", + "Management_short": "Ma", + "MFR": "MFR", + "Multilink": "Multilink", + "Mu": "Multilink", + "n": "nve", + "nv": "nve", + "nve": "nve", + "PortChannel": "Port-channel", + "Port-channel": "Port-channel", + "Port-Channel": "Port-channel", + "Po": "Port-channel", + "POS": "POS", + "PO": "POS", + "Serial": "Serial", + "Se": "Serial", + "S": "Serial", + "TenGigabitEthernet": "TenGigabitEthernet", + "TenGigEthernet": "TenGigabitEthernet", + "TenGigEth": "TenGigabitEthernet", + "TenGig": "TenGigabitEthernet", + "TeGig": "TenGigabitEthernet", + "Ten": "TenGigabitEthernet", + "T": "TenGigabitEthernet", + "Te": "TenGigabitEthernet", + "Tunnel": "Tunnel", + "Tun": "Tunnel", + "Tu": "Tunnel", + "Virtual-Access": "Virtual-Access", + "Vi": "Virtual-Access", + "Virtual-Template": "Virtual-Template", + "Vt": "Virtual-Template", + "VLAN": "VLAN", + "V": "VLAN", + "Vl": "VLAN", } reverse_mapping = { - "ATM": "At", - "EOBC": "EO", - "Ethernet": "Et", - "FastEthernet": "Fa", - "Fddi": "FD", - "FortyGigabitEthernet": "Fo", - "GigabitEthernet": "Gi", - "HundredGigabitEthernet": "Hu", - "Loopback": "Lo", - "Management": "Ma", - "MFR": "MFR", - "Multilink": "Mu", - "Port-channel": "Po", - "POS": "PO", - "Serial": "Se", - "TenGigabitEthernet": "Te", - "Tunnel": "Tu", - "Virtual-Access": "Vi", - "Virtual-Template": "Vt", - "VLAN": "Vl" + "ATM": "At", + "EOBC": "EO", + "Ethernet": "Et", + "FastEthernet": "Fa", + "Fddi": "FD", + "FortyGigabitEthernet": "Fo", + "GigabitEthernet": "Gi", + "HundredGigabitEthernet": "Hu", + "Loopback": "Lo", + "Management": "Ma", + "MFR": "MFR", + "Multilink": "Mu", + "Port-channel": "Po", + "POS": "PO", + "Serial": "Se", + "TenGigabitEthernet": "Te", + "Tunnel": "Tu", + "Virtual-Access": "Vi", + "Virtual-Template": "Vt", + "VLAN": "Vl", } diff --git a/napalm/base/clitools/cl_napalm.py b/napalm/base/clitools/cl_napalm.py index 69a835f6c..df68a582b 100755 --- a/napalm/base/clitools/cl_napalm.py +++ b/napalm/base/clitools/cl_napalm.py @@ -19,132 +19,149 @@ def debugging(name): def real_decorator(func): @wraps(func) def wrapper(*args, **kwargs): - censor_parameters = ["password"] - censored_kwargs = {k: v if k not in censor_parameters else "*******" - for k, v in kwargs.items()} - logger.debug("{} - Calling with args: {}, {}".format(name, args, censored_kwargs)) - try: - r = func(*args, **kwargs) - logger.debug("{} - Successful".format(name)) - return r - except NotImplementedError: - if name not in ["pre_connection_tests", "connection_tests", - "post_connection_tests"]: - logger.debug("{} - Not implemented".format(name)) - except Exception as e: - logger.error("{} - Failed: {}".format(name, e)) - print("\n================= Traceback =================\n") - raise + censor_parameters = ["password"] + censored_kwargs = 
{ + k: v if k not in censor_parameters else "*******" + for k, v in kwargs.items() + } + logger.debug( + "{} - Calling with args: {}, {}".format(name, args, censored_kwargs) + ) + try: + r = func(*args, **kwargs) + logger.debug("{} - Successful".format(name)) + return r + except NotImplementedError: + if name not in [ + "pre_connection_tests", + "connection_tests", + "post_connection_tests", + ]: + logger.debug("{} - Not implemented".format(name)) + except Exception as e: + logger.error("{} - Failed: {}".format(name, e)) + print("\n================= Traceback =================\n") + raise + return wrapper + return real_decorator -logger = logging.getLogger('napalm') +logger = logging.getLogger("napalm") def build_help(): parser = argparse.ArgumentParser( - description='Command line tool to handle configuration on devices using NAPALM.' - 'The script will print the diff on the screen', - epilog='Automate all the things!!!' + description="Command line tool to handle configuration on devices using NAPALM." + "The script will print the diff on the screen", + epilog="Automate all the things!!!", ) parser.add_argument( - dest='hostname', - action='store', - help='Host where you want to deploy the configuration.' + dest="hostname", + action="store", + help="Host where you want to deploy the configuration.", ) parser.add_argument( - '--user', '-u', - dest='user', - action='store', + "--user", + "-u", + dest="user", + action="store", default=getpass.getuser(), - help='User for authenticating to the host. Default: user running the script.' + help="User for authenticating to the host. Default: user running the script.", ) parser.add_argument( - '--password', '-p', - dest='password', - action='store', - help='Password for authenticating to the host.' - 'If you do not provide a password in the CLI you will be prompted.', + "--password", + "-p", + dest="password", + action="store", + help="Password for authenticating to the host." + "If you do not provide a password in the CLI you will be prompted.", ) parser.add_argument( - '--vendor', '-v', - dest='vendor', - action='store', + "--vendor", + "-v", + dest="vendor", + action="store", required=True, - help='Host Operating System.' + help="Host Operating System.", ) parser.add_argument( - '--optional_args', '-o', - dest='optional_args', - action='store', - help='String with comma separated key=value pairs passed via optional_args to the driver.', + "--optional_args", + "-o", + dest="optional_args", + action="store", + help="String with comma separated key=value pairs passed via optional_args to the driver.", ) parser.add_argument( - '--debug', - dest='debug', - action='store_true', - help='Enables debug mode; more verbosity.' + "--debug", + dest="debug", + action="store_true", + help="Enables debug mode; more verbosity.", ) - subparser = parser.add_subparsers(title='actions') + subparser = parser.add_subparsers(title="actions") - config = subparser.add_parser('configure', help='Perform a configuration operation') - config.set_defaults(which='config') + config = subparser.add_parser("configure", help="Perform a configuration operation") + config.set_defaults(which="config") config.add_argument( - dest='config_file', - action='store', - help='File containing the configuration you want to deploy.' 
+ dest="config_file", + action="store", + help="File containing the configuration you want to deploy.", ) config.add_argument( - '--strategy', '-s', - dest='strategy', - action='store', - choices=['replace', 'merge'], - default='replace', - help='Strategy to use to deploy configuration. Default: replace.' + "--strategy", + "-s", + dest="strategy", + action="store", + choices=["replace", "merge"], + default="replace", + help="Strategy to use to deploy configuration. Default: replace.", ) config.add_argument( - '--dry-run', '-d', - dest='dry_run', - action='store_true', + "--dry-run", + "-d", + dest="dry_run", + action="store_true", default=None, - help='Only returns diff, it does not deploy the configuration.', + help="Only returns diff, it does not deploy the configuration.", ) - call = subparser.add_parser('call', help='Call a napalm method') - call.set_defaults(which='call') - call.add_argument( - dest='method', - action='store', - help='Run this method' - ) + call = subparser.add_parser("call", help="Call a napalm method") + call.set_defaults(which="call") + call.add_argument(dest="method", action="store", help="Run this method") call.add_argument( - '--method-kwargs', '-k', - dest='method_kwargs', - action='store', - help='kwargs to pass to the method. For example: "destination=1.1.1.1,protocol=bgp"' + "--method-kwargs", + "-k", + dest="method_kwargs", + action="store", + help='kwargs to pass to the method. For example: "destination=1.1.1.1,protocol=bgp"', ) - validate = subparser.add_parser('validate', help='Validate configuration/state') - validate.set_defaults(which='validate') + validate = subparser.add_parser("validate", help="Validate configuration/state") + validate.set_defaults(which="validate") validate.add_argument( - dest='validation_file', - action='store', - help='Validation file containing resources derised states' + dest="validation_file", + action="store", + help="Validation file containing resources derised states", ) args = parser.parse_args() if args.password is None: - password = getpass.getpass('Enter password: ') - setattr(args, 'password', password) + password = getpass.getpass("Enter password: ") + setattr(args, "password", password) return args def check_installed_packages(): logger.debug("Gathering napalm packages") - napalm_packages = sorted(["{}=={}".format(i.key, i.version) - for i in pkg_resources.working_set if i.key.startswith("napalm")]) + napalm_packages = sorted( + [ + "{}=={}".format(i.key, i.version) + for i in pkg_resources.working_set + if i.key.startswith("napalm") + ] + ) for n in napalm_packages: logger.debug(n) @@ -215,9 +232,9 @@ def call_commit_config(device, *args, **kwargs): def configuration_change(device, config_file, strategy, dry_run): - if strategy == 'replace': + if strategy == "replace": strategy_method = call_load_replace_candidate - elif strategy == 'merge': + elif strategy == "merge": strategy_method = call_load_merge_candidate strategy_method(device, filename=config_file) @@ -233,7 +250,9 @@ def configuration_change(device, config_file, strategy, dry_run): def call_getter(device, method, **kwargs): logger.debug("{} - Attempting to resolve method".format(method)) func = getattr(device, method) - logger.debug("{} - Attempting to call method with kwargs: {}".format(method, kwargs)) + logger.debug( + "{} - Attempting to call method with kwargs: {}".format(method, kwargs) + ) r = func(**kwargs) logger.debug("{} - Response".format(method)) print(json.dumps(r, indent=4)) @@ -250,8 +269,14 @@ def run_tests(args): driver = 
call_get_network_driver(args.vendor) optional_args = helpers.parse_optional_args(args.optional_args) - device = call_instantiating_object(driver, args.hostname, args.user, password=args.password, - timeout=60, optional_args=optional_args) + device = call_instantiating_object( + driver, + args.hostname, + args.user, + password=args.password, + timeout=60, + optional_args=optional_args, + ) if args.debug: call_pre_connection(device) @@ -262,12 +287,12 @@ def run_tests(args): call_connection(device) call_facts(device) - if args.which == 'call': + if args.which == "call": method_kwargs = helpers.parse_optional_args(args.method_kwargs) call_getter(device, args.method, **method_kwargs) - elif args.which == 'config': + elif args.which == "config": configuration_change(device, args.config_file, args.strategy, args.dry_run) - elif args.which == 'validate': + elif args.which == "validate": call_compliance_report(device, args.validation_file) call_close(device) @@ -284,5 +309,5 @@ def main(): run_tests(args) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/napalm/base/clitools/cl_napalm_configure.py b/napalm/base/clitools/cl_napalm_configure.py index 1fa586886..476ef03e9 100644 --- a/napalm/base/clitools/cl_napalm_configure.py +++ b/napalm/base/clitools/cl_napalm_configure.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- -''' +""" NAPALM CLI Tools: configure =========================== Deploy device config from the shell. -''' +""" # Python3 support from __future__ import print_function @@ -20,36 +20,45 @@ import sys import logging -logger = logging.getLogger('cl-napalm-config.py') +logger = logging.getLogger("cl-napalm-config.py") -def run(vendor, hostname, user, password, strategy, optional_args, config_file, dry_run): +def run( + vendor, hostname, user, password, strategy, optional_args, config_file, dry_run +): logger.debug('Getting driver for OS "{driver}"'.format(driver=vendor)) driver = get_network_driver(vendor) optional_args = parse_optional_args(optional_args) - logger.debug('Connecting to device "{}" with user "{}" and optional_args={}'.format( - hostname, user, optional_args)) + logger.debug( + 'Connecting to device "{}" with user "{}" and optional_args={}'.format( + hostname, user, optional_args + ) + ) with driver(hostname, user, password, optional_args=optional_args) as device: - logger.debug('Strategy for loading configuration is "{strategy}"'.format(strategy=strategy)) - if strategy == 'replace': + logger.debug( + 'Strategy for loading configuration is "{strategy}"'.format( + strategy=strategy + ) + ) + if strategy == "replace": strategy_method = device.load_replace_candidate - elif strategy == 'merge': + elif strategy == "merge": strategy_method = device.load_merge_candidate logger.debug('Loading configuration file "{config}"'.format(config=config_file)) strategy_method(filename=config_file) - logger.debug('Comparing configuration') + logger.debug("Comparing configuration") diff = device.compare_config() if dry_run: - logger.debug('Dry-run. Discarding configuration.') + logger.debug("Dry-run. 
Discarding configuration.") else: - logger.debug('Committing configuration') + logger.debug("Committing configuration") device.commit_config() - logger.debug('Closing session') + logger.debug("Closing session") return diff @@ -58,10 +67,20 @@ def main(): args = build_help(configure=True) configure_logging(logger, args.debug) - print(run(args.vendor, args.hostname, args.user, args.password, args.strategy, - args.optional_args, args.config_file, args.dry_run)) + print( + run( + args.vendor, + args.hostname, + args.user, + args.password, + args.strategy, + args.optional_args, + args.config_file, + args.dry_run, + ) + ) sys.exit(0) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/napalm/base/clitools/cl_napalm_test.py b/napalm/base/clitools/cl_napalm_test.py index 17ac5bd70..89376cb2d 100644 --- a/napalm/base/clitools/cl_napalm_test.py +++ b/napalm/base/clitools/cl_napalm_test.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- -''' +""" NAPALM CLI Tools: test connectivity =================================== Module to test connectivity with the network device through NAPALM. -''' +""" from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals @@ -20,7 +20,7 @@ import sys import logging -logger = logging.getLogger('cl_napalm_test.py') +logger = logging.getLogger("cl_napalm_test.py") def main(): @@ -32,17 +32,19 @@ def main(): driver = get_network_driver(args.vendor) optional_args = parse_optional_args(args.optional_args) - logger.debug('Connecting to device "{}" with user "{}" and optional_args={}'.format( - args.hostname, args.user, optional_args)) - - with driver(args.hostname, - args.user, - args.password, - optional_args=optional_args) as device: - logger.debug('Successfully connected to the device: {}'.format(device.hostname)) - print('Successfully connected to the device') + logger.debug( + 'Connecting to device "{}" with user "{}" and optional_args={}'.format( + args.hostname, args.user, optional_args + ) + ) + + with driver( + args.hostname, args.user, args.password, optional_args=optional_args + ) as device: + logger.debug("Successfully connected to the device: {}".format(device.hostname)) + print("Successfully connected to the device") sys.exit(0) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/napalm/base/clitools/cl_napalm_validate.py b/napalm/base/clitools/cl_napalm_validate.py index c584b5bb5..6137ecf29 100755 --- a/napalm/base/clitools/cl_napalm_validate.py +++ b/napalm/base/clitools/cl_napalm_validate.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- -''' +""" NAPALM CLI Tools: validate =========================== Validating deployments from the shell. 
-''' +""" # Python3 support from __future__ import print_function @@ -22,7 +22,7 @@ import json import logging -logger = logging.getLogger('cl_napalm_validate.py') +logger = logging.getLogger("cl_napalm_validate.py") def main(): @@ -34,15 +34,20 @@ def main(): driver = get_network_driver(args.vendor) optional_args = parse_optional_args(args.optional_args) - logger.debug('Connecting to device "{}" with user "{}" and optional_args={}'.format( - args.hostname, args.user, optional_args)) - - with driver(args.hostname, args.user, args.password, optional_args=optional_args) as device: - logger.debug('Generating compliance report') + logger.debug( + 'Connecting to device "{}" with user "{}" and optional_args={}'.format( + args.hostname, args.user, optional_args + ) + ) + + with driver( + args.hostname, args.user, args.password, optional_args=optional_args + ) as device: + logger.debug("Generating compliance report") print(json.dumps(device.compliance_report(args.validation_file), indent=4)) - logger.debug('Closing session') + logger.debug("Closing session") sys.exit(0) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/napalm/base/clitools/helpers.py b/napalm/base/clitools/helpers.py index d3020e579..c8bc6908f 100644 --- a/napalm/base/clitools/helpers.py +++ b/napalm/base/clitools/helpers.py @@ -1,10 +1,10 @@ # -*- coding: utf-8 -*- -''' +""" NAPALM CLI Tools: helpers ========================= Defines helpers for the CLI tools. -''' +""" from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals @@ -19,89 +19,98 @@ def warning(): - warnings.simplefilter('always', DeprecationWarning) - warnings.warn("This tool has been deprecated, please use `napalm` instead\n", - DeprecationWarning) + warnings.simplefilter("always", DeprecationWarning) + warnings.warn( + "This tool has been deprecated, please use `napalm` instead\n", + DeprecationWarning, + ) def build_help(connect_test=False, validate=False, configure=False, napalm_cli=False): parser = argparse.ArgumentParser( - description='Command line tool to handle configuration on devices using NAPALM.' - 'The script will print the diff on the screen', - epilog='Automate all the things!!!' + description="Command line tool to handle configuration on devices using NAPALM." + "The script will print the diff on the screen", + epilog="Automate all the things!!!", ) parser.add_argument( - dest='hostname', - action='store', - help='Host where you want to deploy the configuration.' + dest="hostname", + action="store", + help="Host where you want to deploy the configuration.", ) parser.add_argument( - '--user', '-u', - dest='user', - action='store', + "--user", + "-u", + dest="user", + action="store", default=getpass.getuser(), - help='User for authenticating to the host. Default: user running the script.' + help="User for authenticating to the host. Default: user running the script.", ) parser.add_argument( - '--password', '-p', - dest='password', - action='store', - help='Password for authenticating to the host.' - 'If you do not provide a password in the CLI you will be prompted.', + "--password", + "-p", + dest="password", + action="store", + help="Password for authenticating to the host." + "If you do not provide a password in the CLI you will be prompted.", ) parser.add_argument( - '--vendor', '-v', - dest='vendor', - action='store', + "--vendor", + "-v", + dest="vendor", + action="store", required=True, - help='Host Operating System.' 
+ help="Host Operating System.", ) parser.add_argument( - '--optional_args', '-o', - dest='optional_args', - action='store', - help='String with comma separated key=value pairs passed via optional_args to the driver.', + "--optional_args", + "-o", + dest="optional_args", + action="store", + help="String with comma separated key=value pairs passed via optional_args to the driver.", ) parser.add_argument( - '--debug', - dest='debug', - action='store_true', - help='Enables debug mode; more verbosity.' + "--debug", + dest="debug", + action="store_true", + help="Enables debug mode; more verbosity.", ) if configure: parser.add_argument( - '--strategy', '-s', - dest='strategy', - action='store', - choices=['replace', 'merge'], - default='replace', - help='Strategy to use to deploy configuration. Default: replace.' + "--strategy", + "-s", + dest="strategy", + action="store", + choices=["replace", "merge"], + default="replace", + help="Strategy to use to deploy configuration. Default: replace.", ) parser.add_argument( - dest='config_file', - action='store', - help='File containing the configuration you want to deploy.' + dest="config_file", + action="store", + help="File containing the configuration you want to deploy.", ) parser.add_argument( - '--dry-run', '-d', - dest='dry_run', - action='store_true', + "--dry-run", + "-d", + dest="dry_run", + action="store_true", default=None, - help='Only returns diff, it does not deploy the configuration.', + help="Only returns diff, it does not deploy the configuration.", ) elif validate: parser.add_argument( - '--validation_file', '-f', - dest='validation_file', - action='store', - help='Validation file containing resources derised states' + "--validation_file", + "-f", + dest="validation_file", + action="store", + help="Validation file containing resources derised states", ) args = parser.parse_args() if args.password is None: - password = getpass.getpass('Enter password: ') - setattr(args, 'password', password) + password = getpass.getpass("Enter password: ") + setattr(args, "password", password) return args @@ -113,7 +122,9 @@ def configure_logging(logger, debug): logger.setLevel(logging.INFO) ch = logging.StreamHandler(sys.stdout) - formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') + formatter = logging.Formatter( + "%(asctime)s - %(name)s - %(levelname)s - %(message)s" + ) ch.setFormatter(formatter) logger.addHandler(ch) return logger @@ -121,6 +132,8 @@ def configure_logging(logger, debug): def parse_optional_args(optional_args): if optional_args is not None: - return {x.split('=')[0]: ast.literal_eval(x.split('=')[1]) - for x in optional_args.split(',')} + return { + x.split("=")[0]: ast.literal_eval(x.split("=")[1]) + for x in optional_args.split(",") + } return {} diff --git a/napalm/base/constants.py b/napalm/base/constants.py index d327763ed..4dfea025f 100644 --- a/napalm/base/constants.py +++ b/napalm/base/constants.py @@ -7,72 +7,64 @@ INTERFACE_NULL_SPEED = -1 -ACTION_TYPE_METHODS = ('ping', 'traceroute', ) +ACTION_TYPE_METHODS = ("ping", "traceroute") BGP_NEIGHBOR_NULL_COUNTER = -1 -SNMP_AUTHORIZATION_MODE_MAP = { - 'read-only': 'ro', - 'read-write': 'rw' -} +SNMP_AUTHORIZATION_MODE_MAP = {"read-only": "ro", "read-write": "rw"} ROUTE_COMMON_PROTOCOL_FIELDS = [ - 'destination', - 'prefix_length', - 'protocol', - 'current_active', - 'last_active', - 'age', - 'next_hop', - 'outgoing_interface', - 'selected_next_hop', - 'preference', - 'inactive_reason', - 'routing_table' + "destination", + "prefix_length", + 
"protocol", + "current_active", + "last_active", + "age", + "next_hop", + "outgoing_interface", + "selected_next_hop", + "preference", + "inactive_reason", + "routing_table", ] # identifies the list of fileds common for all protocols ROUTE_PROTOCOL_SPECIFIC_FIELDS = { - 'bgp': [ - 'local_as', - 'remote_as', - 'as_path', - 'communities', - 'local_preference', - 'preference2', - 'remote_address', - 'metric', - 'metric2' - ], - 'isis': [ - 'level', - 'metric', - 'local_as' + "bgp": [ + "local_as", + "remote_as", + "as_path", + "communities", + "local_preference", + "preference2", + "remote_address", + "metric", + "metric2", ], - 'static': [ # nothing specific to static routes - ] + "isis": ["level", "metric", "local_as"], + "static": [], # nothing specific to static routes } TRACEROUTE_TTL = 255 -TRACEROUTE_SOURCE = '' +TRACEROUTE_SOURCE = "" TRACEROUTE_TIMEOUT = 2 -TRACEROUTE_NULL_HOST_NAME = '*' -TRACEROUTE_NULL_IP_ADDRESS = '*' -TRACEROUTE_VRF = '' +TRACEROUTE_NULL_HOST_NAME = "*" +TRACEROUTE_NULL_IP_ADDRESS = "*" +TRACEROUTE_VRF = "" -OPTICS_NULL_LEVEL = '-Inf' +OPTICS_NULL_LEVEL = "-Inf" -PING_SOURCE = '' +PING_SOURCE = "" PING_TTL = 255 PING_TIMEOUT = 2 PING_SIZE = 100 PING_COUNT = 5 -PING_VRF = '' +PING_VRF = "" NETMIKO_MAP = { - 'ios': 'cisco_ios', - 'nxos': 'cisco_nxos', - 'nxos_ssh': 'cisco_nxos', - 'iosxr': 'cisco_iosxr', - 'eos': 'arista_eos', - 'junos': 'juniper_eos', + "ios": "cisco_ios", + "nxos": "cisco_nxos", + "nxos_ssh": "cisco_nxos", + "iosxr": "cisco_iosxr", + "eos": "arista_eos", + "junos": "juniper_eos", } diff --git a/napalm/base/exceptions.py b/napalm/base/exceptions.py index 35f8f5ae3..002c275ca 100644 --- a/napalm/base/exceptions.py +++ b/napalm/base/exceptions.py @@ -18,9 +18,10 @@ class NapalmException(Exception): - ''' + """ Base Exception Class. - ''' + """ + pass @@ -29,32 +30,35 @@ class ModuleImportError(NapalmException): class ConnectionException(NapalmException): - ''' + """ Unable to connect to the network device. - ''' + """ + pass class ConnectAuthError(ConnectionException): - ''' + """ Unable to connect to the network device due to invalid credentials. - ''' + """ + pass class ConnectTimeoutError(ConnectionException): - ''' + """ Exception raised when the connection to the network device takes too long. This may be avoided by adjusting the `timeout` argument. - ''' + """ + pass class ConnectionClosedException(ConnectionException): - ''' + """ The network device closed the connection. Raised whenever we try to execute a certain function, but we detect that the connection @@ -62,7 +66,8 @@ class ConnectionClosedException(ConnectionException): various reasons: the network device terminates the session or it is dropped by a firewall or the server. - ''' + """ + pass @@ -75,24 +80,27 @@ class MergeConfigException(NapalmException): class CommitError(NapalmException): - ''' + """ Raised when unable to commit the candidate config into the running config. - ''' + """ + pass class LockError(NapalmException): - ''' + """ Unable to lock the candidate config. - ''' + """ + pass class UnlockError(NapalmException): - ''' + """ Unable to unlock the candidate config. 
- ''' + """ + pass diff --git a/napalm/base/helpers.py b/napalm/base/helpers.py index b007ebfee..00e806417 100644 --- a/napalm/base/helpers.py +++ b/napalm/base/helpers.py @@ -30,60 +30,78 @@ class _MACFormat(mac_unix): pass -_MACFormat.word_fmt = '%.2X' +_MACFormat.word_fmt = "%.2X" # ---------------------------------------------------------------------------------------------------------------------- # callable helpers # ---------------------------------------------------------------------------------------------------------------------- -def load_template(cls, template_name, template_source=None, template_path=None, - openconfig=False, jinja_filters={}, **template_vars): +def load_template( + cls, + template_name, + template_source=None, + template_path=None, + openconfig=False, + jinja_filters={}, + **template_vars +): try: search_path = [] if isinstance(template_source, py23_compat.string_types): template = jinja2.Template(template_source) else: if template_path is not None: - if (isinstance(template_path, py23_compat.string_types) and - os.path.isdir(template_path) and os.path.isabs(template_path)): + if ( + isinstance(template_path, py23_compat.string_types) + and os.path.isdir(template_path) + and os.path.isabs(template_path) + ): # append driver name at the end of the custom path - search_path.append(os.path.join(template_path, cls.__module__.split('.')[-1])) + search_path.append( + os.path.join(template_path, cls.__module__.split(".")[-1]) + ) else: - raise IOError("Template path does not exist: {}".format(template_path)) + raise IOError( + "Template path does not exist: {}".format(template_path) + ) else: # Search modules for template paths - search_path = [os.path.dirname(os.path.abspath(sys.modules[c.__module__].__file__)) - for c in cls.__class__.mro() if c is not object] + search_path = [ + os.path.dirname(os.path.abspath(sys.modules[c.__module__].__file__)) + for c in cls.__class__.mro() + if c is not object + ] if openconfig: - search_path = ['{}/oc_templates'.format(s) for s in search_path] + search_path = ["{}/oc_templates".format(s) for s in search_path] else: - search_path = ['{}/templates'.format(s) for s in search_path] + search_path = ["{}/templates".format(s) for s in search_path] loader = jinja2.FileSystemLoader(search_path) environment = jinja2.Environment(loader=loader) for filter_name, filter_function in itertools.chain( - CustomJinjaFilters.filters().items(), - jinja_filters.items()): + CustomJinjaFilters.filters().items(), jinja_filters.items() + ): environment.filters[filter_name] = filter_function - template = environment.get_template('{template_name}.j2'.format( - template_name=template_name - )) + template = environment.get_template( + "{template_name}.j2".format(template_name=template_name) + ) configuration = template.render(**template_vars) except jinja2.exceptions.TemplateNotFound: raise napalm.base.exceptions.TemplateNotImplemented( "Config template {template_name}.j2 not found in search path: {sp}".format( - template_name=template_name, - sp=search_path + template_name=template_name, sp=search_path ) ) - except (jinja2.exceptions.UndefinedError, jinja2.exceptions.TemplateSyntaxError) as jinjaerr: + except ( + jinja2.exceptions.UndefinedError, + jinja2.exceptions.TemplateSyntaxError, + ) as jinjaerr: raise napalm.base.exceptions.TemplateRenderException( "Unable to render the Jinja config template {template_name}: {error}".format( - template_name=template_name, - error=py23_compat.text_type(jinjaerr), + template_name=template_name, 
error=py23_compat.text_type(jinjaerr) ) ) return cls.load_merge_candidate(config=configuration) @@ -106,13 +124,14 @@ def textfsm_extractor(cls, template_name, raw_text): for c in cls.__class__.mro(): if c is object: continue - current_dir = os.path.dirname(os.path.abspath(sys.modules[c.__module__].__file__)) - template_dir_path = '{current_dir}/utils/textfsm_templates'.format( + current_dir = os.path.dirname( + os.path.abspath(sys.modules[c.__module__].__file__) + ) + template_dir_path = "{current_dir}/utils/textfsm_templates".format( current_dir=current_dir ) - template_path = '{template_dir_path}/{template_name}.tpl'.format( - template_dir_path=template_dir_path, - template_name=template_name + template_path = "{template_dir_path}/{template_name}.tpl".format( + template_dir_path=template_dir_path, template_name=template_name ) try: @@ -131,20 +150,18 @@ def textfsm_extractor(cls, template_name, raw_text): except textfsm.TextFSMTemplateError as tfte: raise napalm.base.exceptions.TemplateRenderException( "Wrong format of TextFSM template {template_name}: {error}".format( - template_name=template_name, - error=py23_compat.text_type(tfte) + template_name=template_name, error=py23_compat.text_type(tfte) ) ) raise napalm.base.exceptions.TemplateNotImplemented( "TextFSM template {template_name}.tpl is not defined under {path}".format( - template_name=template_name, - path=template_dir_path + template_name=template_name, path=template_dir_path ) ) -def find_txt(xml_tree, path, default=''): +def find_txt(xml_tree, path, default=""): """ Extracts the text value from an XML tree, using XPath. In case of error, will return a default value. @@ -154,7 +171,7 @@ def find_txt(xml_tree, path, default=''): :param default: Value to be returned in case of error. :return: a str value. """ - value = '' + value = "" try: xpath_applied = xml_tree.xpath(path) # will consider the first match only if len(xpath_applied) and xpath_applied[0] is not None: @@ -168,7 +185,7 @@ def find_txt(xml_tree, path, default=''): return py23_compat.text_type(value) -def convert(to, who, default=u''): +def convert(to, who, default=""): """ Converts data to a specific datatype. In case of error, will return a default value. @@ -215,11 +232,10 @@ def mac(raw): >>> mac('23.4567.89ab') u'00:23:45:67:89:AB' """ - if raw.endswith(':'): - flat_raw = raw.replace(':', '') - raw = '{flat_raw}{zeros_stuffed}'.format( - flat_raw=flat_raw, - zeros_stuffed='0'*(12-len(flat_raw)) + if raw.endswith(":"): + flat_raw = raw.replace(":", "") + raw = "{flat_raw}{zeros_stuffed}".format( + flat_raw=flat_raw, zeros_stuffed="0" * (12 - len(flat_raw)) ) return py23_compat.text_type(EUI(raw, dialect=_MACFormat)) @@ -256,8 +272,8 @@ def ip(addr, version=None): def as_number(as_number_val): """Convert AS Number to standardized asplain notation as an integer.""" as_number_str = py23_compat.text_type(as_number_val) - if '.' in as_number_str: - big, little = as_number_str.split('.') + if "." in as_number_str: + big, little = as_number_str.split(".") return (int(big) << 16) + int(little) else: return int(as_number_str) @@ -265,8 +281,8 @@ def as_number(as_number_val): def split_interface(intf_name): """Split an interface name based on first digit, slash, or space match.""" - head = intf_name.rstrip(r'/\0123456789. ') - tail = intf_name[len(head):].lstrip() + head = intf_name.rstrip(r"/\0123456789. 
") + tail = intf_name[len(head) :].lstrip() return (head, tail) @@ -332,7 +348,9 @@ def abbreviated_interface_name(interface, addl_name_map=None, addl_reverse_map=N canonical_type = interface_type try: - abbreviated_name = rev_name_map[canonical_type] + py23_compat.text_type(interface_number) + abbreviated_name = rev_name_map[canonical_type] + py23_compat.text_type( + interface_number + ) return abbreviated_name except KeyError: pass diff --git a/napalm/base/mock.py b/napalm/base/mock.py index 54ae45b1b..e98f46793 100644 --- a/napalm/base/mock.py +++ b/napalm/base/mock.py @@ -46,20 +46,23 @@ def is_mocked_method(method): def mocked_method(path, name, count): parent_method = getattr(NetworkDriver, name) parent_method_args = py23_compat.argspec(parent_method) - modifier = 0 if 'self' not in parent_method_args.args else 1 + modifier = 0 if "self" not in parent_method_args.args else 1 def _mocked_method(*args, **kwargs): # Check len(args) if len(args) + len(kwargs) + modifier > len(parent_method_args.args): raise TypeError( "{}: expected at most {} arguments, got {}".format( - name, len(parent_method_args.args), len(args) + modifier)) + name, len(parent_method_args.args), len(args) + modifier + ) + ) # Check kwargs unexpected = [x for x in kwargs if x not in parent_method_args.args] if unexpected: - raise TypeError("{} got an unexpected keyword argument '{}'".format(name, - unexpected[0])) + raise TypeError( + "{} got an unexpected keyword argument '{}'".format(name, unexpected[0]) + ) return mocked_data(path, name, count) return _mocked_method @@ -80,7 +83,6 @@ def mocked_data(path, name, count): class MockDevice(object): - def __init__(self, parent, profile): self.parent = parent self.profile = profile @@ -95,7 +97,6 @@ def show(self, command): class MockDriver(NetworkDriver): - def __init__(self, hostname, username, password, timeout=60, optional_args=None): """ Supported optional_args: @@ -141,12 +142,12 @@ def is_alive(self): def cli(self, commands): count = self._count_calls("cli") result = {} - regexp = re.compile('[^a-zA-Z0-9]+') + regexp = re.compile("[^a-zA-Z0-9]+") for i, c in enumerate(commands): - sanitized = re.sub(regexp, '_', c) + sanitized = re.sub(regexp, "_", c) name = "cli.{}.{}".format(count, sanitized) filename = "{}.{}".format(os.path.join(self.path, name), i) - with open(filename, 'r') as f: + with open(filename, "r") as f: result[c] = f.read() return result diff --git a/napalm/base/netmiko_helpers.py b/napalm/base/netmiko_helpers.py index f85745a0b..41f14a472 100644 --- a/napalm/base/netmiko_helpers.py +++ b/napalm/base/netmiko_helpers.py @@ -25,13 +25,13 @@ def netmiko_args(optional_args): defaults = fields[3] check_self = args.pop(0) - if check_self != 'self': + if check_self != "self": raise ValueError("Error processing Netmiko arguments") netmiko_argument_map = dict(zip(args, defaults)) # Netmiko arguments that are integrated into NAPALM already - netmiko_filter = ['ip', 'host', 'username', 'password', 'device_type', 'timeout'] + netmiko_filter = ["ip", "host", "username", "password", "device_type", "timeout"] # Filter out all of the arguments that are integrated into NAPALM for k in netmiko_filter: diff --git a/napalm/base/test/base.py b/napalm/base/test/base.py index 1f991412c..54bd699e3 100644 --- a/napalm/base/test/base.py +++ b/napalm/base/test/base.py @@ -26,7 +26,6 @@ class TestConfigNetworkDriver(object): - @classmethod def setup_class(cls): """Added for py.test/nosetests compatibility""" @@ -39,13 +38,13 @@ def teardown_class(cls): @classmethod def 
tearDownClass(cls): - cls.device.load_replace_candidate(filename='%s/initial.conf' % cls.vendor) + cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor) cls.device.commit_config() cls.device.close() @staticmethod def read_file(filename): - with open(filename, 'r') as f: + with open(filename, "r") as f: return f.read().strip() @staticmethod @@ -55,7 +54,9 @@ def print_diff_strings(orig, new): def test_replacing_and_committing_config(self): try: - self.device.load_replace_candidate(filename='%s/new_good.conf' % self.vendor) + self.device.load_replace_candidate( + filename="%s/new_good.conf" % self.vendor + ) self.device.commit_config() except NotImplementedError: raise SkipTest() @@ -64,7 +65,7 @@ def test_replacing_and_committing_config(self): diff = self.device.compare_config() # Reverting changes - self.device.load_replace_candidate(filename='%s/initial.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor) self.device.commit_config() self.assertEqual(len(diff), 0) @@ -72,21 +73,23 @@ def test_replacing_and_committing_config(self): def test_replacing_config_with_typo(self): result = False try: - self.device.load_replace_candidate(filename='%s/new_typo.conf' % self.vendor) + self.device.load_replace_candidate( + filename="%s/new_typo.conf" % self.vendor + ) self.device.commit_config() except NotImplementedError: raise SkipTest() except exceptions.ReplaceConfigException: - self.device.load_replace_candidate(filename='%s/initial.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor) diff = self.device.compare_config() self.device.discard_config() result = True and len(diff) == 0 self.assertTrue(result) def test_replacing_config_and_diff_and_discard(self): - intended_diff = self.read_file('%s/new_good.diff' % self.vendor) + intended_diff = self.read_file("%s/new_good.diff" % self.vendor) - self.device.load_replace_candidate(filename='%s/new_good.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/new_good.conf" % self.vendor) commit_diff = self.device.compare_config() print(commit_diff) @@ -95,11 +98,11 @@ def test_replacing_config_and_diff_and_discard(self): discard_diff = self.device.compare_config() self.device.discard_config() - result = (commit_diff == intended_diff) and (discard_diff == '') + result = (commit_diff == intended_diff) and (discard_diff == "") self.assertTrue(result) def test_replacing_config_and_rollback(self): - self.device.load_replace_candidate(filename='%s/new_good.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/new_good.conf" % self.vendor) orig_diff = self.device.compare_config() self.device.commit_config() @@ -109,7 +112,7 @@ def test_replacing_config_and_rollback(self): # We try to load the config again. 
If the rollback was successful new diff should be # like the first one - self.device.load_replace_candidate(filename='%s/new_good.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/new_good.conf" % self.vendor) last_diff = self.device.compare_config() self.device.discard_config() @@ -118,13 +121,13 @@ def test_replacing_config_and_rollback(self): self.assertTrue(result) def test_merge_configuration(self): - intended_diff = self.read_file('%s/merge_good.diff' % self.vendor) + intended_diff = self.read_file("%s/merge_good.diff" % self.vendor) - self.device.load_merge_candidate(filename='%s/merge_good.conf' % self.vendor) + self.device.load_merge_candidate(filename="%s/merge_good.conf" % self.vendor) self.device.commit_config() # Reverting changes - self.device.load_replace_candidate(filename='%s/initial.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor) diff = self.device.compare_config() print(diff) @@ -136,43 +139,51 @@ def test_merge_configuration(self): def test_merge_configuration_typo_and_rollback(self): result = False try: - self.device.load_merge_candidate(filename='%s/merge_typo.conf' % self.vendor) + self.device.load_merge_candidate( + filename="%s/merge_typo.conf" % self.vendor + ) self.device.compare_config() self.device.commit_config() raise Exception("We shouldn't be here") except exceptions.MergeConfigException: # We load the original config as candidate. If the commit failed cleanly the # compare_config should be empty - self.device.load_replace_candidate(filename='%s/initial.conf' % self.vendor) - result = self.device.compare_config() == '' + self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor) + result = self.device.compare_config() == "" self.device.discard_config() self.assertTrue(result) def test_load_template(self): """Test load_template method.""" - self.device.load_template('set_hostname', hostname='my-hostname') + self.device.load_template("set_hostname", hostname="my-hostname") diff = self.device.compare_config() self.device.discard_config() - self.assertTrue(diff is not '') + self.assertTrue(diff is not "") class TestGettersNetworkDriver(object): - @staticmethod def _test_model(model, data): same_keys = set(model.keys()) == set(data.keys()) if not same_keys: - print("model_keys: {}\ndata_keys: {}".format(sorted(model.keys()), sorted(data.keys()))) + print( + "model_keys: {}\ndata_keys: {}".format( + sorted(model.keys()), sorted(data.keys()) + ) + ) correct_class = True for key, instance_class in model.items(): same_class = isinstance(data[key], instance_class) correct_class = correct_class and same_class if not same_class: - print("key: {}\nmodel_class: {}\ndata_class: {}".format( - key, instance_class, data[key].__class__)) + print( + "key: {}\nmodel_class: {}\ndata_class: {}".format( + key, instance_class, data[key].__class__ + ) + ) return correct_class and same_keys @@ -186,7 +197,9 @@ def test_get_firewall_policies(self): for policy_name, policy_details in policies.items(): for policy_term in policy_details: - result = result and self._test_model(models.firewall_policies, policy_term) + result = result and self._test_model( + models.firewall_policies, policy_term + ) self.assertTrue(result) @@ -239,7 +252,9 @@ def test_get_interfaces_counters(self): result = len(self.device.get_interfaces_counters()) > 0 for interface, interface_data in get_interfaces_counters.items(): - result = result and self._test_model(models.interface_counters, interface_data) + result = result 
and self._test_model( + models.interface_counters, interface_data + ) self.assertTrue(result) @@ -250,19 +265,19 @@ def test_get_environment(self): raise SkipTest() result = len(environment) > 0 - for fan, fan_data in environment['fans'].items(): + for fan, fan_data in environment["fans"].items(): result = result and self._test_model(models.fan, fan_data) - for power, power_data in environment['power'].items(): + for power, power_data in environment["power"].items(): result = result and self._test_model(models.power, power_data) - for temperature, temperature_data in environment['temperature'].items(): + for temperature, temperature_data in environment["temperature"].items(): result = result and self._test_model(models.temperature, temperature_data) - for cpu, cpu_data in environment['cpu'].items(): + for cpu, cpu_data in environment["cpu"].items(): result = result and self._test_model(models.cpu, cpu_data) - result = result and self._test_model(models.memory, environment['memory']) + result = result and self._test_model(models.memory, environment["memory"]) self.assertTrue(result) @@ -271,20 +286,20 @@ def test_get_bgp_neighbors(self): get_bgp_neighbors = self.device.get_bgp_neighbors() except NotImplementedError: raise SkipTest() - result = 'global' in get_bgp_neighbors.keys() + result = "global" in get_bgp_neighbors.keys() if not result: - print('global is not part of the returned vrfs') + print("global is not part of the returned vrfs") else: for vrf, vrf_data in get_bgp_neighbors.items(): - result = result and isinstance(vrf_data['router_id'], text_type) + result = result and isinstance(vrf_data["router_id"], text_type) if not result: - print('router_id is not {}'.format(text_type)) + print("router_id is not {}".format(text_type)) - for peer, peer_data in vrf_data['peers'].items(): + for peer, peer_data in vrf_data["peers"].items(): result = result and self._test_model(models.peer, peer_data) - for af, af_data in peer_data['address_family'].items(): + for af, af_data in peer_data["address_family"].items(): result = result and self._test_model(models.af, af_data) self.assertTrue(result) @@ -298,7 +313,9 @@ def test_get_lldp_neighbors_detail(self): for interface, neighbor_list in get_lldp_neighbors_detail.items(): for neighbor in neighbor_list: - result = result and self._test_model(models.lldp_neighbors_detail, neighbor) + result = result and self._test_model( + models.lldp_neighbors_detail, neighbor + ) self.assertTrue(result) @@ -311,8 +328,10 @@ def test_get_bgp_config(self): for bgp_group in get_bgp_config.values(): result = result and self._test_model(models.bgp_config_group, bgp_group) - for bgp_neighbor in bgp_group.get('neighbors', {}).values(): - result = result and self._test_model(models.bgp_config_neighbor, bgp_neighbor) + for bgp_neighbor in bgp_group.get("neighbors", {}).values(): + result = result and self._test_model( + models.bgp_config_neighbor, bgp_neighbor + ) self.assertTrue(result) @@ -392,8 +411,8 @@ def test_get_interfaces_ip(self): result = len(get_interfaces_ip) > 0 for interface, interface_details in get_interfaces_ip.items(): - ipv4 = interface_details.get('ipv4', {}) - ipv6 = interface_details.get('ipv6', {}) + ipv4 = interface_details.get("ipv4", {}) + ipv6 = interface_details.get("ipv6", {}) for ip, ip_details in ipv4.items(): result = result and self._test_model(models.interfaces_ip, ip_details) for ip, ip_details in ipv6.items(): @@ -409,15 +428,19 @@ def test_get_mac_address_table(self): result = len(get_mac_address_table) > 0 for mac_table_entry in 
get_mac_address_table: - result = result and self._test_model(models.mac_address_table, mac_table_entry) + result = result and self._test_model( + models.mac_address_table, mac_table_entry + ) self.assertTrue(result) def test_get_route_to(self): - destination = '1.0.4.0/24' - protocol = 'bgp' + destination = "1.0.4.0/24" + protocol = "bgp" try: - get_route_to = self.device.get_route_to(destination=destination, protocol=protocol) + get_route_to = self.device.get_route_to( + destination=destination, protocol=protocol + ) except NotImplementedError: raise SkipTest() @@ -439,7 +462,7 @@ def test_get_snmp_information(self): for snmp_entry in get_snmp_information: result = result and self._test_model(models.snmp, get_snmp_information) - for community, community_data in get_snmp_information['community'].items(): + for community, community_data in get_snmp_information["community"].items(): result = result and self._test_model(models.snmp_community, community_data) self.assertTrue(result) @@ -467,38 +490,40 @@ def test_get_probes_results(self): for probe_name, probe_tests in get_probes_results.items(): for test_name, test_results in probe_tests.items(): - result = result and self._test_model(models.probe_test_results, test_results) + result = result and self._test_model( + models.probe_test_results, test_results + ) self.assertTrue(result) def test_ping(self): - destination = '8.8.8.8' + destination = "8.8.8.8" try: get_ping = self.device.ping(destination) except NotImplementedError: raise SkipTest() - result = isinstance(get_ping.get('success'), dict) - ping_results = get_ping.get('success', {}) + result = isinstance(get_ping.get("success"), dict) + ping_results = get_ping.get("success", {}) result = result and self._test_model(models.ping, ping_results) - for ping_result in ping_results.get('results', []): + for ping_result in ping_results.get("results", []): result = result and self._test_model(models.ping_result, ping_result) self.assertTrue(result) def test_traceroute(self): - destination = '8.8.8.8' + destination = "8.8.8.8" try: get_traceroute = self.device.traceroute(destination) except NotImplementedError: raise SkipTest() - result = isinstance(get_traceroute.get('success'), dict) - traceroute_results = get_traceroute.get('success', {}) + result = isinstance(get_traceroute.get("success"), dict) + traceroute_results = get_traceroute.get("success", {}) for hope_id, hop_result in traceroute_results.items(): - for probe_id, probe_result in hop_result.get('probes', {}).items(): + for probe_id, probe_result in hop_result.get("probes", {}).items(): result = result and self._test_model(models.traceroute, probe_result) self.assertTrue(result) @@ -513,7 +538,7 @@ def test_get_users(self): for user, user_details in get_users.items(): result = result and self._test_model(models.users, user_details) - result = result and (0 <= user_details.get('level') <= 15) + result = result and (0 <= user_details.get("level") <= 15) self.assertTrue(result) @@ -528,18 +553,16 @@ def test_get_optics(self): for iface, iface_data in get_optics.items(): assert isinstance(iface, text_type) - for channel in iface_data['physical_channels']['channel']: + for channel in iface_data["physical_channels"]["channel"]: assert len(channel) == 2 - assert isinstance(channel['index'], int) - for field in ['input_power', 'output_power', - 'laser_bias_current']: + assert isinstance(channel["index"], int) + for field in ["input_power", "output_power", "laser_bias_current"]: - assert len(channel['state'][field]) == 4 - assert 
isinstance(channel['state'][field]['instant'], - float) - assert isinstance(channel['state'][field]['avg'], float) - assert isinstance(channel['state'][field]['min'], float) - assert isinstance(channel['state'][field]['max'], float) + assert len(channel["state"][field]) == 4 + assert isinstance(channel["state"][field]["instant"], float) + assert isinstance(channel["state"][field]["avg"], float) + assert isinstance(channel["state"][field]["min"], float) + assert isinstance(channel["state"][field]["max"], float) def test_get_config(self): """Test get_config method.""" @@ -553,15 +576,15 @@ def test_get_config(self): def test_get_config_filtered(self): """Test get_config method.""" - for config in ['running', 'startup', 'candidate']: + for config in ["running", "startup", "candidate"]: try: get_config = self.device.get_config(retrieve=config) except NotImplementedError: raise SkipTest() - assert get_config['candidate'] == "" if config != "candidate" else True - assert get_config['startup'] == "" if config != "startup" else True - assert get_config['running'] == "" if config != "running" else True + assert get_config["candidate"] == "" if config != "candidate" else True + assert get_config["startup"] == "" if config != "startup" else True + assert get_config["running"] == "" if config != "running" else True def test_get_network_instances(self): """Test get_network_instances method.""" @@ -572,11 +595,15 @@ def test_get_network_instances(self): result = isinstance(get_network_instances, dict) for network_instance_name, network_instance in get_network_instances.items(): - result = result and self._test_model(models.network_instance, network_instance) - result = result and \ - self._test_model(models.network_instance_state, network_instance['state']) - result = result and \ - self._test_model(models.network_instance_interfaces, network_instance['interfaces']) + result = result and self._test_model( + models.network_instance, network_instance + ) + result = result and self._test_model( + models.network_instance_state, network_instance["state"] + ) + result = result and self._test_model( + models.network_instance_interfaces, network_instance["interfaces"] + ) self.assertTrue(result) diff --git a/napalm/base/test/conftest.py b/napalm/base/test/conftest.py index 1a8e27533..f214b5f85 100644 --- a/napalm/base/test/conftest.py +++ b/napalm/base/test/conftest.py @@ -9,11 +9,13 @@ import json import os -NAPALM_TEST_MOCK = ast.literal_eval(os.getenv('NAPALM_TEST_MOCK', default="1")) -NAPALM_HOSTNAME = os.getenv('NAPALM_HOSTNAME', default='127.0.0.1') -NAPALM_USERNAME = os.getenv('NAPALM_USERNAME', default='vagrant') -NAPALM_PASSWORD = os.getenv('NAPALM_PASSWORD', default='vagrant') -NAPALM_OPTIONAL_ARGS = json.loads(os.getenv('NAPALM_OPTIONAL_ARGS', default='{"port": 12443}')) +NAPALM_TEST_MOCK = ast.literal_eval(os.getenv("NAPALM_TEST_MOCK", default="1")) +NAPALM_HOSTNAME = os.getenv("NAPALM_HOSTNAME", default="127.0.0.1") +NAPALM_USERNAME = os.getenv("NAPALM_USERNAME", default="vagrant") +NAPALM_PASSWORD = os.getenv("NAPALM_PASSWORD", default="vagrant") +NAPALM_OPTIONAL_ARGS = json.loads( + os.getenv("NAPALM_OPTIONAL_ARGS", default='{"port": 12443}') +) def set_device_parameters(request): @@ -23,18 +25,22 @@ def set_device_parameters(request): else: driver = request.cls.driver - request.cls.device = driver(NAPALM_HOSTNAME, - NAPALM_USERNAME, - NAPALM_PASSWORD, - timeout=60, - optional_args=NAPALM_OPTIONAL_ARGS) + request.cls.device = driver( + NAPALM_HOSTNAME, + NAPALM_USERNAME, + NAPALM_PASSWORD, + 
timeout=60, + optional_args=NAPALM_OPTIONAL_ARGS, + ) request.cls.device.open() def pytest_generate_tests(metafunc, basefile): """Generate test cases dynamically.""" - if metafunc.function.__dict__.get('build_test_cases', False): - path = os.path.join(os.path.dirname(basefile), 'mocked_data', metafunc.function.__name__) + if metafunc.function.__dict__.get("build_test_cases", False): + path = os.path.join( + os.path.dirname(basefile), "mocked_data", metafunc.function.__name__ + ) if os.path.exists(path): sub_folders = os.listdir(path) diff --git a/napalm/base/test/double.py b/napalm/base/test/double.py index 4872ea227..a947e9bfe 100644 --- a/napalm/base/test/double.py +++ b/napalm/base/test/double.py @@ -15,16 +15,21 @@ class BaseTestDouble(object): def __init__(self, *args, **kwargs): """Initiate object.""" - self.current_test = '' - self.current_test_case = '' + self.current_test = "" + self.current_test_case = "" def find_file(self, filename): """Find the necessary file for the given test case.""" # Find base_dir of submodule module_dir = os.path.dirname(sys.modules[self.__module__].__file__) - full_path = os.path.join(module_dir, 'mocked_data', - self.current_test, self.current_test_case, filename) + full_path = os.path.join( + module_dir, + "mocked_data", + self.current_test, + self.current_test_case, + filename, + ) if os.path.exists(full_path): return full_path @@ -34,8 +39,8 @@ def find_file(self, filename): @staticmethod def sanitize_text(text): """Remove some weird characters from text, useful for building filenames from commands.""" - regexp = '[^a-zA-Z0-9]' - return re.sub(regexp, '_', text)[0:150] + regexp = "[^a-zA-Z0-9]" + return re.sub(regexp, "_", text)[0:150] @staticmethod def read_json_file(filename): @@ -52,13 +57,15 @@ def read_txt_file(filename): @property def expected_result(self): """Return the expected result for the current test case.""" - filename = self.find_file('expected_result.json') + filename = self.find_file("expected_result.json") - with open(filename, mode='r') as f: + with open(filename, mode="r") as f: try: return json.loads(f.read()) except ValueError: - raise ValueError("No JSON object could be decoded on filename: {}".format(filename)) + raise ValueError( + "No JSON object could be decoded on filename: {}".format(filename) + ) def _string_key_to_int(param): diff --git a/napalm/base/test/getters.py b/napalm/base/test/getters.py index e775abbce..470b44e09 100644 --- a/napalm/base/test/getters.py +++ b/napalm/base/test/getters.py @@ -48,17 +48,17 @@ def dict_diff(prv, nxt): result[k] = diff else: "If only one is a dict they are clearly different" - result[k] = {'result': prv.get(k), 'expected': nxt.get(k)} + result[k] = {"result": prv.get(k), "expected": nxt.get(k)} else: - "Ellipsis is a wildcard.""" + "Ellipsis is a wildcard." 
"" if prv.get(k) != nxt.get(k) and nxt.get(k) != "...": - result[k] = {'result': prv.get(k), 'expected': nxt.get(k)} + result[k] = {"result": prv.get(k), "expected": nxt.get(k)} return result def wrap_test_cases(func): """Wrap test cases.""" - func.__dict__['build_test_cases'] = True + func.__dict__["build_test_cases"] = True @functools.wraps(func) def mock_wrapper(cls, test_case): @@ -93,13 +93,14 @@ def mock_wrapper(cls, test_case): diff = dict_diff(result, expected_result) if diff: print("Resulting JSON object was: {}".format(json.dumps(result))) - raise AssertionError("Expected result varies on some keys {}".format( - json.dumps(diff))) + raise AssertionError( + "Expected result varies on some keys {}".format(json.dumps(diff)) + ) for patched_attr in cls.device.patched_attrs: attr = getattr(cls.device, patched_attr) - attr.current_test = '' # Empty them to avoid side effects - attr.current_test_case = '' # Empty them to avoid side effects + attr.current_test = "" # Empty them to avoid side effects + attr.current_test_case = "" # Empty them to avoid side effects return result @@ -125,22 +126,22 @@ def test_method_signatures(self): errors = {} cls = self.driver # Create fictional driver instance (py3 needs bound methods) - tmp_obj = cls(hostname='test', username='admin', password='pwd') + tmp_obj = cls(hostname="test", username="admin", password="pwd") attrs = [m for m, v in inspect.getmembers(tmp_obj)] for attr in attrs: func = getattr(tmp_obj, attr) - if attr.startswith('_') or not inspect.ismethod(func): + if attr.startswith("_") or not inspect.ismethod(func): continue try: orig = getattr(NetworkDriver, attr) orig_spec = argspec(orig) except AttributeError: - orig_spec = 'Method does not exist in napalm.base' + orig_spec = "Method does not exist in napalm.base" func_spec = argspec(func) if orig_spec != func_spec: errors[attr] = (orig_spec, func_spec) - EXTRA_METHODS = ['__init__', ] + EXTRA_METHODS = ["__init__"] for method in EXTRA_METHODS: orig_spec = argspec(getattr(NetworkDriver, method)) func_spec = argspec(getattr(cls, method)) @@ -203,19 +204,19 @@ def test_get_environment(self, test_case): environment = self.device.get_environment() assert len(environment) > 0 - for fan, fan_data in environment['fans'].items(): + for fan, fan_data in environment["fans"].items(): assert helpers.test_model(models.fan, fan_data) - for power, power_data in environment['power'].items(): + for power, power_data in environment["power"].items(): assert helpers.test_model(models.power, power_data) - for temperature, temperature_data in environment['temperature'].items(): + for temperature, temperature_data in environment["temperature"].items(): assert helpers.test_model(models.temperature, temperature_data) - for cpu, cpu_data in environment['cpu'].items(): + for cpu, cpu_data in environment["cpu"].items(): assert helpers.test_model(models.cpu, cpu_data) - assert helpers.test_model(models.memory, environment['memory']) + assert helpers.test_model(models.memory, environment["memory"]) return environment @@ -223,15 +224,15 @@ def test_get_environment(self, test_case): def test_get_bgp_neighbors(self, test_case): """Test get_bgp_neighbors.""" get_bgp_neighbors = self.device.get_bgp_neighbors() - assert 'global' in get_bgp_neighbors.keys() + assert "global" in get_bgp_neighbors.keys() for vrf, vrf_data in get_bgp_neighbors.items(): - assert isinstance(vrf_data['router_id'], text_type) + assert isinstance(vrf_data["router_id"], text_type) - for peer, peer_data in vrf_data['peers'].items(): + for peer, 
peer_data in vrf_data["peers"].items(): assert helpers.test_model(models.peer, peer_data) - for af, af_data in peer_data['address_family'].items(): + for af, af_data in peer_data["address_family"].items(): assert helpers.test_model(models.af, af_data) return get_bgp_neighbors @@ -256,7 +257,7 @@ def test_get_bgp_config(self, test_case): for bgp_group in get_bgp_config.values(): assert helpers.test_model(models.bgp_config_group, bgp_group) - for bgp_neighbor in bgp_group.get('neighbors', {}).values(): + for bgp_neighbor in bgp_group.get("neighbors", {}).values(): assert helpers.test_model(models.bgp_config_neighbor, bgp_neighbor) return get_bgp_config @@ -340,8 +341,8 @@ def test_get_interfaces_ip(self, test_case): assert len(get_interfaces_ip) > 0 for interface, interface_details in get_interfaces_ip.items(): - ipv4 = interface_details.get('ipv4', {}) - ipv6 = interface_details.get('ipv6', {}) + ipv4 = interface_details.get("ipv4", {}) + ipv6 = interface_details.get("ipv6", {}) for ip, ip_details in ipv4.items(): assert helpers.test_model(models.interfaces_ip, ip_details) for ip, ip_details in ipv6.items(): @@ -363,9 +364,11 @@ def test_get_mac_address_table(self, test_case): @wrap_test_cases def test_get_route_to(self, test_case): """Test get_route_to.""" - destination = '1.0.4.0/24' - protocol = 'bgp' - get_route_to = self.device.get_route_to(destination=destination, protocol=protocol) + destination = "1.0.4.0/24" + protocol = "bgp" + get_route_to = self.device.get_route_to( + destination=destination, protocol=protocol + ) assert len(get_route_to) > 0 @@ -385,7 +388,7 @@ def test_get_snmp_information(self, test_case): for snmp_entry in get_snmp_information: assert helpers.test_model(models.snmp, get_snmp_information) - for community, community_data in get_snmp_information['community'].items(): + for community, community_data in get_snmp_information["community"].items(): assert helpers.test_model(models.snmp_community, community_data) return get_snmp_information @@ -418,14 +421,14 @@ def test_get_probes_results(self, test_case): @wrap_test_cases def test_ping(self, test_case): """Test ping.""" - destination = '8.8.8.8' + destination = "8.8.8.8" get_ping = self.device.ping(destination) - assert isinstance(get_ping.get('success'), dict) - ping_results = get_ping.get('success', {}) + assert isinstance(get_ping.get("success"), dict) + ping_results = get_ping.get("success", {}) assert helpers.test_model(models.ping, ping_results) - for ping_result in ping_results.get('results', []): + for ping_result in ping_results.get("results", []): assert helpers.test_model(models.ping_result, ping_result) return get_ping @@ -433,13 +436,13 @@ def test_ping(self, test_case): @wrap_test_cases def test_traceroute(self, test_case): """Test traceroute.""" - destination = '8.8.8.8' + destination = "8.8.8.8" get_traceroute = self.device.traceroute(destination) - assert isinstance(get_traceroute.get('success'), dict) - traceroute_results = get_traceroute.get('success', {}) + assert isinstance(get_traceroute.get("success"), dict) + traceroute_results = get_traceroute.get("success", {}) for hope_id, hop_result in traceroute_results.items(): - for probe_id, probe_result in hop_result.get('probes', {}).items(): + for probe_id, probe_result in hop_result.get("probes", {}).items(): assert helpers.test_model(models.traceroute, probe_result) return get_traceroute @@ -452,7 +455,9 @@ def test_get_users(self, test_case): for user, user_details in get_users.items(): assert helpers.test_model(models.users, user_details) - 
assert (0 <= user_details.get('level') <= 15) or (user_details.get('level') == 20) + assert (0 <= user_details.get("level") <= 15) or ( + user_details.get("level") == 20 + ) return get_users @@ -464,18 +469,16 @@ def test_get_optics(self, test_case): for iface, iface_data in get_optics.items(): assert isinstance(iface, text_type) - for channel in iface_data['physical_channels']['channel']: + for channel in iface_data["physical_channels"]["channel"]: assert len(channel) == 2 - assert isinstance(channel['index'], int) - for field in ['input_power', 'output_power', - 'laser_bias_current']: + assert isinstance(channel["index"], int) + for field in ["input_power", "output_power", "laser_bias_current"]: - assert len(channel['state'][field]) == 4 - assert isinstance(channel['state'][field]['instant'], - float) - assert isinstance(channel['state'][field]['avg'], float) - assert isinstance(channel['state'][field]['min'], float) - assert isinstance(channel['state'][field]['max'], float) + assert len(channel["state"][field]) == 4 + assert isinstance(channel["state"][field]["instant"], float) + assert isinstance(channel["state"][field]["avg"], float) + assert isinstance(channel["state"][field]["min"], float) + assert isinstance(channel["state"][field]["max"], float) return get_optics @@ -492,12 +495,12 @@ def test_get_config(self, test_case): @wrap_test_cases def test_get_config_filtered(self, test_case): """Test get_config method.""" - for config in ['running', 'startup', 'candidate']: + for config in ["running", "startup", "candidate"]: get_config = self.device.get_config(retrieve=config) - assert get_config['candidate'] == "" if config != "candidate" else True - assert get_config['startup'] == "" if config != "startup" else True - assert get_config['running'] == "" if config != "running" else True + assert get_config["candidate"] == "" if config != "candidate" else True + assert get_config["startup"] == "" if config != "startup" else True + assert get_config["running"] == "" if config != "running" else True return get_config @@ -509,9 +512,12 @@ def test_get_network_instances(self, test_case): assert isinstance(get_network_instances, dict) for network_instance_name, network_instance in get_network_instances.items(): assert helpers.test_model(models.network_instance, network_instance) - assert helpers.test_model(models.network_instance_state, network_instance['state']) - assert helpers.test_model(models.network_instance_interfaces, - network_instance['interfaces']) + assert helpers.test_model( + models.network_instance_state, network_instance["state"] + ) + assert helpers.test_model( + models.network_instance_interfaces, network_instance["interfaces"] + ) return get_network_instances diff --git a/napalm/base/test/helpers.py b/napalm/base/test/helpers.py index 4824b3f8d..45184cb8f 100644 --- a/napalm/base/test/helpers.py +++ b/napalm/base/test/helpers.py @@ -12,20 +12,29 @@ def test_model(model, data): same_keys = set(model.keys()) == set(data.keys()) if not same_keys: - print("model_keys: {}\ndata_keys: {}".format(sorted(model.keys()), sorted(data.keys()))) + print( + "model_keys: {}\ndata_keys: {}".format( + sorted(model.keys()), sorted(data.keys()) + ) + ) correct_class = True for key, instance_class in model.items(): - if py23_compat.PY2 and isinstance(data[key], long): # noqa + if py23_compat.PY2 and isinstance(data[key], long): # noqa # Properly handle PY2 long - correct_class = (isinstance(data[key], long) and # noqa - isinstance(1, instance_class) and - correct_class) + correct_class = ( + 
isinstance(data[key], long) + and isinstance(1, instance_class) # noqa + and correct_class + ) else: correct_class = isinstance(data[key], instance_class) and correct_class if not correct_class: - print("key: {}\nmodel_class: {}\ndata_class: {}".format( - key, instance_class, data[key].__class__)) + print( + "key: {}\nmodel_class: {}\ndata_class: {}".format( + key, instance_class, data[key].__class__ + ) + ) return correct_class and same_keys diff --git a/napalm/base/test/models.py b/napalm/base/test/models.py index ab4922974..58fd000b3 100644 --- a/napalm/base/test/models.py +++ b/napalm/base/test/models.py @@ -1,184 +1,154 @@ - # text_type is 'unicode' for py2 and 'str' for py3 from napalm.base.utils.py23_compat import text_type -alive = { - 'is_alive': bool -} +alive = {"is_alive": bool} facts = { - 'os_version': text_type, - 'uptime': int, - 'interface_list': list, - 'vendor': text_type, - 'serial_number': text_type, - 'model': text_type, - 'hostname': text_type, - 'fqdn': text_type + "os_version": text_type, + "uptime": int, + "interface_list": list, + "vendor": text_type, + "serial_number": text_type, + "model": text_type, + "hostname": text_type, + "fqdn": text_type, } interface = { - 'is_up': bool, - 'is_enabled': bool, - 'description': text_type, - 'last_flapped': float, - 'speed': int, - 'mac_address': text_type, + "is_up": bool, + "is_enabled": bool, + "description": text_type, + "last_flapped": float, + "speed": int, + "mac_address": text_type, } -lldp_neighbors = { - 'hostname': text_type, - 'port': text_type, -} +lldp_neighbors = {"hostname": text_type, "port": text_type} interface_counters = { - 'tx_errors': int, - 'rx_errors': int, - 'tx_discards': int, - 'rx_discards': int, - 'tx_octets': int, - 'rx_octets': int, - 'tx_unicast_packets': int, - 'rx_unicast_packets': int, - 'tx_multicast_packets': int, - 'rx_multicast_packets': int, - 'tx_broadcast_packets': int, - 'rx_broadcast_packets': int, + "tx_errors": int, + "rx_errors": int, + "tx_discards": int, + "rx_discards": int, + "tx_octets": int, + "rx_octets": int, + "tx_unicast_packets": int, + "rx_unicast_packets": int, + "tx_multicast_packets": int, + "rx_multicast_packets": int, + "tx_broadcast_packets": int, + "rx_broadcast_packets": int, } -temperature = { - 'is_alert': bool, - 'is_critical': bool, - 'temperature': float, -} +temperature = {"is_alert": bool, "is_critical": bool, "temperature": float} -power = { - 'status': bool, - 'output': float, - 'capacity': float -} +power = {"status": bool, "output": float, "capacity": float} -memory = { - 'used_ram': int, - 'available_ram': int, -} +memory = {"used_ram": int, "available_ram": int} -fan = { - 'status': bool, -} +fan = {"status": bool} -cpu = { - '%usage': float, -} +cpu = {"%usage": float} peer = { - 'is_enabled': bool, - 'uptime': int, - 'remote_as': int, - 'description': text_type, - 'remote_id': text_type, - 'local_as': int, - 'is_up': bool, - 'address_family': dict, + "is_enabled": bool, + "uptime": int, + "remote_as": int, + "description": text_type, + "remote_id": text_type, + "local_as": int, + "is_up": bool, + "address_family": dict, } -af = { - 'sent_prefixes': int, - 'accepted_prefixes': int, - 'received_prefixes': int -} +af = {"sent_prefixes": int, "accepted_prefixes": int, "received_prefixes": int} lldp_neighbors_detail = { - 'parent_interface': text_type, - 'remote_port': text_type, - 'remote_chassis_id': text_type, - 'remote_port_description': text_type, - 'remote_system_name': text_type, - 'remote_system_description': text_type, - 
'remote_system_capab': text_type, - 'remote_system_enable_capab': text_type + "parent_interface": text_type, + "remote_port": text_type, + "remote_chassis_id": text_type, + "remote_port_description": text_type, + "remote_system_name": text_type, + "remote_system_description": text_type, + "remote_system_capab": text_type, + "remote_system_enable_capab": text_type, } bgp_config_group = { - 'type': text_type, - 'description': text_type, - 'apply_groups': list, - 'multihop_ttl': int, - 'multipath': bool, - 'local_address': text_type, - 'local_as': int, - 'remote_as': int, - 'import_policy': text_type, - 'export_policy': text_type, - 'remove_private_as': bool, - 'prefix_limit': dict, - 'neighbors': dict + "type": text_type, + "description": text_type, + "apply_groups": list, + "multihop_ttl": int, + "multipath": bool, + "local_address": text_type, + "local_as": int, + "remote_as": int, + "import_policy": text_type, + "export_policy": text_type, + "remove_private_as": bool, + "prefix_limit": dict, + "neighbors": dict, } bgp_config_neighbor = { - 'description': text_type, - 'import_policy': text_type, - 'export_policy': text_type, - 'local_address': text_type, - 'authentication_key': text_type, - 'nhs': bool, - 'route_reflector_client': bool, - 'local_as': int, - 'remote_as': int, - 'prefix_limit': dict + "description": text_type, + "import_policy": text_type, + "export_policy": text_type, + "local_address": text_type, + "authentication_key": text_type, + "nhs": bool, + "route_reflector_client": bool, + "local_as": int, + "remote_as": int, + "prefix_limit": dict, } peer_details = { - 'up': bool, - 'local_as': int, - 'remote_as': int, - 'router_id': text_type, - 'local_address': text_type, - 'routing_table': text_type, - 'local_address_configured': bool, - 'local_port': int, - 'remote_address': text_type, - 'remote_port': int, - 'multihop': bool, - 'multipath': bool, - 'remove_private_as': bool, - 'import_policy': text_type, - 'export_policy': text_type, - 'input_messages': int, - 'output_messages': int, - 'input_updates': int, - 'output_updates': int, - 'messages_queued_out': int, - 'connection_state': text_type, - 'previous_connection_state': text_type, - 'last_event': text_type, - 'suppress_4byte_as': bool, - 'local_as_prepend': bool, - 'holdtime': int, - 'configured_holdtime': int, - 'keepalive': int, - 'configured_keepalive': int, - 'active_prefix_count': int, - 'received_prefix_count': int, - 'accepted_prefix_count': int, - 'suppressed_prefix_count': int, - 'advertised_prefix_count': int, - 'flap_count': int -} - -arp_table = { - 'interface': text_type, - 'mac': text_type, - 'ip': text_type, - 'age': float -} + "up": bool, + "local_as": int, + "remote_as": int, + "router_id": text_type, + "local_address": text_type, + "routing_table": text_type, + "local_address_configured": bool, + "local_port": int, + "remote_address": text_type, + "remote_port": int, + "multihop": bool, + "multipath": bool, + "remove_private_as": bool, + "import_policy": text_type, + "export_policy": text_type, + "input_messages": int, + "output_messages": int, + "input_updates": int, + "output_updates": int, + "messages_queued_out": int, + "connection_state": text_type, + "previous_connection_state": text_type, + "last_event": text_type, + "suppress_4byte_as": bool, + "local_as_prepend": bool, + "holdtime": int, + "configured_holdtime": int, + "keepalive": int, + "configured_keepalive": int, + "active_prefix_count": int, + "received_prefix_count": int, + "accepted_prefix_count": int, + "suppressed_prefix_count": int, + 
"advertised_prefix_count": int, + "flap_count": int, +} + +arp_table = {"interface": text_type, "mac": text_type, "ip": text_type, "age": float} ipv6_neighbor = { - 'interface': text_type, - 'mac': text_type, - 'ip': text_type, - 'age': float, - 'state': text_type + "interface": text_type, + "mac": text_type, + "ip": text_type, + "age": float, + "state": text_type, } ntp_peer = { @@ -190,153 +160,124 @@ } ntp_stats = { - 'remote': text_type, - 'referenceid': text_type, - 'synchronized': bool, - 'stratum': int, - 'type': text_type, - 'when': text_type, - 'hostpoll': int, - 'reachability': int, - 'delay': float, - 'offset': float, - 'jitter': float + "remote": text_type, + "referenceid": text_type, + "synchronized": bool, + "stratum": int, + "type": text_type, + "when": text_type, + "hostpoll": int, + "reachability": int, + "delay": float, + "offset": float, + "jitter": float, } -interfaces_ip = { - 'prefix_length': int -} +interfaces_ip = {"prefix_length": int} mac_address_table = { - 'mac': text_type, - 'interface': text_type, - 'vlan': int, - 'static': bool, - 'active': bool, - 'moves': int, - 'last_move': float + "mac": text_type, + "interface": text_type, + "vlan": int, + "static": bool, + "active": bool, + "moves": int, + "last_move": float, } route = { - 'protocol': text_type, - 'current_active': bool, - 'last_active': bool, - 'age': int, - 'next_hop': text_type, - 'outgoing_interface': text_type, - 'selected_next_hop': bool, - 'preference': int, - 'inactive_reason': text_type, - 'routing_table': text_type, - 'protocol_attributes': dict + "protocol": text_type, + "current_active": bool, + "last_active": bool, + "age": int, + "next_hop": text_type, + "outgoing_interface": text_type, + "selected_next_hop": bool, + "preference": int, + "inactive_reason": text_type, + "routing_table": text_type, + "protocol_attributes": dict, } snmp = { - 'chassis_id': text_type, - 'community': dict, - 'contact': text_type, - 'location': text_type + "chassis_id": text_type, + "community": dict, + "contact": text_type, + "location": text_type, } -snmp_community = { - 'acl': text_type, - 'mode': text_type, -} +snmp_community = {"acl": text_type, "mode": text_type} probe_test = { - 'probe_type': text_type, - 'target': text_type, - 'source': text_type, - 'probe_count': int, - 'test_interval': int + "probe_type": text_type, + "target": text_type, + "source": text_type, + "probe_count": int, + "test_interval": int, } probe_test_results = { - 'target': text_type, - 'source': text_type, - 'probe_type': text_type, - 'probe_count': int, - 'rtt': float, - 'round_trip_jitter': float, - 'last_test_loss': int, - 'current_test_min_delay': float, - 'current_test_max_delay': float, - 'current_test_avg_delay': float, - 'last_test_min_delay': float, - 'last_test_max_delay': float, - 'last_test_avg_delay': float, - 'global_test_min_delay': float, - 'global_test_max_delay': float, - 'global_test_avg_delay': float + "target": text_type, + "source": text_type, + "probe_type": text_type, + "probe_count": int, + "rtt": float, + "round_trip_jitter": float, + "last_test_loss": int, + "current_test_min_delay": float, + "current_test_max_delay": float, + "current_test_avg_delay": float, + "last_test_min_delay": float, + "last_test_max_delay": float, + "last_test_avg_delay": float, + "global_test_min_delay": float, + "global_test_max_delay": float, + "global_test_avg_delay": float, } ping = { - 'probes_sent': int, - 'packet_loss': int, - 'rtt_min': float, - 'rtt_max': float, - 'rtt_avg': float, - 'rtt_stddev': float, - 'results': list 
+ "probes_sent": int, + "packet_loss": int, + "rtt_min": float, + "rtt_max": float, + "rtt_avg": float, + "rtt_stddev": float, + "results": list, } -ping_result = { - 'ip_address': text_type, - 'rtt': float -} +ping_result = {"ip_address": text_type, "rtt": float} -traceroute = { - 'rtt': float, - 'ip_address': text_type, - 'host_name': text_type -} +traceroute = {"rtt": float, "ip_address": text_type, "host_name": text_type} -users = { - 'level': int, - 'password': text_type, - 'sshkeys': list -} +users = {"level": int, "password": text_type, "sshkeys": list} -optics_state = { - 'instant': float, - 'avg': float, - 'min': float, - 'max': float -} +optics_state = {"instant": float, "avg": float, "min": float, "max": float} -config = { - 'running': text_type, - 'startup': text_type, - 'candidate': text_type, -} +config = {"running": text_type, "startup": text_type, "candidate": text_type} network_instance = { - 'name': text_type, - 'type': text_type, - 'state': dict, - 'interfaces': dict, + "name": text_type, + "type": text_type, + "state": dict, + "interfaces": dict, } -network_instance_state = { - 'route_distinguisher': text_type, -} +network_instance_state = {"route_distinguisher": text_type} -network_instance_interfaces = { - 'interface': dict, -} +network_instance_interfaces = {"interface": dict} firewall_policies = { - 'position': int, - 'packet_hits': int, - 'byte_hits': int, - 'id': text_type, - 'enabled': bool, - 'schedule': text_type, - 'log': text_type, - 'l3_src': text_type, - 'l3_dst': text_type, - 'service': text_type, - 'src_zone': text_type, - 'dst_zone': text_type, - 'action': text_type + "position": int, + "packet_hits": int, + "byte_hits": int, + "id": text_type, + "enabled": bool, + "schedule": text_type, + "log": text_type, + "l3_src": text_type, + "l3_dst": text_type, + "service": text_type, + "src_zone": text_type, + "dst_zone": text_type, + "action": text_type, } diff --git a/napalm/base/utils/jinja_filters.py b/napalm/base/utils/jinja_filters.py index bcc645d00..e4fde534e 100644 --- a/napalm/base/utils/jinja_filters.py +++ b/napalm/base/utils/jinja_filters.py @@ -10,9 +10,9 @@ class CustomJinjaFilters(object): def filters(cls): """Return jinja2 filters that this module provide.""" return { - 'oc_attr_isdefault': oc_attr_isdefault, - 'openconfig_to_cisco_af': openconfig_to_cisco_af, - 'openconfig_to_eos_af': openconfig_to_eos_af, + "oc_attr_isdefault": oc_attr_isdefault, + "openconfig_to_cisco_af": openconfig_to_cisco_af, + "openconfig_to_eos_af": openconfig_to_eos_af, } @@ -31,12 +31,12 @@ def openconfig_to_cisco_af(value): value = value.split(":")[1] mapd = { - 'IPV4_UNICAST': "ipv4 unicast", - 'IPV6_UNICAST': "ipv6 unicast", - 'IPV4_LABELED_UNICAST': "ipv4 unicast", - 'IPV6_LABELED_UNICAST': "ipv6 unicast", - 'L3VPN_IPV4_UNICAST': "vpnv4", - 'L3VPN_IPV6_UNICAST': "vpnv6", + "IPV4_UNICAST": "ipv4 unicast", + "IPV6_UNICAST": "ipv6 unicast", + "IPV4_LABELED_UNICAST": "ipv4 unicast", + "IPV6_LABELED_UNICAST": "ipv6 unicast", + "L3VPN_IPV4_UNICAST": "vpnv4", + "L3VPN_IPV6_UNICAST": "vpnv6", } return mapd[value] @@ -46,8 +46,5 @@ def openconfig_to_eos_af(value): if ":" in value: value = value.split(":")[1] - mapd = { - 'IPV4_UNICAST': "ipv4", - 'IPV6_UNICAST': "ipv6", - } + mapd = {"IPV4_UNICAST": "ipv4", "IPV6_UNICAST": "ipv6"} return mapd[value] diff --git a/napalm/base/utils/string_parsers.py b/napalm/base/utils/string_parsers.py index af68bd848..0ddf0d102 100644 --- a/napalm/base/utils/string_parsers.py +++ b/napalm/base/utils/string_parsers.py @@ -14,7 +14,7 @@ def 
convert(text):
 def alphanum_key(key):
 """ split on end numbers."""
- return [convert(c) for c in re.split('([0-9]+)', key)]
+ return [convert(c) for c in re.split("([0-9]+)", key)]
 def sorted_nicely(sort_me):
@@ -22,8 +22,8 @@ def sorted_nicely(sort_me):
 return sorted(sort_me, key=alphanum_key)
-def colon_separated_string_to_dict(string, separator=':'):
- '''
+def colon_separated_string_to_dict(string, separator=":"):
+ """
 Converts a string in the format:
 Name: Et3
@@ -40,56 +40,65 @@ def colon_separated_string_to_dict(string, separator=':'):
 into a dictionary
- '''
+ """
 dictionary = dict()
 for line in string.splitlines():
 line_data = line.split(separator)
 if len(line_data) > 1:
- dictionary[line_data[0].strip()] = ''.join(line_data[1:]).strip()
+ dictionary[line_data[0].strip()] = "".join(line_data[1:]).strip()
 elif len(line_data) == 1:
 dictionary[line_data[0].strip()] = None
 else:
- raise Exception('Something went wrong parsing the colo separated string {}'
- .format(line))
+ raise Exception(
+ "Something went wrong parsing the colo separated string {}".format(line)
+ )
 return dictionary
 def hyphen_range(string):
- '''
+ """
 Expands a string of numbers separated by commas and hyphens into a list of integers.
 For example: 2-3,5-7,20-21,23,100-200
- '''
+ """
 list_numbers = list()
- temporary_list = string.split(',')
+ temporary_list = string.split(",")
 for element in temporary_list:
- sub_element = element.split('-')
+ sub_element = element.split("-")
 if len(sub_element) == 1:
 list_numbers.append(int(sub_element[0]))
 elif len(sub_element) == 2:
- for number in range(int(sub_element[0]), int(sub_element[1])+1):
+ for number in range(int(sub_element[0]), int(sub_element[1]) + 1):
 list_numbers.append(number)
 else:
- raise Exception('Something went wrong expanding the range {}'.format(string))
+ raise Exception(
+ "Something went wrong expanding the range {}".format(string)
+ )
 return list_numbers
 def convert_uptime_string_seconds(uptime):
- '''Convert uptime strings to seconds. The string can be formatted various ways.'''
+ """Convert uptime strings to seconds. The string can be formatted various ways."""
 regex_list = [
 # n years, n weeks, n days, n hours, n minutes where each of the fields except minutes
 # is optional. Additionally, can be either singular or plural
- (r"((?P<years>\d+) year(s)?,\s+)?((?P<weeks>\d+) week(s)?,\s+)?"
- r"((?P<days>\d+) day(s)?,\s+)?((?P<hours>\d+) "
- r"hour(s)?,\s+)?((?P<minutes>\d+) minute(s)?)"),
+ (
+ r"((?P<years>\d+) year(s)?,\s+)?((?P<weeks>\d+) week(s)?,\s+)?"
+ r"((?P<days>\d+) day(s)?,\s+)?((?P<hours>\d+) "
+ r"hour(s)?,\s+)?((?P<minutes>\d+) minute(s)?)"
+ ),
 # n days, HH:MM:SS where each field is required (except for days)
- (r"((?P<days>\d+) day(s)?,\s+)?"
- r"((?P<hours>\d+)):((?P<minutes>\d+)):((?P<seconds>\d+))"),
+ (
+ r"((?P<days>\d+) day(s)?,\s+)?"
+ r"((?P<hours>\d+)):((?P<minutes>\d+)):((?P<seconds>\d+))"
+ ),
 # 7w6d5h4m3s where each field is optional
- (r"((?P<weeks>\d+)w)?((?P<days>\d+)d)?((?P<hours>\d+)h)?"
- r"((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?"),
+ (
+ r"((?P<weeks>\d+)w)?((?P<days>\d+)d)?((?P<hours>\d+)h)?"
+ r"((?P<minutes>\d+)m)?((?P<seconds>\d+)s)?"
+ ), ] regex_list = [re.compile(x) for x in regex_list] @@ -103,22 +112,24 @@ def convert_uptime_string_seconds(uptime): uptime_seconds = 0 for unit, value in uptime_dict.items(): if value is not None: - if unit == 'years': + if unit == "years": uptime_seconds += int(value) * 31536000 - elif unit == 'weeks': + elif unit == "weeks": uptime_seconds += int(value) * 604800 - elif unit == 'days': + elif unit == "days": uptime_seconds += int(value) * 86400 - elif unit == 'hours': + elif unit == "hours": uptime_seconds += int(value) * 3600 - elif unit == 'minutes': + elif unit == "minutes": uptime_seconds += int(value) * 60 - elif unit == 'seconds': + elif unit == "seconds": uptime_seconds += int(value) else: - raise Exception('Unrecognized unit "{}" in uptime:{}'.format(unit, uptime)) + raise Exception( + 'Unrecognized unit "{}" in uptime:{}'.format(unit, uptime) + ) if not uptime_dict: - raise Exception('Unrecognized uptime string:{}'.format(uptime)) + raise Exception("Unrecognized uptime string:{}".format(uptime)) return uptime_seconds diff --git a/napalm/base/validate.py b/napalm/base/validate.py index fa4f56f9d..90405fd98 100644 --- a/napalm/base/validate.py +++ b/napalm/base/validate.py @@ -20,7 +20,7 @@ def _get_validation_file(validation_file): try: - with open(validation_file, 'r') as stream: + with open(validation_file, "r") as stream: try: validation_source = yaml.safe_load(stream) except yaml.YAMLError as exc: @@ -31,7 +31,7 @@ def _get_validation_file(validation_file): def _mode(mode_string): - mode = {'strict': False} + mode = {"strict": False} for m in mode_string.split(): if m not in mode.keys(): @@ -49,8 +49,12 @@ def _compare_getter_list(src, dst, mode): while True: try: intermediate_match = compare(src_element, dst[i]) - if isinstance(intermediate_match, dict) and intermediate_match["complies"] or \ - not isinstance(intermediate_match, dict) and intermediate_match: + if ( + isinstance(intermediate_match, dict) + and intermediate_match["complies"] + or not isinstance(intermediate_match, dict) + and intermediate_match + ): found = True result["present"].append(src_element) dst.pop(i) @@ -87,7 +91,7 @@ def _compare_getter_dict(src, dst, mode): complies = intermediate_result["complies"] if not complies: - result["present"][key]['diff'] = intermediate_result + result["present"][key]["diff"] = intermediate_result else: complies = intermediate_result nested = False @@ -116,17 +120,17 @@ def compare(src, dst): src = py23_compat.text_type(src) if isinstance(src, dict): - mode = _mode(src.pop('_mode', '')) - if 'list' in src.keys(): + mode = _mode(src.pop("_mode", "")) + if "list" in src.keys(): if not isinstance(dst, list): # This can happen with nested lists return False - return _compare_getter_list(src['list'], dst, mode) + return _compare_getter_list(src["list"], dst, mode) return _compare_getter_dict(src, dst, mode) elif isinstance(src, py23_compat.string_types): - if src.startswith('<') or src.startswith('>'): + if src.startswith("<") or src.startswith(">"): cmp_result = _compare_numeric(src, dst) return cmp_result else: @@ -136,11 +140,13 @@ def compare(src, dst): else: return src == dst - elif(type(src) == type(dst) == list): + elif type(src) == type(dst) == list: pairs = zip(src, dst) - diff_lists = [[(k, x[k], y[k]) - for k in x if not re.search(x[k], y[k])] - for x, y in pairs if x != y] + diff_lists = [ + [(k, x[k], y[k]) for k in x if not re.search(x[k], y[k])] + for x, y in pairs + if x != y + ] return empty_tree(diff_lists) else: @@ -153,7 +159,9 @@ def 
_compare_numeric(src_num, dst_num):
 match = numeric_compare_regex.match(src_num)
 if not match:
- error = "Failed numeric comparison. Collected: {}. Expected: {}".format(dst_num, src_num)
+ error = "Failed numeric comparison. Collected: {}. Expected: {}".format(
+ dst_num, src_num
+ )
 raise ValueError(error)
 operand = {
@@ -192,7 +200,7 @@ def compliance_report(cls, validation_file=None, validation_source=None):
 key = expected_results.pop("_name", "") or getter
 try:
- kwargs = expected_results.pop('_kwargs', {})
+ kwargs = expected_results.pop("_kwargs", {})
 actual_results = getattr(cls, getter)(**kwargs)
 report[key] = compare(expected_results, actual_results)
 except NotImplementedError:
diff --git a/napalm/eos/__init__.py b/napalm/eos/__init__.py
index a05d8ae42..032ff1787 100644
--- a/napalm/eos/__init__.py
+++ b/napalm/eos/__init__.py
@@ -15,4 +15,4 @@
 """napalm.eos package."""
 from napalm.eos.eos import EOSDriver
-__all__ = ('EOSDriver',)
+__all__ = ("EOSDriver",)
diff --git a/napalm/eos/eos.py b/napalm/eos/eos.py
index 2b8ce2ea2..f68922a63 100644
--- a/napalm/eos/eos.py
+++ b/napalm/eos/eos.py
@@ -40,10 +40,16 @@
 from napalm.base.base import NetworkDriver
 from napalm.base.utils import string_parsers
 from napalm.base.utils import py23_compat
-from napalm.base.exceptions import ConnectionException, MergeConfigException, \
- ReplaceConfigException, SessionLockedException, CommandErrorException
+from napalm.base.exceptions import (
+ ConnectionException,
+ MergeConfigException,
+ ReplaceConfigException,
+ SessionLockedException,
+ CommandErrorException,
+)
 import napalm.base.constants as c
+
 # local modules
 # here add local imports
 # e.g. import napalm.eos.helpers etc.
@@ -58,17 +64,26 @@ class EOSDriver(NetworkDriver):
 ("banner login", 1),
 ("banner motd", 1),
 ("comment", 1),
- ("protocol https certificate", 2)
+ ("protocol https certificate", 2),
 ]
- _RE_BGP_INFO = re.compile(r'BGP neighbor is (?P<neighbor>.*?), remote AS (?P<as>.*?), .*') # noqa
- _RE_BGP_RID_INFO = re.compile(r'.*BGP version 4, remote router ID (?P<rid>.*?), VRF (?P<vrf>.*?)$') # noqa
- _RE_BGP_DESC = re.compile(r'\s+Description: (?P<description>.*?)$')
- _RE_BGP_LOCAL = re.compile(r'Local AS is (?P<as>.*?),.*')
- _RE_BGP_PREFIX = re.compile(r'(\s*?)(?P<af>IPv[46]) Unicast:\s*(?P<sent>\d+)\s*(?P<received>\d+)') # noqa
- _RE_SNMP_COMM = re.compile(r"""^snmp-server\s+community\s+(?P<community>\S+)
+ _RE_BGP_INFO = re.compile(
+ r"BGP neighbor is (?P<neighbor>.*?), remote AS (?P<as>.*?), .*"
+ ) # noqa
+ _RE_BGP_RID_INFO = re.compile(
+ r".*BGP version 4, remote router ID (?P<rid>.*?), VRF (?P<vrf>.*?)$"
+ ) # noqa
+ _RE_BGP_DESC = re.compile(r"\s+Description: (?P<description>.*?)$")
+ _RE_BGP_LOCAL = re.compile(r"Local AS is (?P<as>.*?),.*")
+ _RE_BGP_PREFIX = re.compile(
+ r"(\s*?)(?P<af>IPv[46]) Unicast:\s*(?P<sent>\d+)\s*(?P<received>\d+)"
+ ) # noqa
+ _RE_SNMP_COMM = re.compile(
+ r"""^snmp-server\s+community\s+(?P<community>\S+)
 (\s+view\s+(?P<view>\S+))?(\s+(?P<access>ro|rw)?)
- (\s+ipv6\s+(?P<ipv6_acl>\S+))?(\s+(?P<v4_acl>\S+))?$""", re.VERBOSE)
+ (\s+ipv6\s+(?P<ipv6_acl>\S+))?(\s+(?P<v4_acl>\S+))?$""",
+ re.VERBOSE,
+ )
 def __init__(self, hostname, username, password, timeout=60, optional_args=None):
 """Constructor."""
@@ -84,43 +99,47 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None)
 optional_args = {}
 # eos_transport is there for backwards compatibility, transport is the preferred method
- self.transport = optional_args.get('transport', optional_args.get('eos_transport', 'https'))
+ self.transport = optional_args.get(
+ "transport", optional_args.get("eos_transport", "https")
+ )
- if self.transport == 'https':
- self.port = optional_args.get('port', 443)
- elif self.transport == 'http':
- self.port = optional_args.get('port', 80)
+ if self.transport == "https":
+ self.port = optional_args.get("port", 443)
+ elif self.transport == "http":
+ self.port = optional_args.get("port", 80)
- self.enablepwd = optional_args.get('enable_password', '')
+ self.enablepwd = optional_args.get("enable_password", "")
 self.platform = "eos"
 self.profile = [self.platform]
- self.eos_autoComplete = optional_args.get('eos_autoComplete', None)
+ self.eos_autoComplete = optional_args.get("eos_autoComplete", None)
 def open(self):
 """Implementation of NAPALM method open."""
 try:
- if self.transport in ('http', 'https'):
+ if self.transport in ("http", "https"):
 connection = pyeapi.client.connect(
 transport=self.transport,
 host=self.hostname,
 username=self.username,
 password=self.password,
 port=self.port,
- timeout=self.timeout
+ timeout=self.timeout,
 )
- elif self.transport == 'socket':
+ elif self.transport == "socket":
 connection = pyeapi.client.connect(transport=self.transport)
 else:
- raise ConnectionException("Unknown transport: {}".format(self.transport))
+ raise ConnectionException(
+ "Unknown transport: {}".format(self.transport)
+ )
 if self.device is None:
 self.device = pyeapi.client.Node(connection, enablepwd=self.enablepwd)
 # does not raise an Exception if unusable
 # let's try to run a very simple command
- self.device.run_commands(['show clock'], encoding='text')
+ self.device.run_commands(["show clock"], encoding="text")
 except ConnectionError as ce:
 # and this is raised either if device not avaiable
 # either if HTTP(S) agent is not enabled
@@ -132,16 +151,18 @@ def close(self):
 self.discard_config()
 def is_alive(self):
- return {
- 'is_alive': True # always true as eAPI is HTTP-based
- }
+ return {"is_alive": True} # always true as eAPI is HTTP-based
 def _lock(self):
 if self.config_session is None:
- self.config_session = 'napalm_{}'.format(datetime.now().microsecond)
- sess = self.device.run_commands(['show configuration sessions'])[0]['sessions']
- if [k for k, v in sess.items() if v['state'] == 'pending' and k != self.config_session]:
- raise SessionLockedException('Session is already in use')
+ self.config_session = "napalm_{}".format(datetime.now().microsecond)
+ sess = self.device.run_commands(["show configuration sessions"])[0]["sessions"]
+ if [
+ k
+ for k, v in sess.items()
+ if v["state"] == "pending" and k != self.config_session
+ ]:
+ raise SessionLockedException("Session is already in use")
 @staticmethod
 def _multiline_convert(config, start="banner login", end="EOF", depth=1):
@@ -155,8 +176,8 @@ def _multiline_convert(config, start="banner login", end="EOF", depth=1):
 depth = depth - 1
 except ValueError: # Couldn't find end, abort
 return ret
- ret[s] = {'cmd': ret[s], 'input': "\n".join(ret[s+1:e])}
- del ret[s + 1:e + 1]
+ ret[s] = {"cmd": ret[s], "input": 
"\n".join(ret[s + 1 : e])} + del ret[s + 1 : e + 1] return ret @@ -181,15 +202,25 @@ def _mode_comment_convert(commands): comment_count = 0 for idx, element in enumerate(commands): # Check first for stringiness, as we may have dicts in the command list already - if isinstance(element, py23_compat.string_types) and element.startswith('!!'): + if isinstance(element, py23_compat.string_types) and element.startswith( + "!!" + ): comment_count += 1 continue else: if comment_count > 0: # append the previous comment - ret.append({"cmd": "comment", - "input": "\n".join(map(lambda s: s.lstrip("! "), - commands[idx - comment_count:idx]))}) + ret.append( + { + "cmd": "comment", + "input": "\n".join( + map( + lambda s: s.lstrip("! "), + commands[idx - comment_count : idx], + ) + ), + } + ) comment_count = 0 ret.append(element) @@ -199,12 +230,12 @@ def _load_config(self, filename=None, config=None, replace=True): commands = [] self._lock() - commands.append('configure session {}'.format(self.config_session)) + commands.append("configure session {}".format(self.config_session)) if replace: - commands.append('rollback clean-config') + commands.append("rollback clean-config") if filename is not None: - with open(filename, 'r') as f: + with open(filename, "r") as f: lines = f.readlines() else: if isinstance(config, list): @@ -214,13 +245,15 @@ def _load_config(self, filename=None, config=None, replace=True): for line in lines: line = line.strip() - if line == '': + if line == "": continue - if line.startswith('!') and not line.startswith('!!'): + if line.startswith("!") and not line.startswith("!!"): continue commands.append(line) - for start, depth in [(s, d) for (s, d) in self.HEREDOC_COMMANDS if s in commands]: + for start, depth in [ + (s, d) for (s, d) in self.HEREDOC_COMMANDS if s in commands + ]: commands = self._multiline_convert(commands, start=start, depth=depth) commands = self._mode_comment_convert(commands) @@ -249,24 +282,26 @@ def load_merge_candidate(self, filename=None, config=None): def compare_config(self): """Implementation of NAPALM method compare_config.""" if self.config_session is None: - return '' + return "" else: - commands = ['show session-config named %s diffs' % self.config_session] - result = self.device.run_commands(commands, encoding='text')[0]['output'] + commands = ["show session-config named %s diffs" % self.config_session] + result = self.device.run_commands(commands, encoding="text")[0]["output"] - result = '\n'.join(result.splitlines()[2:]) + result = "\n".join(result.splitlines()[2:]) return result.strip() def commit_config(self, message=""): """Implementation of NAPALM method commit_config.""" if message: - raise NotImplementedError('Commit message not implemented for this platform') + raise NotImplementedError( + "Commit message not implemented for this platform" + ) commands = [ - 'copy startup-config flash:rollback-0', - 'configure session {}'.format(self.config_session), - 'commit', - 'write memory' + "copy startup-config flash:rollback-0", + "configure session {}".format(self.config_session), + "commit", + "write memory", ] self.device.run_commands(commands) @@ -275,116 +310,115 @@ def commit_config(self, message=""): def discard_config(self): """Implementation of NAPALM method discard_config.""" if self.config_session is not None: - commands = ['configure session {}'.format(self.config_session), 'abort'] + commands = ["configure session {}".format(self.config_session), "abort"] self.device.run_commands(commands) self.config_session = None def rollback(self): 
"""Implementation of NAPALM method rollback.""" - commands = ['configure replace flash:rollback-0', 'write memory'] + commands = ["configure replace flash:rollback-0", "write memory"] self.device.run_commands(commands) def get_facts(self): """Implementation of NAPALM method get_facts.""" - commands = ['show version', 'show hostname', 'show interfaces'] + commands = ["show version", "show hostname", "show interfaces"] result = self.device.run_commands(commands) version = result[0] hostname = result[1] - interfaces_dict = result[2]['interfaces'] + interfaces_dict = result[2]["interfaces"] - uptime = time.time() - version['bootupTimestamp'] + uptime = time.time() - version["bootupTimestamp"] - interfaces = [i for i in interfaces_dict.keys() if '.' not in i] + interfaces = [i for i in interfaces_dict.keys() if "." not in i] interfaces = string_parsers.sorted_nicely(interfaces) return { - 'hostname': hostname['hostname'], - 'fqdn': hostname['fqdn'], - 'vendor': u'Arista', - 'model': version['modelName'], - 'serial_number': version['serialNumber'], - 'os_version': version['internalVersion'], - 'uptime': int(uptime), - 'interface_list': interfaces, + "hostname": hostname["hostname"], + "fqdn": hostname["fqdn"], + "vendor": "Arista", + "model": version["modelName"], + "serial_number": version["serialNumber"], + "os_version": version["internalVersion"], + "uptime": int(uptime), + "interface_list": interfaces, } def get_interfaces(self): - commands = ['show interfaces'] + commands = ["show interfaces"] output = self.device.run_commands(commands)[0] interfaces = {} - for interface, values in output['interfaces'].items(): + for interface, values in output["interfaces"].items(): interfaces[interface] = {} - if values['lineProtocolStatus'] == 'up': - interfaces[interface]['is_up'] = True - interfaces[interface]['is_enabled'] = True + if values["lineProtocolStatus"] == "up": + interfaces[interface]["is_up"] = True + interfaces[interface]["is_enabled"] = True else: - interfaces[interface]['is_up'] = False - if values['interfaceStatus'] == 'disabled': - interfaces[interface]['is_enabled'] = False + interfaces[interface]["is_up"] = False + if values["interfaceStatus"] == "disabled": + interfaces[interface]["is_enabled"] = False else: - interfaces[interface]['is_enabled'] = True + interfaces[interface]["is_enabled"] = True - interfaces[interface]['description'] = values['description'] + interfaces[interface]["description"] = values["description"] - interfaces[interface]['last_flapped'] = values.pop('lastStatusChangeTimestamp', None) + interfaces[interface]["last_flapped"] = values.pop( + "lastStatusChangeTimestamp", None + ) - interfaces[interface]['speed'] = int(values['bandwidth'] * 1e-6) - interfaces[interface]['mac_address'] = napalm.base.helpers.convert( - napalm.base.helpers.mac, values.pop('physicalAddress', u'')) + interfaces[interface]["speed"] = int(values["bandwidth"] * 1e-6) + interfaces[interface]["mac_address"] = napalm.base.helpers.convert( + napalm.base.helpers.mac, values.pop("physicalAddress", "") + ) return interfaces def get_lldp_neighbors(self): - commands = ['show lldp neighbors'] - output = self.device.run_commands(commands)[0]['lldpNeighbors'] + commands = ["show lldp neighbors"] + output = self.device.run_commands(commands)[0]["lldpNeighbors"] lldp = {} for n in output: - if n['port'] not in lldp.keys(): - lldp[n['port']] = [] + if n["port"] not in lldp.keys(): + lldp[n["port"]] = [] - lldp[n['port']].append( - { - 'hostname': n['neighborDevice'], - 'port': n['neighborPort'], - } + 
lldp[n["port"]].append( + {"hostname": n["neighborDevice"], "port": n["neighborPort"]} ) return lldp def get_interfaces_counters(self): - commands = ['show interfaces'] + commands = ["show interfaces"] output = self.device.run_commands(commands) interface_counters = defaultdict(dict) - for interface, data in output[0]['interfaces'].items(): - if data['hardware'] == 'subinterface': + for interface, data in output[0]["interfaces"].items(): + if data["hardware"] == "subinterface": # Subinterfaces will never have counters so no point in parsing them at all continue - counters = data.get('interfaceCounters', {}) + counters = data.get("interfaceCounters", {}) interface_counters[interface].update( - tx_octets=counters.get('outOctets', -1), - rx_octets=counters.get('inOctets', -1), - tx_unicast_packets=counters.get('outUcastPkts', -1), - rx_unicast_packets=counters.get('inUcastPkts', -1), - tx_multicast_packets=counters.get('outMulticastPkts', -1), - rx_multicast_packets=counters.get('inMulticastPkts', -1), - tx_broadcast_packets=counters.get('outBroadcastPkts', -1), - rx_broadcast_packets=counters.get('inBroadcastPkts', -1), - tx_discards=counters.get('outDiscards', -1), - rx_discards=counters.get('inDiscards', -1), - tx_errors=counters.get('totalOutErrors', -1), - rx_errors=counters.get('totalInErrors', -1) + tx_octets=counters.get("outOctets", -1), + rx_octets=counters.get("inOctets", -1), + tx_unicast_packets=counters.get("outUcastPkts", -1), + rx_unicast_packets=counters.get("inUcastPkts", -1), + tx_multicast_packets=counters.get("outMulticastPkts", -1), + rx_multicast_packets=counters.get("inMulticastPkts", -1), + tx_broadcast_packets=counters.get("outBroadcastPkts", -1), + rx_broadcast_packets=counters.get("inBroadcastPkts", -1), + tx_discards=counters.get("outDiscards", -1), + rx_discards=counters.get("inDiscards", -1), + tx_errors=counters.get("totalOutErrors", -1), + rx_errors=counters.get("totalInErrors", -1), ) return interface_counters def get_bgp_neighbors(self): - def get_re_group(res, key, default=None): """ Small helper to retrive data from re match groups""" try: @@ -392,13 +426,15 @@ def get_re_group(res, key, default=None): except KeyError: return default - NEIGHBOR_FILTER = 'bgp neighbors vrf all | include remote AS | remote router ID |IPv[46] Unicast:.*[0-9]+|^Local AS|Desc|BGP state' # noqa + NEIGHBOR_FILTER = "bgp neighbors vrf all | include remote AS | remote router ID |IPv[46] Unicast:.*[0-9]+|^Local AS|Desc|BGP state" # noqa output_summary_cmds = self.device.run_commands( - ['show ipv6 bgp summary vrf all', 'show ip bgp summary vrf all'], - encoding='json') + ["show ipv6 bgp summary vrf all", "show ip bgp summary vrf all"], + encoding="json", + ) output_neighbor_cmds = self.device.run_commands( - ['show ip ' + NEIGHBOR_FILTER, 'show ipv6 ' + NEIGHBOR_FILTER], - encoding='text') + ["show ip " + NEIGHBOR_FILTER, "show ipv6 " + NEIGHBOR_FILTER], + encoding="text", + ) bgp_counters = defaultdict(lambda: dict(peers={})) for summary in output_summary_cmds: @@ -426,21 +462,25 @@ def get_re_group(res, key, default=None): } } """ - for vrf, vrf_data in summary['vrfs'].items(): - bgp_counters[vrf]['router_id'] = vrf_data['routerId'] - for peer, peer_data in vrf_data['peers'].items(): - if peer_data['peerState'] == 'Idle': - is_enabled = True if peer_data['peerStateIdleReason'] != 'Admin' else False + for vrf, vrf_data in summary["vrfs"].items(): + bgp_counters[vrf]["router_id"] = vrf_data["routerId"] + for peer, peer_data in vrf_data["peers"].items(): + if peer_data["peerState"] 
== "Idle": + is_enabled = ( + True + if peer_data["peerStateIdleReason"] != "Admin" + else False + ) else: is_enabled = True peer_info = { - 'is_up': peer_data['peerState'] == 'Established', - 'is_enabled': is_enabled, - 'uptime': int(time.time() - peer_data['upDownTime']) + "is_up": peer_data["peerState"] == "Established", + "is_enabled": is_enabled, + "uptime": int(time.time() - peer_data["upDownTime"]), } - bgp_counters[vrf]['peers'][napalm.base.helpers.ip(peer)] = peer_info + bgp_counters[vrf]["peers"][napalm.base.helpers.ip(peer)] = peer_info lines = [] - [lines.extend(x['output'].splitlines()) for x in output_neighbor_cmds] + [lines.extend(x["output"].splitlines()) for x in output_neighbor_cmds] while lines: """ Raw output from the command looks like the following: @@ -459,116 +499,122 @@ def get_re_group(res, key, default=None): desc = re.match(self._RE_BGP_DESC, next_line) if desc is None: rid_info = re.match(self._RE_BGP_RID_INFO, next_line) - desc = '' + desc = "" else: rid_info = re.match(self._RE_BGP_RID_INFO, lines.pop(0)) - desc = desc.group('description') + desc = desc.group("description") lines.pop(0) v4_stats = re.match(self._RE_BGP_PREFIX, lines.pop(0)) v6_stats = re.match(self._RE_BGP_PREFIX, lines.pop(0)) local_as = re.match(self._RE_BGP_LOCAL, lines.pop(0)) data = { - 'remote_as': int(neighbor_info.group('as')), - 'remote_id': napalm.base.helpers.ip(get_re_group(rid_info, 'rid', '0.0.0.0')), - 'local_as': int(local_as.group('as')), - 'description': py23_compat.text_type(desc), - 'address_family': { - 'ipv4': { - 'sent_prefixes': int(get_re_group(v4_stats, 'sent', -1)), - 'received_prefixes': int(get_re_group(v4_stats, 'received', -1)), - 'accepted_prefixes': -1 + "remote_as": int(neighbor_info.group("as")), + "remote_id": napalm.base.helpers.ip( + get_re_group(rid_info, "rid", "0.0.0.0") + ), + "local_as": int(local_as.group("as")), + "description": py23_compat.text_type(desc), + "address_family": { + "ipv4": { + "sent_prefixes": int(get_re_group(v4_stats, "sent", -1)), + "received_prefixes": int( + get_re_group(v4_stats, "received", -1) + ), + "accepted_prefixes": -1, }, - 'ipv6': { - 'sent_prefixes': int(get_re_group(v6_stats, 'sent', -1)), - 'received_prefixes': int(get_re_group(v6_stats, 'received', -1)), - 'accepted_prefixes': -1 - } - } + "ipv6": { + "sent_prefixes": int(get_re_group(v6_stats, "sent", -1)), + "received_prefixes": int( + get_re_group(v6_stats, "received", -1) + ), + "accepted_prefixes": -1, + }, + }, } - peer_addr = napalm.base.helpers.ip(neighbor_info.group('neighbor')) - vrf = rid_info.group('vrf') - if peer_addr not in bgp_counters[vrf]['peers']: - bgp_counters[vrf]['peers'][peer_addr] = { - 'is_up': False, # if not found, means it was not found in the oper stats - # i.e. neighbor down, - 'uptime': 0, - 'is_enabled': True + peer_addr = napalm.base.helpers.ip(neighbor_info.group("neighbor")) + vrf = rid_info.group("vrf") + if peer_addr not in bgp_counters[vrf]["peers"]: + bgp_counters[vrf]["peers"][peer_addr] = { + "is_up": False, # if not found, means it was not found in the oper stats + # i.e. 
neighbor down, + "uptime": 0, + "is_enabled": True, } - bgp_counters[vrf]['peers'][peer_addr].update(data) - if 'default' in bgp_counters: - bgp_counters['global'] = bgp_counters.pop('default') + bgp_counters[vrf]["peers"][peer_addr].update(data) + if "default" in bgp_counters: + bgp_counters["global"] = bgp_counters.pop("default") return dict(bgp_counters) def get_environment(self): def extract_temperature_data(data): for s in data: - temp = s['currentTemperature'] if 'currentTemperature' in s else 0.0 - name = s['name'] + temp = s["currentTemperature"] if "currentTemperature" in s else 0.0 + name = s["name"] values = { - 'temperature': temp, - 'is_alert': temp > s['overheatThreshold'], - 'is_critical': temp > s['criticalThreshold'] + "temperature": temp, + "is_alert": temp > s["overheatThreshold"], + "is_critical": temp > s["criticalThreshold"], } yield name, values - sh_version_out = self.device.run_commands(['show version']) - is_veos = sh_version_out[0]['modelName'].lower() == 'veos' - commands = [ - 'show environment cooling', - 'show environment temperature' - ] + sh_version_out = self.device.run_commands(["show version"]) + is_veos = sh_version_out[0]["modelName"].lower() == "veos" + commands = ["show environment cooling", "show environment temperature"] if not is_veos: - commands.append('show environment power') + commands.append("show environment power") fans_output, temp_output, power_output = self.device.run_commands(commands) else: fans_output, temp_output = self.device.run_commands(commands) - environment_counters = { - 'fans': {}, - 'temperature': {}, - 'power': {}, - 'cpu': {} - } - cpu_output = self.device.run_commands(['show processes top once'], - encoding='text')[0]['output'] - for slot in fans_output['fanTraySlots']: - environment_counters['fans'][slot['label']] = {'status': slot['status'] == 'ok'} + environment_counters = {"fans": {}, "temperature": {}, "power": {}, "cpu": {}} + cpu_output = self.device.run_commands( + ["show processes top once"], encoding="text" + )[0]["output"] + for slot in fans_output["fanTraySlots"]: + environment_counters["fans"][slot["label"]] = { + "status": slot["status"] == "ok" + } # First check FRU's - for fru_type in ['cardSlots', 'powerSupplySlots']: + for fru_type in ["cardSlots", "powerSupplySlots"]: for fru in temp_output[fru_type]: - t = {name: value for name, value in extract_temperature_data(fru['tempSensors'])} - environment_counters['temperature'].update(t) + t = { + name: value + for name, value in extract_temperature_data(fru["tempSensors"]) + } + environment_counters["temperature"].update(t) # On board sensors - parsed = {n: v for n, v in extract_temperature_data(temp_output['tempSensors'])} - environment_counters['temperature'].update(parsed) + parsed = {n: v for n, v in extract_temperature_data(temp_output["tempSensors"])} + environment_counters["temperature"].update(parsed) if not is_veos: - for psu, data in power_output['powerSupplies'].items(): - environment_counters['power'][psu] = { - 'status': data.get('state', 'ok') == 'ok', - 'capacity': data.get('capacity', -1.0), - 'output': data.get('outputPower', -1.0), + for psu, data in power_output["powerSupplies"].items(): + environment_counters["power"][psu] = { + "status": data.get("state", "ok") == "ok", + "capacity": data.get("capacity", -1.0), + "output": data.get("outputPower", -1.0), } cpu_lines = cpu_output.splitlines() # Matches either of # Cpu(s): 5.2%us, 1.4%sy, 0.0%ni, 92.2%id, 0.6%wa, 0.3%hi, 0.4%si, 0.0%st ( 4.16 > ) # %Cpu(s): 4.2 us, 0.9 sy, 0.0 ni, 94.6 id, 
0.0 wa, 0.1 hi, 0.2 si, 0.0 st ( 4.16 < )
- m = re.match('.*ni, (?P<idle>.*).id.*', cpu_lines[2])
- environment_counters['cpu'][0] = {
- '%usage': round(100 - float(m.group('idle')), 1)
+ m = re.match(".*ni, (?P<idle>.*).id.*", cpu_lines[2])
+ environment_counters["cpu"][0] = {
+ "%usage": round(100 - float(m.group("idle")), 1)
 }
 # Matches either of
 # Mem: 3844356k total, 3763184k used, 81172k free, 16732k buffers ( 4.16 > )
 # KiB Mem: 32472080 total, 5697604 used, 26774476 free, 372052 buffers ( 4.16 < )
- mem_regex = (r'[^\d]*(?P<total>\d+)[k\s]+total,'
- r'\s+(?P<used>\d+)[k\s]+used,'
- r'\s+(?P<free>\d+)[k\s]+free,.*')
+ mem_regex = (
+ r"[^\d]*(?P<total>\d+)[k\s]+total,"
+ r"\s+(?P<used>\d+)[k\s]+used,"
+ r"\s+(?P<free>\d+)[k\s]+free,.*"
+ )
 m = re.match(mem_regex, cpu_lines[3])
- environment_counters['memory'] = {
- 'available_ram': int(m.group('total')),
- 'used_ram': int(m.group('used'))
+ environment_counters["memory"] = {
+ "available_ram": int(m.group("total")),
+ "used_ram": int(m.group("used")),
 }
 return environment_counters
- def get_lldp_neighbors_detail(self, interface=''):
+ def get_lldp_neighbors_detail(self, interface=""):
 lldp_neighbors_out = {}
@@ -577,14 +623,18 @@ def get_lldp_neighbors_detail(self, interface=''):
 filters.append(interface)
 commands = [
- 'show lldp neighbors {filters} detail'.format(filters=' '.join(filters))
+ "show lldp neighbors {filters} detail".format(filters=" ".join(filters))
 ]
 lldp_neighbors_in = {}
- lldp_neighbors_in = self.device.run_commands(commands)[0].get('lldpNeighbors', {})
+ lldp_neighbors_in = self.device.run_commands(commands)[0].get(
+ "lldpNeighbors", {}
+ )
 for interface in lldp_neighbors_in:
- interface_neighbors = lldp_neighbors_in.get(interface).get('lldpNeighborInfo', {})
+ interface_neighbors = lldp_neighbors_in.get(interface).get(
+ "lldpNeighborInfo", {}
+ )
 if not interface_neighbors:
 # in case of empty infos
 continue
@@ -593,26 +643,37 @@
 for neighbor in interface_neighbors:
 if interface not in lldp_neighbors_out.keys():
 lldp_neighbors_out[interface] = []
- capabilities = neighbor.get('systemCapabilities', {})
+ capabilities = neighbor.get("systemCapabilities", {})
 capabilities_list = list(capabilities.keys())
 capabilities_list.sort()
- remote_chassis_id = neighbor.get('chassisId', u'')
- if neighbor.get("chassisIdType", u'') == "macAddress":
+ remote_chassis_id = neighbor.get("chassisId", "")
+ if neighbor.get("chassisIdType", "") == "macAddress":
 remote_chassis_id = napalm.base.helpers.mac(remote_chassis_id)
- neighbor_interface_info = neighbor.get('neighborInterfaceInfo', {})
+ neighbor_interface_info = neighbor.get("neighborInterfaceInfo", {})
 lldp_neighbors_out[interface].append(
 {
- 'parent_interface': interface, # no parent interfaces
- 'remote_port': neighbor_interface_info.get('interfaceId', u''),
- 'remote_port_description':
- neighbor_interface_info.get('interfaceDescription', u''),
- 'remote_system_name': neighbor.get('systemName', u''),
- 'remote_system_description': neighbor.get('systemDescription', u''),
- 'remote_chassis_id': remote_chassis_id,
- 'remote_system_capab': py23_compat.text_type(', '.join(capabilities_list)),
- 'remote_system_enable_capab': py23_compat.text_type(', '.join(
- [capability for capability in capabilities_list
- if capabilities[capability]]))
+ "parent_interface": interface, # no parent interfaces
+ "remote_port": neighbor_interface_info.get("interfaceId", ""),
+ "remote_port_description": neighbor_interface_info.get(
+ "interfaceDescription", ""
+ ),
+ "remote_system_name": 
neighbor.get("systemName", ""), + "remote_system_description": neighbor.get( + "systemDescription", "" + ), + "remote_chassis_id": remote_chassis_id, + "remote_system_capab": py23_compat.text_type( + ", ".join(capabilities_list) + ), + "remote_system_enable_capab": py23_compat.text_type( + ", ".join( + [ + capability + for capability in capabilities_list + if capabilities[capability] + ] + ) + ), } ) return lldp_neighbors_out @@ -621,56 +682,59 @@ def cli(self, commands): cli_output = {} if type(commands) is not list: - raise TypeError('Please enter a valid list of commands!') + raise TypeError("Please enter a valid list of commands!") for command in commands: try: cli_output[py23_compat.text_type(command)] = self.device.run_commands( - [command], encoding='text')[0].get('output') + [command], encoding="text" + )[0].get("output") # not quite fair to not exploit rum_commands # but at least can have better control to point to wrong command in case of failure except pyeapi.eapilib.CommandError: # for sure this command failed - cli_output[py23_compat.text_type(command)] = 'Invalid command: "{cmd}"'.format( - cmd=command - ) + cli_output[ + py23_compat.text_type(command) + ] = 'Invalid command: "{cmd}"'.format(cmd=command) raise CommandErrorException(str(cli_output)) except Exception as e: # something bad happened - msg = 'Unable to execute command "{cmd}": {err}'.format(cmd=command, err=e) + msg = 'Unable to execute command "{cmd}": {err}'.format( + cmd=command, err=e + ) cli_output[py23_compat.text_type(command)] = msg raise CommandErrorException(str(cli_output)) return cli_output - def get_bgp_config(self, group='', neighbor=''): + def get_bgp_config(self, group="", neighbor=""): """Implementation of NAPALM method get_bgp_config.""" _GROUP_FIELD_MAP_ = { - 'type': 'type', - 'multipath': 'multipath', - 'apply-groups': 'apply_groups', - 'remove-private-as': 'remove_private_as', - 'ebgp-multihop': 'multihop_ttl', - 'remote-as': 'remote_as', - 'local-v4-addr': 'local_address', - 'local-v6-addr': 'local_address', - 'local-as': 'local_as', - 'description': 'description', - 'import-policy': 'import_policy', - 'export-policy': 'export_policy' + "type": "type", + "multipath": "multipath", + "apply-groups": "apply_groups", + "remove-private-as": "remove_private_as", + "ebgp-multihop": "multihop_ttl", + "remote-as": "remote_as", + "local-v4-addr": "local_address", + "local-v6-addr": "local_address", + "local-as": "local_as", + "description": "description", + "import-policy": "import_policy", + "export-policy": "export_policy", } _PEER_FIELD_MAP_ = { - 'description': 'description', - 'remote-as': 'remote_as', - 'local-v4-addr': 'local_address', - 'local-v6-addr': 'local_address', - 'local-as': 'local_as', - 'next-hop-self': 'nhs', - 'route-reflector-client': 'route_reflector_client', - 'import-policy': 'import_policy', - 'export-policy': 'export_policy', - 'passwd': 'authentication_key' + "description": "description", + "remote-as": "remote_as", + "local-v4-addr": "local_address", + "local-v6-addr": "local_address", + "local-as": "local_as", + "next-hop-self": "nhs", + "route-reflector-client": "route_reflector_client", + "import-policy": "import_policy", + "export-policy": "export_policy", + "passwd": "authentication_key", } _PROPERTY_FIELD_MAP_ = _GROUP_FIELD_MAP_.copy() @@ -679,55 +743,50 @@ def get_bgp_config(self, group='', neighbor=''): _PROPERTY_TYPE_MAP_ = { # used to determine the default value # and cast the values - 'remote-as': int, - 'ebgp-multihop': int, - 'local-v4-addr': 
py23_compat.text_type, - 'local-v6-addr': py23_compat.text_type, - 'local-as': int, - 'remove-private-as': bool, - 'next-hop-self': bool, - 'description': py23_compat.text_type, - 'route-reflector-client': bool, - 'password': py23_compat.text_type, - 'route-map': py23_compat.text_type, - 'apply-groups': list, - 'type': py23_compat.text_type, - 'import-policy': py23_compat.text_type, - 'export-policy': py23_compat.text_type, - 'multipath': bool + "remote-as": int, + "ebgp-multihop": int, + "local-v4-addr": py23_compat.text_type, + "local-v6-addr": py23_compat.text_type, + "local-as": int, + "remove-private-as": bool, + "next-hop-self": bool, + "description": py23_compat.text_type, + "route-reflector-client": bool, + "password": py23_compat.text_type, + "route-map": py23_compat.text_type, + "apply-groups": list, + "type": py23_compat.text_type, + "import-policy": py23_compat.text_type, + "export-policy": py23_compat.text_type, + "multipath": bool, } - _DATATYPE_DEFAULT_ = { - py23_compat.text_type: '', - int: 0, - bool: False, - list: [] - } + _DATATYPE_DEFAULT_ = {py23_compat.text_type: "", int: 0, bool: False, list: []} def default_group_dict(local_as): group_dict = {} - group_dict.update({ - key: _DATATYPE_DEFAULT_.get(_PROPERTY_TYPE_MAP_.get(prop)) - for prop, key in _GROUP_FIELD_MAP_.items() - }) - group_dict.update({ - 'prefix_limit': {}, - 'neighbors': {}, - 'local_as': local_as - }) # few more default values + group_dict.update( + { + key: _DATATYPE_DEFAULT_.get(_PROPERTY_TYPE_MAP_.get(prop)) + for prop, key in _GROUP_FIELD_MAP_.items() + } + ) + group_dict.update( + {"prefix_limit": {}, "neighbors": {}, "local_as": local_as} + ) # few more default values return group_dict def default_neighbor_dict(local_as): neighbor_dict = {} - neighbor_dict.update({ - key: _DATATYPE_DEFAULT_.get(_PROPERTY_TYPE_MAP_.get(prop)) - for prop, key in _PEER_FIELD_MAP_.items() - }) # populating with default values - neighbor_dict.update({ - 'prefix_limit': {}, - 'local_as': local_as, - 'authentication_key': u'' - }) # few more default values + neighbor_dict.update( + { + key: _DATATYPE_DEFAULT_.get(_PROPERTY_TYPE_MAP_.get(prop)) + for prop, key in _PEER_FIELD_MAP_.items() + } + ) # populating with default values + neighbor_dict.update( + {"prefix_limit": {}, "local_as": local_as, "authentication_key": ""} + ) # few more default values return neighbor_dict def parse_options(options, default_value=False): @@ -746,60 +805,66 @@ def parse_options(options, default_value=False): if not default_value: if len(options) > 1: - field_value = napalm.base.helpers.convert(field_type, - options[1], - _DATATYPE_DEFAULT_.get(field_type)) + field_value = napalm.base.helpers.convert( + field_type, options[1], _DATATYPE_DEFAULT_.get(field_type) + ) else: if field_type is bool: field_value = True if field_name is not None: return {field_name: field_value} - elif config_property in ['route-map', 'password']: + elif config_property in ["route-map", "password"]: # do not respect the pattern neighbor [IP_ADDRESS] [PROPERTY] [VALUE] # or need special output (e.g.: maximum-routes) - if config_property == 'password': - return {'authentication_key': py23_compat.text_type(options[2])} + if config_property == "password": + return {"authentication_key": py23_compat.text_type(options[2])} # returns the MD5 password - if config_property == 'route-map': + if config_property == "route-map": direction = None if len(options) == 3: direction = options[2] field_value = field_type(options[1]) # the name of the policy elif len(options) == 2: 
direction = options[1] - if direction == 'in': - field_name = 'import_policy' + if direction == "in": + field_name = "import_policy" else: - field_name = 'export_policy' + field_name = "export_policy" return {field_name: field_value} return {} bgp_config = {} - commands = ['show running-config | section router bgp'] - bgp_conf = self.device.run_commands(commands, encoding='text')[0].get('output', '\n\n') + commands = ["show running-config | section router bgp"] + bgp_conf = self.device.run_commands(commands, encoding="text")[0].get( + "output", "\n\n" + ) bgp_conf_lines = bgp_conf.splitlines() bgp_neighbors = {} if not group: - neighbor = '' # noqa + neighbor = "" # noqa local_as = 0 bgp_neighbors = {} for bgp_conf_line in bgp_conf_lines: default_value = False bgp_conf_line = bgp_conf_line.strip() - if bgp_conf_line.startswith('router bgp'): - local_as = int(bgp_conf_line.replace('router bgp', '').strip()) + if bgp_conf_line.startswith("router bgp"): + local_as = int(bgp_conf_line.replace("router bgp", "").strip()) continue - if not (bgp_conf_line.startswith('neighbor') or - bgp_conf_line.startswith('no neighbor')): + if not ( + bgp_conf_line.startswith("neighbor") + or bgp_conf_line.startswith("no neighbor") + ): continue - if bgp_conf_line.startswith('no'): + if bgp_conf_line.startswith("no"): default_value = True - bgp_conf_line = bgp_conf_line.replace('no neighbor ', '').replace('neighbor ', '') + bgp_conf_line = bgp_conf_line.replace("no neighbor ", "").replace( + "neighbor ", "" + ) bgp_conf_line_details = bgp_conf_line.split() group_or_neighbor = py23_compat.text_type(bgp_conf_line_details[0]) options = bgp_conf_line_details[1:] @@ -812,8 +877,8 @@ def parse_options(options, default_value=False): peer_address = group_or_neighbor if peer_address not in bgp_neighbors: bgp_neighbors[peer_address] = default_neighbor_dict(local_as) - if options[0] == 'peer-group': - bgp_neighbors[peer_address]['__group'] = options[1] + if options[0] == "peer-group": + bgp_neighbors[peer_address]["__group"] = options[1] # in the config, neighbor details are lister after # the group is specified for the neighbor: @@ -839,17 +904,15 @@ def parse_options(options, default_value=False): continue if group_name not in bgp_config.keys(): bgp_config[group_name] = default_group_dict(local_as) - bgp_config[group_name].update( - parse_options(options, default_value) - ) + bgp_config[group_name].update(parse_options(options, default_value)) for peer, peer_details in bgp_neighbors.items(): - peer_group = peer_details.pop('__group', None) + peer_group = peer_details.pop("__group", None) if not peer_group: - peer_group = '_' + peer_group = "_" if peer_group not in bgp_config: bgp_config[peer_group] = default_group_dict(local_as) - bgp_config[peer_group]['neighbors'][peer] = peer_details + bgp_config[peer_group]["neighbors"][peer] = peer_details return bgp_config @@ -857,59 +920,70 @@ def get_arp_table(self): arp_table = [] - commands = ['show arp'] + commands = ["show arp"] ipv4_neighbors = [] try: - ipv4_neighbors = self.device.run_commands(commands)[0].get('ipV4Neighbors', []) + ipv4_neighbors = self.device.run_commands(commands)[0].get( + "ipV4Neighbors", [] + ) except pyeapi.eapilib.CommandError: return [] for neighbor in ipv4_neighbors: - interface = py23_compat.text_type(neighbor.get('interface')) - mac_raw = neighbor.get('hwAddress') - ip = py23_compat.text_type(neighbor.get('address')) - age = float(neighbor.get('age')) + interface = py23_compat.text_type(neighbor.get("interface")) + mac_raw = 
neighbor.get("hwAddress") + ip = py23_compat.text_type(neighbor.get("address")) + age = float(neighbor.get("age")) arp_table.append( { - 'interface': interface, - 'mac': napalm.base.helpers.mac(mac_raw), - 'ip': napalm.base.helpers.ip(ip), - 'age': age + "interface": interface, + "mac": napalm.base.helpers.mac(mac_raw), + "ip": napalm.base.helpers.ip(ip), + "age": age, } ) return arp_table def get_ntp_servers(self): - commands = ['show running-config | section ntp'] + commands = ["show running-config | section ntp"] - raw_ntp_config = self.device.run_commands(commands, encoding='text')[0].get('output', '') + raw_ntp_config = self.device.run_commands(commands, encoding="text")[0].get( + "output", "" + ) - ntp_config = napalm.base.helpers.textfsm_extractor(self, 'ntp_peers', raw_ntp_config) + ntp_config = napalm.base.helpers.textfsm_extractor( + self, "ntp_peers", raw_ntp_config + ) - return {py23_compat.text_type(ntp_peer.get('ntppeer')): {} - for ntp_peer in ntp_config if ntp_peer.get('ntppeer', '')} + return { + py23_compat.text_type(ntp_peer.get("ntppeer")): {} + for ntp_peer in ntp_config + if ntp_peer.get("ntppeer", "") + } def get_ntp_stats(self): ntp_stats = [] REGEX = ( - r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)' - r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})' - r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)' - r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)' - r'\s+([0-9\.]+)\s?$' + r"^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)" + r"\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})" + r"\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)" + r"\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)" + r"\s+([0-9\.]+)\s?$" ) - commands = ['show ntp associations'] + commands = ["show ntp associations"] # output = self.device.run_commands(commands) # pyeapi.eapilib.CommandError: CLI command 2 of 2 'show ntp associations' # failed: unconverted command # JSON output not yet implemented... 
- ntp_assoc = self.device.run_commands(commands, encoding='text')[0].get('output', '\n\n') + ntp_assoc = self.device.run_commands(commands, encoding="text")[0].get( + "output", "\n\n" + ) ntp_assoc_lines = ntp_assoc.splitlines()[2:] for ntp_assoc in ntp_assoc_lines: @@ -918,19 +992,21 @@ def get_ntp_stats(self): continue # pattern not found line_groups = line_search.groups() try: - ntp_stats.append({ - 'remote': py23_compat.text_type(line_groups[1]), - 'synchronized': (line_groups[0] == '*'), - 'referenceid': py23_compat.text_type(line_groups[2]), - 'stratum': int(line_groups[3]), - 'type': py23_compat.text_type(line_groups[4]), - 'when': py23_compat.text_type(line_groups[5]), - 'hostpoll': int(line_groups[6]), - 'reachability': int(line_groups[7]), - 'delay': float(line_groups[8]), - 'offset': float(line_groups[9]), - 'jitter': float(line_groups[10]) - }) + ntp_stats.append( + { + "remote": py23_compat.text_type(line_groups[1]), + "synchronized": (line_groups[0] == "*"), + "referenceid": py23_compat.text_type(line_groups[2]), + "stratum": int(line_groups[3]), + "type": py23_compat.text_type(line_groups[4]), + "when": py23_compat.text_type(line_groups[5]), + "hostpoll": int(line_groups[6]), + "reachability": int(line_groups[7]), + "delay": float(line_groups[8]), + "offset": float(line_groups[9]), + "jitter": float(line_groups[10]), + } + ) except Exception: continue # jump to next line @@ -940,12 +1016,16 @@ def get_interfaces_ip(self): interfaces_ip = {} - interfaces_ipv4_out = self.device.run_commands(['show ip interface'])[0]['interfaces'] + interfaces_ipv4_out = self.device.run_commands(["show ip interface"])[0][ + "interfaces" + ] try: - interfaces_ipv6_out = self.device.run_commands(['show ipv6 interface'])[0]['interfaces'] + interfaces_ipv6_out = self.device.run_commands(["show ipv6 interface"])[0][ + "interfaces" + ] except pyeapi.eapilib.CommandError as e: msg = py23_compat.text_type(e) - if 'No IPv6 configured interfaces' in msg: + if "No IPv6 configured interfaces" in msg: interfaces_ipv6_out = {} else: raise @@ -955,34 +1035,37 @@ def get_interfaces_ip(self): if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if u'ipv4' not in interfaces_ip.get(interface_name): - interfaces_ip[interface_name][u'ipv4'] = {} - if u'ipv6' not in interfaces_ip.get(interface_name): - interfaces_ip[interface_name][u'ipv6'] = {} + if "ipv4" not in interfaces_ip.get(interface_name): + interfaces_ip[interface_name]["ipv4"] = {} + if "ipv6" not in interfaces_ip.get(interface_name): + interfaces_ip[interface_name]["ipv6"] = {} - iface_details = interface_details.get('interfaceAddress', {}) - if iface_details.get('primaryIp', {}).get('address') != '0.0.0.0': + iface_details = interface_details.get("interfaceAddress", {}) + if iface_details.get("primaryIp", {}).get("address") != "0.0.0.0": ipv4_list.append( { - 'address': napalm.base.helpers.ip(iface_details.get( - 'primaryIp', {}).get('address')), - 'masklen': iface_details.get('primaryIp', {}).get('maskLen') + "address": napalm.base.helpers.ip( + iface_details.get("primaryIp", {}).get("address") + ), + "masklen": iface_details.get("primaryIp", {}).get("maskLen"), } ) - for secondary_ip in iface_details.get('secondaryIpsOrderedList', []): + for secondary_ip in iface_details.get("secondaryIpsOrderedList", []): ipv4_list.append( { - 'address': napalm.base.helpers.ip(secondary_ip.get('address')), - 'masklen': secondary_ip.get('maskLen') + "address": napalm.base.helpers.ip(secondary_ip.get("address")), + "masklen": 
secondary_ip.get("maskLen"), } ) for ip in ipv4_list: - if not ip.get('address'): + if not ip.get("address"): continue - if ip.get('address') not in interfaces_ip.get(interface_name).get(u'ipv4'): - interfaces_ip[interface_name][u'ipv4'][ip.get('address')] = { - u'prefix_length': ip.get('masklen') + if ip.get("address") not in interfaces_ip.get(interface_name).get( + "ipv4" + ): + interfaces_ip[interface_name]["ipv4"][ip.get("address")] = { + "prefix_length": ip.get("masklen") } for interface_name, interface_details in interfaces_ipv6_out.items(): @@ -990,34 +1073,40 @@ def get_interfaces_ip(self): if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if u'ipv4' not in interfaces_ip.get(interface_name): - interfaces_ip[interface_name][u'ipv4'] = {} - if u'ipv6' not in interfaces_ip.get(interface_name): - interfaces_ip[interface_name][u'ipv6'] = {} + if "ipv4" not in interfaces_ip.get(interface_name): + interfaces_ip[interface_name]["ipv4"] = {} + if "ipv6" not in interfaces_ip.get(interface_name): + interfaces_ip[interface_name]["ipv6"] = {} ipv6_list.append( { - 'address': napalm.base.helpers.convert( - napalm.base.helpers.ip, interface_details.get('linkLocal', {}) - .get('address')), - 'masklen': int( - interface_details.get('linkLocal', {}).get('subnet', '::/0').split('/')[-1]) + "address": napalm.base.helpers.convert( + napalm.base.helpers.ip, + interface_details.get("linkLocal", {}).get("address"), + ), + "masklen": int( + interface_details.get("linkLocal", {}) + .get("subnet", "::/0") + .split("/")[-1] + ) # when no link-local set, address will be None and maslken 0 } ) - for address in interface_details.get('addresses'): + for address in interface_details.get("addresses"): ipv6_list.append( { - 'address': napalm.base.helpers.ip(address.get('address')), - 'masklen': int(address.get('subnet').split('/')[-1]) + "address": napalm.base.helpers.ip(address.get("address")), + "masklen": int(address.get("subnet").split("/")[-1]), } ) for ip in ipv6_list: - if not ip.get('address'): + if not ip.get("address"): continue - if ip.get('address') not in interfaces_ip.get(interface_name).get(u'ipv6'): - interfaces_ip[interface_name][u'ipv6'][ip.get('address')] = { - u'prefix_length': ip.get('masklen') + if ip.get("address") not in interfaces_ip.get(interface_name).get( + "ipv6" + ): + interfaces_ip[interface_name]["ipv6"][ip.get("address")] = { + "prefix_length": ip.get("masklen") } return interfaces_ip @@ -1026,161 +1115,185 @@ def get_mac_address_table(self): mac_table = [] - commands = ['show mac address-table'] + commands = ["show mac address-table"] - mac_entries = self.device.run_commands(commands)[0].get( - 'unicastTable', {}).get('tableEntries', []) + mac_entries = ( + self.device.run_commands(commands)[0] + .get("unicastTable", {}) + .get("tableEntries", []) + ) for mac_entry in mac_entries: - vlan = mac_entry.get('vlanId') - interface = mac_entry.get('interface') - mac_raw = mac_entry.get('macAddress') - static = (mac_entry.get('entryType') == 'static') - last_move = mac_entry.get('lastMove', 0.0) - moves = mac_entry.get('moves', 0) + vlan = mac_entry.get("vlanId") + interface = mac_entry.get("interface") + mac_raw = mac_entry.get("macAddress") + static = mac_entry.get("entryType") == "static" + last_move = mac_entry.get("lastMove", 0.0) + moves = mac_entry.get("moves", 0) mac_table.append( { - 'mac': napalm.base.helpers.mac(mac_raw), - 'interface': interface, - 'vlan': vlan, - 'active': True, - 'static': static, - 'moves': moves, - 'last_move': last_move + 
"mac": napalm.base.helpers.mac(mac_raw), + "interface": interface, + "vlan": vlan, + "active": True, + "static": static, + "moves": moves, + "last_move": last_move, } ) return mac_table - def get_route_to(self, destination='', protocol=''): + def get_route_to(self, destination="", protocol=""): routes = {} # Placeholder for vrf arg - vrf = '' + vrf = "" # Right not iterating through vrfs is necessary # show ipv6 route doesn't support vrf 'all' - if vrf == '': + if vrf == "": vrfs = sorted(self._get_vrfs()) else: vrfs = [vrf] - if protocol.lower() == 'direct': - protocol = 'connected' + if protocol.lower() == "direct": + protocol = "connected" - ipv = '' + ipv = "" if IPNetwork(destination).version == 6: - ipv = 'v6' + ipv = "v6" commands = [] for _vrf in vrfs: - commands.append('show ip{ipv} route vrf {_vrf} {destination} {protocol} detail'.format( - ipv=ipv, - _vrf=_vrf, - destination=destination, - protocol=protocol, - )) + commands.append( + "show ip{ipv} route vrf {_vrf} {destination} {protocol} detail".format( + ipv=ipv, _vrf=_vrf, destination=destination, protocol=protocol + ) + ) commands_output = self.device.run_commands(commands) for _vrf, command_output in zip(vrfs, commands_output): - if ipv == 'v6': - routes_out = command_output.get('routes', {}) + if ipv == "v6": + routes_out = command_output.get("routes", {}) else: - routes_out = command_output.get('vrfs', {}).get(_vrf, {}).get('routes', {}) + routes_out = ( + command_output.get("vrfs", {}).get(_vrf, {}).get("routes", {}) + ) for prefix, route_details in routes_out.items(): if prefix not in routes.keys(): routes[prefix] = [] - route_protocol = route_details.get('routeType') - preference = route_details.get('preference', 0) + route_protocol = route_details.get("routeType") + preference = route_details.get("preference", 0) route = { - 'current_active': True, - 'last_active': True, - 'age': 0, - 'next_hop': u'', - 'protocol': route_protocol, - 'outgoing_interface': u'', - 'preference': preference, - 'inactive_reason': u'', - 'routing_table': _vrf, - 'selected_next_hop': True, - 'protocol_attributes': {} + "current_active": True, + "last_active": True, + "age": 0, + "next_hop": "", + "protocol": route_protocol, + "outgoing_interface": "", + "preference": preference, + "inactive_reason": "", + "routing_table": _vrf, + "selected_next_hop": True, + "protocol_attributes": {}, } - if protocol == 'bgp' or route_protocol.lower() in ('ebgp', 'ibgp'): + if protocol == "bgp" or route_protocol.lower() in ("ebgp", "ibgp"): nexthop_interface_map = {} - for next_hop in route_details.get('vias'): - nexthop_ip = napalm.base.helpers.ip(next_hop.get('nexthopAddr')) - nexthop_interface_map[nexthop_ip] = next_hop.get('interface') - metric = route_details.get('metric') - command = 'show ip{ipv} bgp {destination} detail vrf {_vrf}'.format( - ipv=ipv, - destination=prefix, - _vrf=_vrf + for next_hop in route_details.get("vias"): + nexthop_ip = napalm.base.helpers.ip(next_hop.get("nexthopAddr")) + nexthop_interface_map[nexthop_ip] = next_hop.get("interface") + metric = route_details.get("metric") + command = "show ip{ipv} bgp {destination} detail vrf {_vrf}".format( + ipv=ipv, destination=prefix, _vrf=_vrf + ) + vrf_details = ( + self.device.run_commands([command])[0] + .get("vrfs", {}) + .get(_vrf, {}) + ) + local_as = vrf_details.get("asn") + bgp_routes = ( + vrf_details.get("bgpRouteEntries", {}) + .get(prefix, {}) + .get("bgpRoutePaths", []) ) - vrf_details = self.device.run_commands([command])[0].get( - 'vrfs', {}).get(_vrf, {}) - local_as = 
vrf_details.get('asn') - bgp_routes = vrf_details.get( - 'bgpRouteEntries', {}).get(prefix, {}).get('bgpRoutePaths', []) for bgp_route_details in bgp_routes: bgp_route = route.copy() - as_path = bgp_route_details.get('asPathEntry', {}).get('asPath', u'') + as_path = bgp_route_details.get("asPathEntry", {}).get( + "asPath", "" + ) remote_as = int(as_path.strip("()").split()[-1]) - remote_address = napalm.base.helpers.ip(bgp_route_details.get( - 'routeDetail', {}).get('peerEntry', {}).get('peerAddr', '')) - local_preference = bgp_route_details.get('localPreference') - next_hop = napalm.base.helpers.ip(bgp_route_details.get('nextHop')) - active_route = bgp_route_details.get('routeType', {}).get('active', False) + remote_address = napalm.base.helpers.ip( + bgp_route_details.get("routeDetail", {}) + .get("peerEntry", {}) + .get("peerAddr", "") + ) + local_preference = bgp_route_details.get("localPreference") + next_hop = napalm.base.helpers.ip( + bgp_route_details.get("nextHop") + ) + active_route = bgp_route_details.get("routeType", {}).get( + "active", False + ) last_active = active_route # should find smth better - communities = bgp_route_details.get('routeDetail', {}).get( - 'communityList', []) - preference2 = bgp_route_details.get('weight') - inactive_reason = bgp_route_details.get('reasonNotBestpath', '') - bgp_route.update({ - 'current_active': active_route, - 'inactive_reason': inactive_reason, - 'last_active': last_active, - 'next_hop': next_hop, - 'outgoing_interface': nexthop_interface_map.get(next_hop), - 'selected_next_hop': active_route, - 'protocol_attributes': { - 'metric': metric, - 'as_path': as_path, - 'local_preference': local_preference, - 'local_as': local_as, - 'remote_as': remote_as, - 'remote_address': remote_address, - 'preference2': preference2, - 'communities': communities + communities = bgp_route_details.get("routeDetail", {}).get( + "communityList", [] + ) + preference2 = bgp_route_details.get("weight") + inactive_reason = bgp_route_details.get("reasonNotBestpath", "") + bgp_route.update( + { + "current_active": active_route, + "inactive_reason": inactive_reason, + "last_active": last_active, + "next_hop": next_hop, + "outgoing_interface": nexthop_interface_map.get( + next_hop + ), + "selected_next_hop": active_route, + "protocol_attributes": { + "metric": metric, + "as_path": as_path, + "local_preference": local_preference, + "local_as": local_as, + "remote_as": remote_as, + "remote_address": remote_address, + "preference2": preference2, + "communities": communities, + }, } - }) + ) routes[prefix].append(bgp_route) else: - if route_details.get('routeAction') in ('drop',): - route['next_hop'] = 'NULL' - if route_details.get('routingDisabled') is True: - route['last_active'] = False - route['current_active'] = False - for next_hop in route_details.get('vias'): + if route_details.get("routeAction") in ("drop",): + route["next_hop"] = "NULL" + if route_details.get("routingDisabled") is True: + route["last_active"] = False + route["current_active"] = False + for next_hop in route_details.get("vias"): route_next_hop = route.copy() - if next_hop.get('nexthopAddr') is None: + if next_hop.get("nexthopAddr") is None: route_next_hop.update( { - 'next_hop': '', - 'outgoing_interface': next_hop.get('interface') + "next_hop": "", + "outgoing_interface": next_hop.get("interface"), } ) else: route_next_hop.update( { - 'next_hop': napalm.base.helpers.ip(next_hop.get('nexthopAddr')), - 'outgoing_interface': next_hop.get('interface') + "next_hop": napalm.base.helpers.ip( + 
next_hop.get("nexthopAddr") + ), + "outgoing_interface": next_hop.get("interface"), } ) routes[prefix].append(route_next_hop) - if route_details.get('vias') == []: # empty list + if route_details.get("vias") == []: # empty list routes[prefix].append(route) return routes @@ -1188,104 +1301,97 @@ def get_snmp_information(self): """get_snmp_information() for EOS. Re-written to not use TextFSM""" # Default values - snmp_dict = { - 'chassis_id': '', - 'location': '', - 'contact': '', - 'community': {} - } + snmp_dict = {"chassis_id": "", "location": "", "contact": "", "community": {}} - commands = [ - 'show snmp chassis', - 'show snmp location', - 'show snmp contact' - ] - snmp_config = self.device.run_commands(commands, encoding='json') + commands = ["show snmp chassis", "show snmp location", "show snmp contact"] + snmp_config = self.device.run_commands(commands, encoding="json") for line in snmp_config: for k, v in line.items(): - if k == 'chassisId': - snmp_dict['chassis_id'] = v + if k == "chassisId": + snmp_dict["chassis_id"] = v else: # Some EOS versions add extra quotes snmp_dict[k] = v.strip('"') - commands = ['show running-config | section snmp-server community'] - raw_snmp_config = self.device.run_commands(commands, encoding='text')[0].get('output', '') + commands = ["show running-config | section snmp-server community"] + raw_snmp_config = self.device.run_commands(commands, encoding="text")[0].get( + "output", "" + ) for line in raw_snmp_config.splitlines(): match = self._RE_SNMP_COMM.search(line) if match: - matches = match.groupdict('') - snmp_dict['community'][match.group('community')] = { - 'acl': py23_compat.text_type(matches['v4_acl']), - 'mode': py23_compat.text_type(matches['access']) + matches = match.groupdict("") + snmp_dict["community"][match.group("community")] = { + "acl": py23_compat.text_type(matches["v4_acl"]), + "mode": py23_compat.text_type(matches["access"]), } return snmp_dict def get_users(self): - def _sshkey_type(sshkey): - if sshkey.startswith('ssh-rsa'): - return u'ssh_rsa', py23_compat.text_type(sshkey) - elif sshkey.startswith('ssh-dss'): - return u'ssh_dsa', py23_compat.text_type(sshkey) - return u'ssh_rsa', u'' + if sshkey.startswith("ssh-rsa"): + return "ssh_rsa", py23_compat.text_type(sshkey) + elif sshkey.startswith("ssh-dss"): + return "ssh_dsa", py23_compat.text_type(sshkey) + return "ssh_rsa", "" users = {} - commands = ['show user-account'] - user_items = self.device.run_commands(commands)[0].get('users', {}) + commands = ["show user-account"] + user_items = self.device.run_commands(commands)[0].get("users", {}) for user, user_details in user_items.items(): - user_details.pop('username', '') - sshkey_value = user_details.pop('sshAuthorizedKey', '') + user_details.pop("username", "") + sshkey_value = user_details.pop("sshAuthorizedKey", "") sshkey_type, sshkey_value = _sshkey_type(sshkey_value) - if sshkey_value != '': + if sshkey_value != "": sshkey_list = [sshkey_value] else: sshkey_list = [] - user_details.update({ - 'level': user_details.pop('privLevel', 0), - 'password': py23_compat.text_type(user_details.pop('secret', '')), - 'sshkeys': sshkey_list - }) + user_details.update( + { + "level": user_details.pop("privLevel", 0), + "password": py23_compat.text_type(user_details.pop("secret", "")), + "sshkeys": sshkey_list, + } + ) users[user] = user_details return users - def traceroute(self, - destination, - source=c.TRACEROUTE_SOURCE, - ttl=c.TRACEROUTE_TTL, - timeout=c.TRACEROUTE_TIMEOUT, - vrf=c.TRACEROUTE_VRF): + def traceroute( + self, + 
destination, + source=c.TRACEROUTE_SOURCE, + ttl=c.TRACEROUTE_TTL, + timeout=c.TRACEROUTE_TIMEOUT, + vrf=c.TRACEROUTE_VRF, + ): _HOP_ENTRY_PROBE = [ - r'\s+', - r'(', # beginning of host_name (ip_address) RTT group - r'(', # beginning of host_name (ip_address) group only - r'([a-zA-Z0-9\.:-]*)', # hostname - r'\s+', - r'\(?([a-fA-F0-9\.:][^\)]*)\)?' # IP Address between brackets - r')?', # end of host_name (ip_address) group only + r"\s+", + r"(", # beginning of host_name (ip_address) RTT group + r"(", # beginning of host_name (ip_address) group only + r"([a-zA-Z0-9\.:-]*)", # hostname + r"\s+", + r"\(?([a-fA-F0-9\.:][^\)]*)\)?" # IP Address between brackets + r")?", # end of host_name (ip_address) group only # also hostname/ip are optional -- they can or cannot be specified # if not specified, means the current probe followed the same path as the previous - r'\s+', - r'(\d+\.\d+)\s+ms', # RTT - r'|\*', # OR *, when non responsive hop - r')' # end of host_name (ip_address) RTT group + r"\s+", + r"(\d+\.\d+)\s+ms", # RTT + r"|\*", # OR *, when non responsive hop + r")", # end of host_name (ip_address) RTT group ] - _HOP_ENTRY = [ - r'\s?', # space before hop index? - r'(\d+)', # hop index - ] + _HOP_ENTRY = [r"\s?", r"(\d+)"] # space before hop index? # hop index traceroute_result = {} - source_opt = '' - ttl_opt = '' - timeout_opt = '' + source_opt = "" + ttl_opt = "" + timeout_opt = "" # if not ttl: # ttl = 20 @@ -1296,53 +1402,60 @@ def traceroute(self, commands = [] if vrf: - commands.append('routing-context vrf {vrf}'.format(vrf=vrf)) + commands.append("routing-context vrf {vrf}".format(vrf=vrf)) if source: - source_opt = '-s {source}'.format(source=source) + source_opt = "-s {source}".format(source=source) if ttl: - ttl_opt = '-m {ttl}'.format(ttl=ttl) + ttl_opt = "-m {ttl}".format(ttl=ttl) if timeout: - timeout_opt = '-w {timeout}'.format(timeout=timeout) + timeout_opt = "-w {timeout}".format(timeout=timeout) total_timeout = timeout * ttl # `ttl`, `source` and `timeout` are not supported by default CLI # so we need to go through the bash and set a specific timeout commands.append( - ('bash timeout {total_timeout} traceroute {destination} ' - '{source_opt} {ttl_opt} {timeout_opt}').format( + ( + "bash timeout {total_timeout} traceroute {destination} " + "{source_opt} {ttl_opt} {timeout_opt}" + ).format( total_timeout=total_timeout, destination=destination, source_opt=source_opt, ttl_opt=ttl_opt, - timeout_opt=timeout_opt + timeout_opt=timeout_opt, ) ) try: - traceroute_raw_output = self.device.run_commands( - commands, encoding='text')[-1].get('output') + traceroute_raw_output = self.device.run_commands(commands, encoding="text")[ + -1 + ].get("output") except CommandErrorException: - return {'error': 'Cannot execute traceroute on the device: {}'.format(commands[0])} + return { + "error": "Cannot execute traceroute on the device: {}".format( + commands[0] + ) + } - hop_regex = ''.join(_HOP_ENTRY + _HOP_ENTRY_PROBE * probes) + hop_regex = "".join(_HOP_ENTRY + _HOP_ENTRY_PROBE * probes) - traceroute_result['success'] = {} + traceroute_result["success"] = {} for line in traceroute_raw_output.splitlines(): hop_search = re.search(hop_regex, line) if not hop_search: continue hop_details = hop_search.groups() hop_index = int(hop_details[0]) - previous_probe_host_name = '*' - previous_probe_ip_address = '*' - traceroute_result['success'][hop_index] = {'probes': {}} + previous_probe_host_name = "*" + previous_probe_ip_address = "*" + traceroute_result["success"][hop_index] = {"probes": {}} for 
probe_index in range(probes): - host_name = hop_details[3+probe_index*5] - hop_addr = hop_details[4+probe_index*5] + host_name = hop_details[3 + probe_index * 5] + hop_addr = hop_details[4 + probe_index * 5] ip_address = napalm.base.helpers.convert( napalm.base.helpers.ip, hop_addr, hop_addr ) - rtt = hop_details[5+probe_index*5] + rtt = hop_details[5 + probe_index * 5] if rtt: rtt = float(rtt) else: @@ -1351,89 +1464,102 @@ def traceroute(self, host_name = previous_probe_host_name if not ip_address: ip_address = previous_probe_ip_address - if hop_details[1+probe_index*5] == '*': - host_name = '*' - ip_address = '*' - traceroute_result['success'][hop_index]['probes'][probe_index+1] = { - 'host_name': py23_compat.text_type(host_name), - 'ip_address': py23_compat.text_type(ip_address), - 'rtt': rtt + if hop_details[1 + probe_index * 5] == "*": + host_name = "*" + ip_address = "*" + traceroute_result["success"][hop_index]["probes"][probe_index + 1] = { + "host_name": py23_compat.text_type(host_name), + "ip_address": py23_compat.text_type(ip_address), + "rtt": rtt, } previous_probe_host_name = host_name previous_probe_ip_address = ip_address return traceroute_result - def get_bgp_neighbors_detail(self, neighbor_address=''): + def get_bgp_neighbors_detail(self, neighbor_address=""): """Implementation of get_bgp_neighbors_detail""" + def _parse_per_peer_bgp_detail(peer_output): """This function parses the raw data per peer and returns a json structure per peer. """ - int_fields = ['local_as', 'remote_as', - 'local_port', 'remote_port', 'local_port', - 'input_messages', 'output_messages', 'input_updates', - 'output_updates', 'messages_queued_out', 'holdtime', - 'configured_holdtime', 'keepalive', - 'configured_keepalive', 'advertised_prefix_count', - 'received_prefix_count'] + int_fields = [ + "local_as", + "remote_as", + "local_port", + "remote_port", + "local_port", + "input_messages", + "output_messages", + "input_updates", + "output_updates", + "messages_queued_out", + "holdtime", + "configured_holdtime", + "keepalive", + "configured_keepalive", + "advertised_prefix_count", + "received_prefix_count", + ] peer_details = [] # Using preset template to extract peer info - peer_info = ( - napalm.base.helpers.textfsm_extractor( - self, 'bgp_detail', peer_output)) + peer_info = napalm.base.helpers.textfsm_extractor( + self, "bgp_detail", peer_output + ) for item in peer_info: # Determining a few other fields in the final peer_info - item['up'] = ( - True if item['up'] == "up" else False) - item['local_address_configured'] = ( - True if item['local_address'] else False) - item['multihop'] = ( - False if item['multihop'] == 0 or - item['multihop'] == '' else True) + item["up"] = True if item["up"] == "up" else False + item["local_address_configured"] = ( + True if item["local_address"] else False + ) + item["multihop"] = ( + False if item["multihop"] == 0 or item["multihop"] == "" else True + ) # TODO: The below fields need to be retrieved # Currently defaulting their values to False or 0 - item['multipath'] = False - item['remove_private_as'] = False - item['suppress_4byte_as'] = False - item['local_as_prepend'] = False - item['flap_count'] = 0 - item['active_prefix_count'] = 0 - item['suppressed_prefix_count'] = 0 + item["multipath"] = False + item["remove_private_as"] = False + item["suppress_4byte_as"] = False + item["local_as_prepend"] = False + item["flap_count"] = 0 + item["active_prefix_count"] = 0 + item["suppressed_prefix_count"] = 0 # Converting certain fields into int for key in 
int_fields: item[key] = napalm.base.helpers.convert(int, item[key], 0) # Conforming with the datatypes defined by the base class - item['export_policy'] = ( - napalm.base.helpers.convert( - py23_compat.text_type, item['export_policy'])) - item['last_event'] = ( - napalm.base.helpers.convert( - py23_compat.text_type, item['last_event'])) - item['remote_address'] = napalm.base.helpers.ip(item['remote_address']) - item['previous_connection_state'] = ( - napalm.base.helpers.convert( - py23_compat.text_type, item['previous_connection_state'])) - item['import_policy'] = ( - napalm.base.helpers.convert( - py23_compat.text_type, item['import_policy'])) - item['connection_state'] = ( - napalm.base.helpers.convert( - py23_compat.text_type, item['connection_state'])) - item['routing_table'] = ( - napalm.base.helpers.convert( - py23_compat.text_type, item['routing_table'])) - item['router_id'] = napalm.base.helpers.ip(item['router_id']) - item['local_address'] = napalm.base.helpers.convert( - napalm.base.helpers.ip, item['local_address']) + item["export_policy"] = napalm.base.helpers.convert( + py23_compat.text_type, item["export_policy"] + ) + item["last_event"] = napalm.base.helpers.convert( + py23_compat.text_type, item["last_event"] + ) + item["remote_address"] = napalm.base.helpers.ip(item["remote_address"]) + item["previous_connection_state"] = napalm.base.helpers.convert( + py23_compat.text_type, item["previous_connection_state"] + ) + item["import_policy"] = napalm.base.helpers.convert( + py23_compat.text_type, item["import_policy"] + ) + item["connection_state"] = napalm.base.helpers.convert( + py23_compat.text_type, item["connection_state"] + ) + item["routing_table"] = napalm.base.helpers.convert( + py23_compat.text_type, item["routing_table"] + ) + item["router_id"] = napalm.base.helpers.ip(item["router_id"]) + item["local_address"] = napalm.base.helpers.convert( + napalm.base.helpers.ip, item["local_address"] + ) peer_details.append(item) @@ -1441,8 +1567,8 @@ def _parse_per_peer_bgp_detail(peer_output): def _append(bgp_dict, peer_info): - remote_as = peer_info['remote_as'] - vrf_name = peer_info['routing_table'] + remote_as = peer_info["remote_as"] + vrf_name = peer_info["routing_table"] if vrf_name not in bgp_dict.keys(): bgp_dict[vrf_name] = {} @@ -1454,10 +1580,10 @@ def _append(bgp_dict, peer_info): commands = [] summary_commands = [] if not neighbor_address: - commands.append('show ip bgp neighbors vrf all') - commands.append('show ipv6 bgp neighbors vrf all') - summary_commands.append('show ip bgp summary vrf all') - summary_commands.append('show ipv6 bgp summary vrf all') + commands.append("show ip bgp neighbors vrf all") + commands.append("show ipv6 bgp neighbors vrf all") + summary_commands.append("show ip bgp summary vrf all") + summary_commands.append("show ipv6 bgp summary vrf all") else: try: peer_ver = IPAddress(neighbor_address).version @@ -1465,18 +1591,14 @@ def _append(bgp_dict, peer_info): raise e if peer_ver == 4: - commands.append('show ip bgp neighbors %s vrf all' % - neighbor_address) - summary_commands.append('show ip bgp summary vrf all') + commands.append("show ip bgp neighbors %s vrf all" % neighbor_address) + summary_commands.append("show ip bgp summary vrf all") elif peer_ver == 6: - commands.append('show ipv6 bgp neighbors %s vrf all' % - neighbor_address) - summary_commands.append('show ipv6 bgp summary vrf all') + commands.append("show ipv6 bgp neighbors %s vrf all" % neighbor_address) + summary_commands.append("show ipv6 bgp summary vrf all") - raw_output 
= ( - self.device.run_commands(commands, encoding='text')) - bgp_summary = ( - self.device.run_commands(summary_commands, encoding='json')) + raw_output = self.device.run_commands(commands, encoding="text") + bgp_summary = self.device.run_commands(summary_commands, encoding="json") bgp_detail_info = {} @@ -1484,7 +1606,7 @@ def _append(bgp_dict, peer_info): v6_peer_info = [] if neighbor_address: - peer_info = _parse_per_peer_bgp_detail(raw_output[0]['output']) + peer_info = _parse_per_peer_bgp_detail(raw_output[0]["output"]) if peer_ver == 4: v4_peer_info.append(peer_info[0]) @@ -1493,16 +1615,18 @@ def _append(bgp_dict, peer_info): else: # Using preset template to extract peer info - v4_peer_info = _parse_per_peer_bgp_detail(raw_output[0]['output']) - v6_peer_info = _parse_per_peer_bgp_detail(raw_output[1]['output']) + v4_peer_info = _parse_per_peer_bgp_detail(raw_output[0]["output"]) + v6_peer_info = _parse_per_peer_bgp_detail(raw_output[1]["output"]) for peer_info in v4_peer_info: - vrf_name = peer_info['routing_table'] - peer_remote_addr = peer_info['remote_address'] - peer_info['accepted_prefix_count'] = ( - bgp_summary[0]['vrfs'][vrf_name]['peers'][peer_remote_addr]['prefixAccepted'] - if peer_remote_addr in bgp_summary[0]['vrfs'][vrf_name]['peers'].keys() + vrf_name = peer_info["routing_table"] + peer_remote_addr = peer_info["remote_address"] + peer_info["accepted_prefix_count"] = ( + bgp_summary[0]["vrfs"][vrf_name]["peers"][peer_remote_addr][ + "prefixAccepted" + ] + if peer_remote_addr in bgp_summary[0]["vrfs"][vrf_name]["peers"].keys() else 0 ) @@ -1510,11 +1634,13 @@ def _append(bgp_dict, peer_info): for peer_info in v6_peer_info: - vrf_name = peer_info['routing_table'] - peer_remote_addr = peer_info['remote_address'] - peer_info['accepted_prefix_count'] = ( - bgp_summary[1]['vrfs'][vrf_name]['peers'][peer_remote_addr]['prefixAccepted'] - if peer_remote_addr in bgp_summary[1]['vrfs'][vrf_name]['peers'].keys() + vrf_name = peer_info["routing_table"] + peer_remote_addr = peer_info["remote_address"] + peer_info["accepted_prefix_count"] = ( + bgp_summary[1]["vrfs"][vrf_name]["peers"][peer_remote_addr][ + "prefixAccepted" + ] + if peer_remote_addr in bgp_summary[1]["vrfs"][vrf_name]["peers"].keys() else 0 ) @@ -1524,50 +1650,49 @@ def _append(bgp_dict, peer_info): def get_optics(self): - command = ['show interfaces transceiver'] + command = ["show interfaces transceiver"] - output = ( - self.device.run_commands( - command, encoding='json')[0]['interfaces']) + output = self.device.run_commands(command, encoding="json")[0]["interfaces"] # Formatting data into return data structure optics_detail = {} for port, port_values in output.items(): - port_detail = { - 'physical_channels': {'channel': []} - } + port_detail = {"physical_channels": {"channel": []}} # Defaulting avg, min, max values to 0.0 since device does not # return these values optic_states = { - 'index': 0, - 'state': { - 'input_power': { - 'instant': (port_values['rxPower'] - if 'rxPower' in port_values else 0.0), - 'avg': 0.0, - 'min': 0.0, - 'max': 0.0 + "index": 0, + "state": { + "input_power": { + "instant": ( + port_values["rxPower"] if "rxPower" in port_values else 0.0 + ), + "avg": 0.0, + "min": 0.0, + "max": 0.0, }, - 'output_power': { - 'instant': (port_values['txPower'] - if 'txPower' in port_values else 0.0), - 'avg': 0.0, - 'min': 0.0, - 'max': 0.0 + "output_power": { + "instant": ( + port_values["txPower"] if "txPower" in port_values else 0.0 + ), + "avg": 0.0, + "min": 0.0, + "max": 0.0, }, - 
'laser_bias_current': { - 'instant': (port_values['txBias'] - if 'txBias' in port_values else 0.0), - 'avg': 0.0, - 'min': 0.0, - 'max': 0.0 - } - } + "laser_bias_current": { + "instant": ( + port_values["txBias"] if "txBias" in port_values else 0.0 + ), + "avg": 0.0, + "min": 0.0, + "max": 0.0, + }, + }, } - port_detail['physical_channels']['channel'].append(optic_states) + port_detail["physical_channels"]["channel"].append(optic_states) optics_detail[port] = port_detail return optics_detail @@ -1576,107 +1701,116 @@ def get_config(self, retrieve="all"): """get_config implementation for EOS.""" get_startup = retrieve == "all" or retrieve == "startup" get_running = retrieve == "all" or retrieve == "running" - get_candidate = (retrieve == "all" or retrieve == "candidate") and self.config_session + get_candidate = ( + retrieve == "all" or retrieve == "candidate" + ) and self.config_session if retrieve == "all": - commands = ['show startup-config', - 'show running-config'] + commands = ["show startup-config", "show running-config"] if self.config_session: - commands.append('show session-config named {}'.format(self.config_session)) + commands.append( + "show session-config named {}".format(self.config_session) + ) output = self.device.run_commands(commands, encoding="text") return { - 'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"", - 'running': py23_compat.text_type(output[1]['output']) if get_running else u"", - 'candidate': py23_compat.text_type(output[2]['output']) if get_candidate else u"", + "startup": py23_compat.text_type(output[0]["output"]) + if get_startup + else "", + "running": py23_compat.text_type(output[1]["output"]) + if get_running + else "", + "candidate": py23_compat.text_type(output[2]["output"]) + if get_candidate + else "", } elif get_startup or get_running: - commands = ['show {}-config'.format(retrieve)] + commands = ["show {}-config".format(retrieve)] output = self.device.run_commands(commands, encoding="text") return { - 'startup': py23_compat.text_type(output[0]['output']) if get_startup else u"", - 'running': py23_compat.text_type(output[0]['output']) if get_running else u"", - 'candidate': "", + "startup": py23_compat.text_type(output[0]["output"]) + if get_startup + else "", + "running": py23_compat.text_type(output[0]["output"]) + if get_running + else "", + "candidate": "", } elif get_candidate: - commands = ['show session-config named {}'.format(self.config_session)] + commands = ["show session-config named {}".format(self.config_session)] output = self.device.run_commands(commands, encoding="text") return { - 'startup': "", - 'running': "", - 'candidate': py23_compat.text_type(output[0]['output']), + "startup": "", + "running": "", + "candidate": py23_compat.text_type(output[0]["output"]), } elif retrieve == "candidate": # If we get here it means that we want the candidate but there is none. 
- return { - 'startup': "", - 'running': "", - 'candidate': "", - } + return {"startup": "", "running": "", "candidate": ""} else: raise Exception("Wrong retrieve filter: {}".format(retrieve)) def _show_vrf(self): - commands = ['show vrf'] + commands = ["show vrf"] # This command has no JSON yet - raw_output = self.device.run_commands(commands, encoding='text')[0].get('output', '') + raw_output = self.device.run_commands(commands, encoding="text")[0].get( + "output", "" + ) - output = napalm.base.helpers.textfsm_extractor(self, 'vrf', raw_output) + output = napalm.base.helpers.textfsm_extractor(self, "vrf", raw_output) return output def _get_vrfs(self): output = self._show_vrf() - vrfs = [py23_compat.text_type(vrf['name']) for vrf in output] + vrfs = [py23_compat.text_type(vrf["name"]) for vrf in output] - vrfs.append(u'default') + vrfs.append("default") return vrfs - def get_network_instances(self, name=''): + def get_network_instances(self, name=""): """get_network_instances implementation for EOS.""" output = self._show_vrf() vrfs = {} all_vrf_interfaces = {} for vrf in output: - if (vrf.get('route_distinguisher', '') == "" or - vrf.get('route_distinguisher', '') == 'None'): - vrf['route_distinguisher'] = u'' + if ( + vrf.get("route_distinguisher", "") == "" + or vrf.get("route_distinguisher", "") == "None" + ): + vrf["route_distinguisher"] = "" else: - vrf['route_distinguisher'] = py23_compat.text_type(vrf['route_distinguisher']) + vrf["route_distinguisher"] = py23_compat.text_type( + vrf["route_distinguisher"] + ) interfaces = {} - for interface_raw in vrf.get('interfaces', []): - interface = interface_raw.split(',') + for interface_raw in vrf.get("interfaces", []): + interface = interface_raw.split(",") for line in interface: - if line.strip() != '': + if line.strip() != "": interfaces[py23_compat.text_type(line.strip())] = {} all_vrf_interfaces[py23_compat.text_type(line.strip())] = {} - vrfs[py23_compat.text_type(vrf['name'])] = { - u'name': py23_compat.text_type(vrf['name']), - u'type': u'L3VRF', - u'state': { - u'route_distinguisher': vrf['route_distinguisher'], - }, - u'interfaces': { - u'interface': interfaces, - }, + vrfs[py23_compat.text_type(vrf["name"])] = { + "name": py23_compat.text_type(vrf["name"]), + "type": "L3VRF", + "state": {"route_distinguisher": vrf["route_distinguisher"]}, + "interfaces": {"interface": interfaces}, } all_interfaces = self.get_interfaces_ip().keys() - vrfs[u'default'] = { - u'name': u'default', - u'type': u'DEFAULT_INSTANCE', - u'state': { - u'route_distinguisher': u'', - }, - u'interfaces': { - u'interface': { + vrfs["default"] = { + "name": "default", + "type": "DEFAULT_INSTANCE", + "state": {"route_distinguisher": ""}, + "interfaces": { + "interface": { k: {} for k in all_interfaces if k not in all_vrf_interfaces.keys() - }, + } }, } @@ -1687,8 +1821,16 @@ def get_network_instances(self, name=''): else: return vrfs - def ping(self, destination, source=c.PING_SOURCE, ttl=c.PING_TTL, timeout=c.PING_TIMEOUT, - size=c.PING_SIZE, count=c.PING_COUNT, vrf=c.PING_VRF): + def ping( + self, + destination, + source=c.PING_SOURCE, + ttl=c.PING_TTL, + timeout=c.PING_TIMEOUT, + size=c.PING_SIZE, + count=c.PING_COUNT, + vrf=c.PING_VRF, + ): """ Execute ping on the device and returns a dictionary with the result. 
Output dictionary has one of following keys: @@ -1710,74 +1852,90 @@ def ping(self, destination, source=c.PING_SOURCE, ttl=c.PING_TTL, timeout=c.PING commands = [] if vrf: - commands.append('routing-context vrf {vrf}'.format(vrf=vrf)) + commands.append("routing-context vrf {vrf}".format(vrf=vrf)) - command = 'ping {}'.format(destination) - command += ' timeout {}'.format(timeout) - command += ' size {}'.format(size) - command += ' repeat {}'.format(count) - if source != '': - command += ' source {}'.format(source) + command = "ping {}".format(destination) + command += " timeout {}".format(timeout) + command += " size {}".format(size) + command += " repeat {}".format(count) + if source != "": + command += " source {}".format(source) commands.append(command) - output = self.device.run_commands(commands, encoding='text')[-1]['output'] - - if 'connect:' in output: - ping_dict['error'] = output - elif 'PING' in output: - ping_dict['success'] = { - 'probes_sent': 0, - 'packet_loss': 0, - 'rtt_min': 0.0, - 'rtt_max': 0.0, - 'rtt_avg': 0.0, - 'rtt_stddev': 0.0, - 'results': [] + output = self.device.run_commands(commands, encoding="text")[-1]["output"] + + if "connect:" in output: + ping_dict["error"] = output + elif "PING" in output: + ping_dict["success"] = { + "probes_sent": 0, + "packet_loss": 0, + "rtt_min": 0.0, + "rtt_max": 0.0, + "rtt_avg": 0.0, + "rtt_stddev": 0.0, + "results": [], } results_array = [] for line in output.splitlines(): fields = line.split() - if 'icmp' in line: - if 'Unreachable' in line: + if "icmp" in line: + if "Unreachable" in line: if "(" in fields[2]: results_array.append( { - 'ip_address': py23_compat.text_type(fields[2][1:-1]), - 'rtt': 0.0, + "ip_address": py23_compat.text_type( + fields[2][1:-1] + ), + "rtt": 0.0, } ) else: - results_array.append({'ip_address': py23_compat.text_type(fields[1]), - 'rtt': 0.0}) - elif 'truncated' in line: + results_array.append( + { + "ip_address": py23_compat.text_type(fields[1]), + "rtt": 0.0, + } + ) + elif "truncated" in line: if "(" in fields[4]: results_array.append( { - 'ip_address': py23_compat.text_type(fields[4][1:-2]), - 'rtt': 0.0, + "ip_address": py23_compat.text_type( + fields[4][1:-2] + ), + "rtt": 0.0, } ) else: results_array.append( { - 'ip_address': py23_compat.text_type(fields[3][:-1]), - 'rtt': 0.0, + "ip_address": py23_compat.text_type(fields[3][:-1]), + "rtt": 0.0, } ) - elif fields[1] == 'bytes': + elif fields[1] == "bytes": m = fields[6][5:] - results_array.append({'ip_address': py23_compat.text_type(fields[3][:-1]), - 'rtt': float(m)}) - elif 'packets transmitted' in line: - ping_dict['success']['probes_sent'] = int(fields[0]) - ping_dict['success']['packet_loss'] = int(fields[0]) - int(fields[3]) - elif 'min/avg/max' in line: - m = fields[3].split('/') - ping_dict['success'].update({ - 'rtt_min': float(m[0]), - 'rtt_avg': float(m[1]), - 'rtt_max': float(m[2]), - 'rtt_stddev': float(m[3]), - }) - ping_dict['success'].update({'results': results_array}) + results_array.append( + { + "ip_address": py23_compat.text_type(fields[3][:-1]), + "rtt": float(m), + } + ) + elif "packets transmitted" in line: + ping_dict["success"]["probes_sent"] = int(fields[0]) + ping_dict["success"]["packet_loss"] = int(fields[0]) - int( + fields[3] + ) + elif "min/avg/max" in line: + m = fields[3].split("/") + ping_dict["success"].update( + { + "rtt_min": float(m[0]), + "rtt_avg": float(m[1]), + "rtt_max": float(m[2]), + "rtt_stddev": float(m[3]), + } + ) + ping_dict["success"].update({"results": results_array}) return ping_dict 
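Illustrative usage sketch (not part of the patch): a minimal example of exercising the reformatted EOS getters from the hunk above. The hostname and credentials are placeholders, and the calls assume a reachable EOS device with eAPI enabled; this is only a sketch of the public driver API, not anything introduced by this commit.

# usage_sketch.py -- hypothetical example, placeholder device details
from napalm import get_network_driver

driver = get_network_driver("eos")
device = driver("switch1.example.net", "admin", "secret")  # placeholders
device.open()

# Structured ping results, as built by ping() above
print(device.ping("192.0.2.1", count=2))

# Running configuration only; "startup" and "candidate" come back empty
print(device.get_config(retrieve="running")["running"])

device.close()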
diff --git a/napalm/ios/__init__.py b/napalm/ios/__init__.py index 2c983a186..f86ba2acf 100644 --- a/napalm/ios/__init__.py +++ b/napalm/ios/__init__.py @@ -16,4 +16,4 @@ from napalm.ios.ios import IOSDriver -__all__ = ['IOSDriver'] +__all__ = ["IOSDriver"] diff --git a/napalm/ios/ios.py b/napalm/ios/ios.py index 5a918a0b9..069a4502a 100644 --- a/napalm/ios/ios.py +++ b/napalm/ios/ios.py @@ -31,8 +31,12 @@ import napalm.base.constants as C import napalm.base.helpers from napalm.base.base import NetworkDriver -from napalm.base.exceptions import ReplaceConfigException, MergeConfigException, \ - ConnectionClosedException, CommandErrorException +from napalm.base.exceptions import ( + ReplaceConfigException, + MergeConfigException, + ConnectionClosedException, + CommandErrorException, +) from napalm.base.helpers import canonical_interface_name from napalm.base.helpers import textfsm_extractor from napalm.base.netmiko_helpers import netmiko_args @@ -49,10 +53,14 @@ IPV4_ADDR_REGEX = IP_ADDR_REGEX IPV6_ADDR_REGEX_1 = r"::" IPV6_ADDR_REGEX_2 = r"[0-9a-fA-F:]{1,39}::[0-9a-fA-F:]{1,39}" -IPV6_ADDR_REGEX_3 = r"[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:" \ - "[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}" +IPV6_ADDR_REGEX_3 = ( + r"[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:" + "[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}:[0-9a-fA-F]{1,4}" +) # Should validate IPv6 address using an IP address library after matching with this regex -IPV6_ADDR_REGEX = "(?:{}|{}|{})".format(IPV6_ADDR_REGEX_1, IPV6_ADDR_REGEX_2, IPV6_ADDR_REGEX_3) +IPV6_ADDR_REGEX = "(?:{}|{}|{})".format( + IPV6_ADDR_REGEX_1, IPV6_ADDR_REGEX_2, IPV6_ADDR_REGEX_3 +) MAC_REGEX = r"[a-fA-F0-9]{4}\.[a-fA-F0-9]{4}\.[a-fA-F0-9]{4}" VLAN_REGEX = r"\d{1,4}" @@ -65,21 +73,21 @@ ASN_REGEX = r"[\d\.]+" IOS_COMMANDS = { - 'show_mac_address': ['show mac-address-table', 'show mac address-table'], + "show_mac_address": ["show mac-address-table", "show mac address-table"] } AFI_COMMAND_MAP = { - 'IPv4 Unicast': 'ipv4 unicast', - 'IPv6 Unicast': 'ipv6 unicast', - 'VPNv4 Unicast': 'vpnv4 all', - 'VPNv6 Unicast': 'vpnv6 unicast all', - 'IPv4 Multicast': 'ipv4 multicast', - 'IPv6 Multicast': 'ipv6 multicast', - 'L2VPN E-VPN': 'l2vpn evpn', - 'MVPNv4 Unicast': 'ipv4 mvpn all', - 'MVPNv6 Unicast': 'ipv6 mvpn all', - 'VPNv4 Flowspec': 'ipv4 flowspec', - 'VPNv6 Flowspec': 'ipv6 flowspec', + "IPv4 Unicast": "ipv4 unicast", + "IPv6 Unicast": "ipv6 unicast", + "VPNv4 Unicast": "vpnv4 all", + "VPNv6 Unicast": "vpnv6 unicast all", + "IPv4 Multicast": "ipv4 multicast", + "IPv6 Multicast": "ipv6 multicast", + "L2VPN E-VPN": "l2vpn evpn", + "MVPNv4 Unicast": "ipv4 mvpn all", + "MVPNv6 Unicast": "ipv6 mvpn all", + "VPNv4 Flowspec": "ipv4 flowspec", + "VPNv6 Flowspec": "ipv6 flowspec", } @@ -95,56 +103,54 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) self.password = password self.timeout = timeout - self.transport = optional_args.get('transport', 'ssh') + self.transport = optional_args.get("transport", "ssh") # Retrieve file names - self.candidate_cfg = optional_args.get('candidate_cfg', 'candidate_config.txt') - self.merge_cfg = optional_args.get('merge_cfg', 'merge_config.txt') - self.rollback_cfg = optional_args.get('rollback_cfg', 'rollback_config.txt') - self.inline_transfer = optional_args.get('inline_transfer', False) - if self.transport == 'telnet': + self.candidate_cfg = optional_args.get("candidate_cfg", "candidate_config.txt") + self.merge_cfg = 
optional_args.get("merge_cfg", "merge_config.txt") + self.rollback_cfg = optional_args.get("rollback_cfg", "rollback_config.txt") + self.inline_transfer = optional_args.get("inline_transfer", False) + if self.transport == "telnet": # Telnet only supports inline_transfer self.inline_transfer = True # None will cause autodetection of dest_file_system - self._dest_file_system = optional_args.get('dest_file_system', None) - self.auto_rollback_on_error = optional_args.get('auto_rollback_on_error', True) + self._dest_file_system = optional_args.get("dest_file_system", None) + self.auto_rollback_on_error = optional_args.get("auto_rollback_on_error", True) # Control automatic toggling of 'file prompt quiet' for file operations - self.auto_file_prompt = optional_args.get('auto_file_prompt', True) + self.auto_file_prompt = optional_args.get("auto_file_prompt", True) self.netmiko_optional_args = netmiko_args(optional_args) # Set the default port if not set - default_port = { - 'ssh': 22, - 'telnet': 23 - } - self.netmiko_optional_args.setdefault('port', default_port[self.transport]) + default_port = {"ssh": 22, "telnet": 23} + self.netmiko_optional_args.setdefault("port", default_port[self.transport]) self.device = None self.config_replace = False self.platform = "ios" self.profile = [self.platform] - self.use_canonical_interface = optional_args.get('canonical_int', False) + self.use_canonical_interface = optional_args.get("canonical_int", False) def open(self): """Open a connection to the device.""" - device_type = 'cisco_ios' - if self.transport == 'telnet': - device_type = 'cisco_ios_telnet' + device_type = "cisco_ios" + if self.transport == "telnet": + device_type = "cisco_ios_telnet" self.device = self._netmiko_open( - device_type, - netmiko_optional_args=self.netmiko_optional_args, + device_type, netmiko_optional_args=self.netmiko_optional_args ) def _discover_file_system(self): try: return self.device._autodetect_fs() except Exception: - msg = "Netmiko _autodetect_fs failed (to workaround specify " \ - "dest_file_system in optional_args.)" + msg = ( + "Netmiko _autodetect_fs failed (to workaround specify " + "dest_file_system in optional_args.)" + ) raise CommandErrorException(msg) def close(self): @@ -172,28 +178,28 @@ def is_alive(self): """Returns a flag with the state of the connection.""" null = chr(0) if self.device is None: - return {'is_alive': False} - if self.transport == 'telnet': + return {"is_alive": False} + if self.transport == "telnet": try: # Try sending IAC + NOP (IAC is telnet way of sending command # IAC = Interpret as Command (it comes before the NOP) self.device.write_channel(telnetlib.IAC + telnetlib.NOP) - return {'is_alive': True} + return {"is_alive": True} except UnicodeDecodeError: # Netmiko logging bug (remove after Netmiko >= 1.4.3) - return {'is_alive': True} + return {"is_alive": True} except AttributeError: - return {'is_alive': False} + return {"is_alive": False} else: # SSH try: # Try sending ASCII null byte to maintain the connection alive self.device.write_channel(null) - return {'is_alive': self.device.remote_conn.transport.is_active()} + return {"is_alive": self.device.remote_conn.transport.is_active()} except (socket.error, EOFError): # If unable to send, we can tell for sure that the connection is unusable - return {'is_alive': False} - return {'is_alive': False} + return {"is_alive": False} + return {"is_alive": False} @staticmethod def _create_tmp_file(config): @@ -201,44 +207,53 @@ def _create_tmp_file(config): tmp_dir = tempfile.gettempdir() 
rand_fname = py23_compat.text_type(uuid.uuid4()) filename = os.path.join(tmp_dir, rand_fname) - with open(filename, 'wt') as fobj: + with open(filename, "wt") as fobj: fobj.write(config) return filename - def _load_candidate_wrapper(self, source_file=None, source_config=None, dest_file=None, - file_system=None): + def _load_candidate_wrapper( + self, source_file=None, source_config=None, dest_file=None, file_system=None + ): """ Transfer file to remote device for either merge or replace operations Returns (return_status, msg) """ return_status = False - msg = '' + msg = "" if source_file and source_config: raise ValueError("Cannot simultaneously set source_file and source_config") if source_config: if self.inline_transfer: - (return_status, msg) = self._inline_tcl_xfer(source_config=source_config, - dest_file=dest_file, - file_system=file_system) + (return_status, msg) = self._inline_tcl_xfer( + source_config=source_config, + dest_file=dest_file, + file_system=file_system, + ) else: # Use SCP tmp_file = self._create_tmp_file(source_config) - (return_status, msg) = self._scp_file(source_file=tmp_file, dest_file=dest_file, - file_system=file_system) + (return_status, msg) = self._scp_file( + source_file=tmp_file, dest_file=dest_file, file_system=file_system + ) if tmp_file and os.path.isfile(tmp_file): os.remove(tmp_file) if source_file: if self.inline_transfer: - (return_status, msg) = self._inline_tcl_xfer(source_file=source_file, - dest_file=dest_file, - file_system=file_system) + (return_status, msg) = self._inline_tcl_xfer( + source_file=source_file, + dest_file=dest_file, + file_system=file_system, + ) else: - (return_status, msg) = self._scp_file(source_file=source_file, dest_file=dest_file, - file_system=file_system) + (return_status, msg) = self._scp_file( + source_file=source_file, + dest_file=dest_file, + file_system=file_system, + ) if not return_status: - if msg == '': + if msg == "": msg = "Transfer to remote device failed" return (return_status, msg) @@ -249,10 +264,12 @@ def load_replace_candidate(self, filename=None, config=None): Return None or raise exception """ self.config_replace = True - return_status, msg = self._load_candidate_wrapper(source_file=filename, - source_config=config, - dest_file=self.candidate_cfg, - file_system=self.dest_file_system) + return_status, msg = self._load_candidate_wrapper( + source_file=filename, + source_config=config, + dest_file=self.candidate_cfg, + file_system=self.dest_file_system, + ) if not return_status: raise ReplaceConfigException(msg) @@ -263,19 +280,24 @@ def load_merge_candidate(self, filename=None, config=None): Merge configuration in: copy running-config """ self.config_replace = False - return_status, msg = self._load_candidate_wrapper(source_file=filename, - source_config=config, - dest_file=self.merge_cfg, - file_system=self.dest_file_system) + return_status, msg = self._load_candidate_wrapper( + source_file=filename, + source_config=config, + dest_file=self.merge_cfg, + file_system=self.dest_file_system, + ) if not return_status: raise MergeConfigException(msg) def _normalize_compare_config(self, diff): """Filter out strings that should not show up in the diff.""" - ignore_strings = ['Contextual Config Diffs', 'No changes were found', - 'ntp clock-period'] + ignore_strings = [ + "Contextual Config Diffs", + "No changes were found", + "ntp clock-period", + ] if self.auto_file_prompt: - ignore_strings.append('file prompt quiet') + ignore_strings.append("file prompt quiet") new_list = [] for line in diff.splitlines(): @@ 
-301,25 +323,25 @@ def _normalize_merge_diff_incr(diff): changes_found = False for line in diff.splitlines(): - if re.search(r'order-dependent line.*re-ordered', line): + if re.search(r"order-dependent line.*re-ordered", line): changes_found = True - elif 'No changes were found' in line: + elif "No changes were found" in line: # IOS in the re-order case still claims "No changes were found" if not changes_found: - return '' + return "" else: continue - if line.strip() == 'end': + if line.strip() == "end": continue - elif 'List of Commands' in line: + elif "List of Commands" in line: continue # Filter blank lines and prepend +sign elif line.strip(): if re.search(r"^no\s+", line.strip()): - new_diff.append('-' + line) + new_diff.append("-" + line) else: - new_diff.append('+' + line) + new_diff.append("+" + line) return "\n".join(new_diff) @staticmethod @@ -329,11 +351,13 @@ def _normalize_merge_diff(diff): for line in diff.splitlines(): # Filter blank lines and prepend +sign if line.strip(): - new_diff.append('+' + line) + new_diff.append("+" + line) if new_diff: - new_diff.insert(0, '! incremental-diff failed; falling back to echo of merge file') + new_diff.insert( + 0, "! incremental-diff failed; falling back to echo of merge file" + ) else: - new_diff.append('! No changes specified in merge file.') + new_diff.append("! No changes specified in merge file.") return "\n".join(new_diff) def compare_config(self): @@ -343,32 +367,42 @@ def compare_config(self): Default operation is to compare system:running-config to self.candidate_cfg """ # Set defaults - base_file = 'running-config' - base_file_system = 'system:' + base_file = "running-config" + base_file_system = "system:" if self.config_replace: new_file = self.candidate_cfg else: new_file = self.merge_cfg new_file_system = self.dest_file_system - base_file_full = self._gen_full_path(filename=base_file, file_system=base_file_system) - new_file_full = self._gen_full_path(filename=new_file, file_system=new_file_system) + base_file_full = self._gen_full_path( + filename=base_file, file_system=base_file_system + ) + new_file_full = self._gen_full_path( + filename=new_file, file_system=new_file_system + ) if self.config_replace: - cmd = 'show archive config differences {} {}'.format(base_file_full, new_file_full) + cmd = "show archive config differences {} {}".format( + base_file_full, new_file_full + ) diff = self.device.send_command_expect(cmd) diff = self._normalize_compare_config(diff) else: # merge - cmd = 'show archive config incremental-diffs {} ignorecase'.format(new_file_full) + cmd = "show archive config incremental-diffs {} ignorecase".format( + new_file_full + ) diff = self.device.send_command_expect(cmd) - if 'error code 5' in diff or 'returned error 5' in diff: - diff = "You have encountered the obscure 'error 5' message. This generally " \ - "means you need to add an 'end' statement to the end of your merge changes." - elif '% Invalid' not in diff: + if "error code 5" in diff or "returned error 5" in diff: + diff = ( + "You have encountered the obscure 'error 5' message. This generally " + "means you need to add an 'end' statement to the end of your merge changes." 
+ ) + elif "% Invalid" not in diff: diff = self._normalize_merge_diff_incr(diff) else: - cmd = 'more {}'.format(new_file_full) + cmd = "more {}".format(new_file_full) diff = self.device.send_command_expect(cmd) diff = self._normalize_merge_diff(diff) @@ -376,29 +410,33 @@ def compare_config(self): def _file_prompt_quiet(f): """Decorator to toggle 'file prompt quiet' around methods that perform file operations.""" + @functools.wraps(f) def wrapper(self, *args, **kwargs): # only toggle config if 'auto_file_prompt' is true if self.auto_file_prompt: # disable file operation prompts - self.device.send_config_set(['file prompt quiet']) + self.device.send_config_set(["file prompt quiet"]) # call wrapped function retval = f(self, *args, **kwargs) # re-enable prompts - self.device.send_config_set(['no file prompt quiet']) + self.device.send_config_set(["no file prompt quiet"]) else: # check if the command is already in the running-config - cmd = 'file prompt quiet' + cmd = "file prompt quiet" show_cmd = "show running-config | inc {}".format(cmd) output = self.device.send_command_expect(show_cmd) if cmd in output: # call wrapped function retval = f(self, *args, **kwargs) else: - msg = "on-device file operations require prompts to be disabled. " \ - "Configure 'file prompt quiet' or set 'auto_file_prompt=True'" + msg = ( + "on-device file operations require prompts to be disabled. " + "Configure 'file prompt quiet' or set 'auto_file_prompt=True'" + ) raise CommandErrorException(msg) return retval + return wrapper @_file_prompt_quiet @@ -421,9 +459,7 @@ def _commit_handler(self, cmd): if re.search(pattern2, new_output): # Send confirmation if username removal new_output = self.device.send_command_timing( - "\n", - strip_prompt=False, - strip_command=False, + "\n", strip_prompt=False, strip_command=False ) output += new_output else: @@ -439,7 +475,9 @@ def commit_config(self, message=""): If merge operation, perform copy running-config. """ if message: - raise NotImplementedError('Commit message not implemented for this platform') + raise NotImplementedError( + "Commit message not implemented for this platform" + ) # Always generate a rollback config on commit self._gen_rollback_cfg() @@ -450,17 +488,19 @@ def commit_config(self, message=""): if not self._check_file_exists(cfg_file): raise ReplaceConfigException("Candidate config file does not exist") if self.auto_rollback_on_error: - cmd = 'configure replace {} force revert trigger error'.format(cfg_file) + cmd = "configure replace {} force revert trigger error".format(cfg_file) else: - cmd = 'configure replace {} force'.format(cfg_file) + cmd = "configure replace {} force".format(cfg_file) output = self._commit_handler(cmd) - if ('original configuration has been successfully restored' in output) or \ - ('error' in output.lower()) or \ - ('not a valid config file' in output.lower()) or \ - ('failed' in output.lower()): + if ( + ("original configuration has been successfully restored" in output) + or ("error" in output.lower()) + or ("not a valid config file" in output.lower()) + or ("failed" in output.lower()) + ): msg = "Candidate config could not be applied\n{}".format(output) raise ReplaceConfigException(msg) - elif '%Please turn config archive on' in output: + elif "%Please turn config archive on" in output: msg = "napalm-ios replace() requires Cisco 'archive' feature to be enabled." 
raise ReplaceConfigException(msg) else: @@ -469,9 +509,9 @@ def commit_config(self, message=""): cfg_file = self._gen_full_path(filename) if not self._check_file_exists(cfg_file): raise MergeConfigException("Merge source config file does not exist") - cmd = 'copy {} running-config'.format(cfg_file) + cmd = "copy {} running-config".format(cfg_file) output = self._commit_handler(cmd) - if 'Invalid input detected' in output: + if "Invalid input detected" in output: self.rollback() err_header = "Configuration merge failed; automatic rollback attempted" merge_error = "{0}:\n{1}".format(err_header, output) @@ -487,8 +527,10 @@ def discard_config(self): @_file_prompt_quiet def _discard_config(self): """Set candidate_cfg to current running-config. Erase the merge_cfg file.""" - discard_candidate = 'copy running-config {}'.format(self._gen_full_path(self.candidate_cfg)) - discard_merge = 'copy null: {}'.format(self._gen_full_path(self.merge_cfg)) + discard_candidate = "copy running-config {}".format( + self._gen_full_path(self.candidate_cfg) + ) + discard_merge = "copy null: {}".format(self._gen_full_path(self.merge_cfg)) self.device.send_command_expect(discard_candidate) self.device.send_command_expect(discard_merge) @@ -498,14 +540,15 @@ def rollback(self): cfg_file = self._gen_full_path(filename) if not self._check_file_exists(cfg_file): raise ReplaceConfigException("Rollback config file does not exist") - cmd = 'configure replace {} force'.format(cfg_file) + cmd = "configure replace {} force".format(cfg_file) self.device.send_command_expect(cmd) # Save config to startup self.device.save_config() - def _inline_tcl_xfer(self, source_file=None, source_config=None, dest_file=None, - file_system=None): + def _inline_tcl_xfer( + self, source_file=None, source_config=None, dest_file=None, file_system=None + ): """ Use Netmiko InlineFileTransfer (TCL) to transfer file or config to remote device. @@ -514,11 +557,19 @@ def _inline_tcl_xfer(self, source_file=None, source_config=None, dest_file=None, msg = details on what happened """ if source_file: - return self._xfer_file(source_file=source_file, dest_file=dest_file, - file_system=file_system, TransferClass=InLineTransfer) + return self._xfer_file( + source_file=source_file, + dest_file=dest_file, + file_system=file_system, + TransferClass=InLineTransfer, + ) if source_config: - return self._xfer_file(source_config=source_config, dest_file=dest_file, - file_system=file_system, TransferClass=InLineTransfer) + return self._xfer_file( + source_config=source_config, + dest_file=dest_file, + file_system=file_system, + TransferClass=InLineTransfer, + ) raise ValueError("File source not specified for transfer.") def _scp_file(self, source_file, dest_file, file_system): @@ -529,11 +580,21 @@ def _scp_file(self, source_file, dest_file, file_system): status = boolean msg = details on what happened """ - return self._xfer_file(source_file=source_file, dest_file=dest_file, - file_system=file_system, TransferClass=FileTransfer) + return self._xfer_file( + source_file=source_file, + dest_file=dest_file, + file_system=file_system, + TransferClass=FileTransfer, + ) - def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_system=None, - TransferClass=FileTransfer): + def _xfer_file( + self, + source_file=None, + source_config=None, + dest_file=None, + file_system=None, + TransferClass=FileTransfer, + ): """Transfer file to remote device. 
By default, this will use Secure Copy if self.inline_transfer is set, then will use @@ -550,11 +611,21 @@ def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_ raise ValueError("Destination file or file system not specified.") if source_file: - kwargs = dict(ssh_conn=self.device, source_file=source_file, dest_file=dest_file, - direction='put', file_system=file_system) + kwargs = dict( + ssh_conn=self.device, + source_file=source_file, + dest_file=dest_file, + direction="put", + file_system=file_system, + ) elif source_config: - kwargs = dict(ssh_conn=self.device, source_config=source_config, dest_file=dest_file, - direction='put', file_system=file_system) + kwargs = dict( + ssh_conn=self.device, + source_config=source_config, + dest_file=dest_file, + direction="put", + file_system=file_system, + ) use_scp = True if self.inline_transfer: use_scp = False @@ -570,12 +641,14 @@ def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_ return (False, msg) if use_scp: - cmd = 'ip scp server enable' + cmd = "ip scp server enable" show_cmd = "show running-config | inc {}".format(cmd) output = self.device.send_command_expect(show_cmd) if cmd not in output: - msg = "SCP file transfers are not enabled. " \ - "Configure 'ip scp server enable' on the device." + msg = ( + "SCP file transfers are not enabled. " + "Configure 'ip scp server enable' on the device." + ) raise CommandErrorException(msg) # Transfer file @@ -588,22 +661,24 @@ def _xfer_file(self, source_file=None, source_config=None, dest_file=None, file_ else: msg = "File transfer to remote device failed" return (False, msg) - return (False, '') + return (False, "") def _gen_full_path(self, filename, file_system=None): """Generate full file path on remote device.""" if file_system is None: - return '{}/{}'.format(self.dest_file_system, filename) + return "{}/{}".format(self.dest_file_system, filename) else: if ":" not in file_system: - raise ValueError("Invalid file_system specified: {}".format(file_system)) - return '{}/{}'.format(file_system, filename) + raise ValueError( + "Invalid file_system specified: {}".format(file_system) + ) + return "{}/{}".format(file_system, filename) @_file_prompt_quiet def _gen_rollback_cfg(self): """Save a configuration that can be used for rollback.""" cfg_file = self._gen_full_path(self.rollback_cfg) - cmd = 'copy running-config {}'.format(cfg_file) + cmd = "copy running-config {}".format(cfg_file) self.device.send_command_expect(cmd) def _check_file_exists(self, cfg_file): @@ -620,10 +695,10 @@ def _check_file_exists(self, cfg_file): return boolean """ - cmd = 'dir {}'.format(cfg_file) - success_pattern = 'Directory of {}'.format(cfg_file) + cmd = "dir {}".format(cfg_file) + success_pattern = "Directory of {}".format(cfg_file) output = self.device.send_command_expect(cmd) - if 'Error opening' in output: + if "Error opening" in output: return False elif success_pattern in output: return True @@ -642,18 +717,18 @@ def _send_command_postprocess(output): return output.strip() def get_optics(self): - command = 'show interfaces transceiver' + command = "show interfaces transceiver" output = self._send_command(command) # Check if router supports the command - if '% Invalid input' in output: + if "% Invalid input" in output: return {} # Formatting data into return data structure optics_detail = {} try: - split_output = re.split(r'^---------.*$', output, flags=re.M)[1] + split_output = re.split(r"^---------.*$", output, flags=re.M)[1] except IndexError: return {} @@ 
-672,9 +747,7 @@ def get_optics(self): port = canonical_interface_name(int_brief) - port_detail = { - 'physical_channels': {'channel': []} - } + port_detail = {"physical_channels": {"channel": []}} # If interface is shutdown it returns "N/A" as output power. # Converting that to -100.0 float @@ -686,30 +759,30 @@ def get_optics(self): # Defaulting avg, min, max values to -100.0 since device does not # return these values optic_states = { - 'index': 0, - 'state': { - 'input_power': { - 'instant': (float(input_power) if 'input_power' else -100.0), - 'avg': -100.0, - 'min': -100.0, - 'max': -100.0 + "index": 0, + "state": { + "input_power": { + "instant": (float(input_power) if "input_power" else -100.0), + "avg": -100.0, + "min": -100.0, + "max": -100.0, }, - 'output_power': { - 'instant': (float(output_power) if 'output_power' else -100.0), - 'avg': -100.0, - 'min': -100.0, - 'max': -100.0 + "output_power": { + "instant": (float(output_power) if "output_power" else -100.0), + "avg": -100.0, + "min": -100.0, + "max": -100.0, }, - 'laser_bias_current': { - 'instant': 0.0, - 'avg': 0.0, - 'min': 0.0, - 'max': 0.0 - } - } + "laser_bias_current": { + "instant": 0.0, + "avg": 0.0, + "min": 0.0, + "max": 0.0, + }, + }, } - port_detail['physical_channels']['channel'].append(optic_states) + port_detail["physical_channels"]["channel"].append(optic_states) optics_detail[port] = port_detail return optics_detail @@ -721,29 +794,28 @@ def get_lldp_neighbors(self): for intf_name, entries in neighbors_detail.items(): lldp[intf_name] = [] for lldp_entry in entries: - hostname = lldp_entry['remote_system_name'] + hostname = lldp_entry["remote_system_name"] # Match IOS behaviour of taking remote chassis ID # When lacking a system name (in show lldp neighbors) if hostname == "N/A": - hostname = lldp_entry['remote_chassis_id'] - lldp_dict = { - 'port': lldp_entry['remote_port'], - 'hostname': hostname, - } + hostname = lldp_entry["remote_chassis_id"] + lldp_dict = {"port": lldp_entry["remote_port"], "hostname": hostname} lldp[intf_name].append(lldp_dict) return lldp - def get_lldp_neighbors_detail(self, interface=''): + def get_lldp_neighbors_detail(self, interface=""): lldp = {} lldp_interfaces = [] if interface: - command = 'show lldp neighbors {} detail'.format(interface) + command = "show lldp neighbors {} detail".format(interface) else: - command = 'show lldp neighbors detail' + command = "show lldp neighbors detail" lldp_entries = self._send_command(command) - lldp_entries = textfsm_extractor(self, 'show_lldp_neighbors_detail', lldp_entries) + lldp_entries = textfsm_extractor( + self, "show_lldp_neighbors_detail", lldp_entries + ) if len(lldp_entries) == 0: return {} @@ -753,12 +825,12 @@ def get_lldp_neighbors_detail(self, interface=''): # which is in the same sequence as the detailed output if not lldp_entries[0]["local_interface"]: if interface: - command = 'show lldp neighbors {}'.format(interface) + command = "show lldp neighbors {}".format(interface) else: - command = 'show lldp neighbors' + command = "show lldp neighbors" lldp_brief = self._send_command(command) - lldp_interfaces = textfsm_extractor(self, 'show_lldp_neighbors', lldp_brief) - lldp_interfaces = [x['local_interface'] for x in lldp_interfaces] + lldp_interfaces = textfsm_extractor(self, "show_lldp_neighbors", lldp_brief) + lldp_interfaces = [x["local_interface"] for x in lldp_interfaces] if len(lldp_interfaces) != len(lldp_entries): raise ValueError( "LLDP neighbor count has changed between commands. 
" @@ -766,13 +838,13 @@ def get_lldp_neighbors_detail(self, interface=''): ) for idx, lldp_entry in enumerate(lldp_entries): - local_intf = lldp_entry.pop('local_interface') or lldp_interfaces[idx] + local_intf = lldp_entry.pop("local_interface") or lldp_interfaces[idx] # Convert any 'not advertised' to 'N/A' for field in lldp_entry: - if 'not advertised' in lldp_entry[field]: - lldp_entry[field] = 'N/A' + if "not advertised" in lldp_entry[field]: + lldp_entry[field] = "N/A" # Add field missing on IOS - lldp_entry['parent_interface'] = u'N/A' + lldp_entry["parent_interface"] = "N/A" # Turn the interfaces into their long version local_intf = canonical_interface_name(local_intf) lldp.setdefault(local_intf, []) @@ -791,7 +863,7 @@ def parse_uptime(uptime_str): (years, weeks, days, hours, minutes) = (0, 0, 0, 0, 0) uptime_str = uptime_str.strip() - time_list = uptime_str.split(',') + time_list = uptime_str.split(",") for element in time_list: if re.search("year", element): years = int(element.split()[0]) @@ -804,30 +876,35 @@ def parse_uptime(uptime_str): elif re.search("minute", element): minutes = int(element.split()[0]) - uptime_sec = (years * YEAR_SECONDS) + (weeks * WEEK_SECONDS) + (days * DAY_SECONDS) + \ - (hours * 3600) + (minutes * 60) + uptime_sec = ( + (years * YEAR_SECONDS) + + (weeks * WEEK_SECONDS) + + (days * DAY_SECONDS) + + (hours * 3600) + + (minutes * 60) + ) return uptime_sec def get_facts(self): """Return a set of facts from the devices.""" # default values. - vendor = u'Cisco' + vendor = "Cisco" uptime = -1 - serial_number, fqdn, os_version, hostname, domain_name = ('Unknown',) * 5 + serial_number, fqdn, os_version, hostname, domain_name = ("Unknown",) * 5 # obtain output from device - show_ver = self._send_command('show version') - show_hosts = self._send_command('show hosts') - show_ip_int_br = self._send_command('show ip interface brief') + show_ver = self._send_command("show version") + show_hosts = self._send_command("show hosts") + show_ip_int_br = self._send_command("show ip interface brief") # uptime/serial_number/IOS version for line in show_ver.splitlines(): - if ' uptime is ' in line: - hostname, uptime_str = line.split(' uptime is ') + if " uptime is " in line: + hostname, uptime_str = line.split(" uptime is ") uptime = self.parse_uptime(uptime_str) hostname = hostname.strip() - if 'Processor board ID' in line: + if "Processor board ID" in line: _, serial_number = line.split("Processor board ID ") serial_number = serial_number.strip() @@ -844,38 +921,40 @@ def get_facts(self): # Determine domain_name and fqdn for line in show_hosts.splitlines(): - if 'Default domain' in line: + if "Default domain" in line: _, domain_name = line.split("Default domain is ") domain_name = domain_name.strip() break - if domain_name != 'Unknown' and hostname != 'Unknown': - fqdn = u'{}.{}'.format(hostname, domain_name) + if domain_name != "Unknown" and hostname != "Unknown": + fqdn = "{}.{}".format(hostname, domain_name) # model filter try: - match_model = re.search(r"Cisco (.+?) .+bytes of", show_ver, flags=re.IGNORECASE) + match_model = re.search( + r"Cisco (.+?) 
.+bytes of", show_ver, flags=re.IGNORECASE + ) model = match_model.group(1) except AttributeError: - model = u'Unknown' + model = "Unknown" # interface_list filter interface_list = [] show_ip_int_br = show_ip_int_br.strip() for line in show_ip_int_br.splitlines(): - if 'Interface ' in line: + if "Interface " in line: continue interface = line.split()[0] interface_list.append(interface) return { - 'uptime': uptime, - 'vendor': vendor, - 'os_version': py23_compat.text_type(os_version), - 'serial_number': py23_compat.text_type(serial_number), - 'model': py23_compat.text_type(model), - 'hostname': py23_compat.text_type(hostname), - 'fqdn': fqdn, - 'interface_list': interface_list + "uptime": uptime, + "vendor": vendor, + "os_version": py23_compat.text_type(os_version), + "serial_number": py23_compat.text_type(serial_number), + "model": py23_compat.text_type(model), + "hostname": py23_compat.text_type(hostname), + "fqdn": fqdn, + "interface_list": interface_list, } def get_interfaces(self): @@ -908,10 +987,10 @@ def get_interfaces(self): # default values. last_flapped = -1.0 - command = 'show interfaces' + command = "show interfaces" output = self._send_command(command) - interface = description = mac_address = speed = speedformat = '' + interface = description = mac_address = speed = speedformat = "" is_enabled = is_up = None interface_dict = {} @@ -927,15 +1006,15 @@ def get_interfaces(self): try: protocol = interface_match.group(3) except IndexError: - protocol = '' - if 'admin' in status.lower(): + protocol = "" + if "admin" in status.lower(): is_enabled = False else: is_enabled = True if protocol: - is_up = bool('up' in protocol) + is_up = bool("up" in protocol) else: - is_up = bool('up' in status) + is_up = bool("up" in status) break mac_addr_regex = r"^\s+Hardware.+address\s+is\s+({})".format(MAC_REGEX) @@ -954,23 +1033,30 @@ def get_interfaces(self): speed = speed_match.groups()[0] speedformat = speed_match.groups()[1] speed = float(speed) - if speedformat.startswith('Kb'): + if speedformat.startswith("Kb"): speed = speed / 1000.0 - elif speedformat.startswith('Gb'): + elif speedformat.startswith("Gb"): speed = speed * 1000 speed = int(round(speed)) - if interface == '': - raise ValueError("Interface attributes were \ - found without any known interface") + if interface == "": + raise ValueError( + "Interface attributes were \ + found without any known interface" + ) if not isinstance(is_up, bool) or not isinstance(is_enabled, bool): raise ValueError("Did not correctly find the interface status") - interface_dict[interface] = {'is_enabled': is_enabled, 'is_up': is_up, - 'description': description, 'mac_address': mac_address, - 'last_flapped': last_flapped, 'speed': speed} + interface_dict[interface] = { + "is_enabled": is_enabled, + "is_up": is_up, + "description": description, + "mac_address": mac_address, + "last_flapped": last_flapped, + "speed": speed, + } - interface = description = mac_address = speed = speedformat = '' + interface = description = mac_address = speed = speedformat = "" is_enabled = is_up = None return interface_dict @@ -998,40 +1084,44 @@ def get_interfaces_ip(self): """ interfaces = {} - command = 'show ip interface' + command = "show ip interface" show_ip_interface = self._send_command(command) - command = 'show ipv6 interface' + command = "show ipv6 interface" show_ipv6_interface = self._send_command(command) - INTERNET_ADDRESS = r'\s+(?:Internet address is|Secondary address)' - INTERNET_ADDRESS += r' (?P{})/(?P\d+)'.format(IPV4_ADDR_REGEX) - LINK_LOCAL_ADDRESS 
= r'\s+IPv6 is enabled, link-local address is (?P[a-fA-F0-9:]+)' - GLOBAL_ADDRESS = r'\s+(?P[a-fA-F0-9:]+), subnet is (?:[a-fA-F0-9:]+)/(?P\d+)' + INTERNET_ADDRESS = r"\s+(?:Internet address is|Secondary address)" + INTERNET_ADDRESS += r" (?P{})/(?P\d+)".format(IPV4_ADDR_REGEX) + LINK_LOCAL_ADDRESS = ( + r"\s+IPv6 is enabled, link-local address is (?P[a-fA-F0-9:]+)" + ) + GLOBAL_ADDRESS = ( + r"\s+(?P[a-fA-F0-9:]+), subnet is (?:[a-fA-F0-9:]+)/(?P\d+)" + ) interfaces = {} for line in show_ip_interface.splitlines(): - if(len(line.strip()) == 0): + if len(line.strip()) == 0: continue - if(line[0] != ' '): + if line[0] != " ": ipv4 = {} interface_name = line.split()[0] m = re.match(INTERNET_ADDRESS, line) if m: ip, prefix = m.groups() ipv4.update({ip: {"prefix_length": int(prefix)}}) - interfaces[interface_name] = {'ipv4': ipv4} + interfaces[interface_name] = {"ipv4": ipv4} - if '% Invalid input detected at' not in show_ipv6_interface: + if "% Invalid input detected at" not in show_ipv6_interface: for line in show_ipv6_interface.splitlines(): - if(len(line.strip()) == 0): + if len(line.strip()) == 0: continue - if(line[0] != ' '): + if line[0] != " ": ifname = line.split()[0] ipv6 = {} if ifname not in interfaces: - interfaces[ifname] = {'ipv6': ipv6} + interfaces[ifname] = {"ipv6": ipv6} else: - interfaces[ifname].update({'ipv6': ipv6}) + interfaces[ifname].update({"ipv6": ipv6}) m = re.match(LINK_LOCAL_ADDRESS, line) if m: ip = m.group(1) @@ -1063,20 +1153,20 @@ def bgp_time_conversion(bgp_uptime): never """ bgp_uptime = bgp_uptime.strip() - uptime_letters = set(['w', 'h', 'd']) + uptime_letters = set(["w", "h", "d"]) - if 'never' in bgp_uptime: + if "never" in bgp_uptime: return -1 - elif ':' in bgp_uptime: + elif ":" in bgp_uptime: times = bgp_uptime.split(":") times = [int(x) for x in times] hours, minutes, seconds = times return (hours * 3600) + (minutes * 60) + seconds # Check if any letters 'w', 'h', 'd' are in the time string elif uptime_letters & set(bgp_uptime): - form1 = r'(\d+)d(\d+)h' # 1d17h - form2 = r'(\d+)w(\d+)d' # 8w5d - form3 = r'(\d+)y(\d+)w' # 1y28w + form1 = r"(\d+)d(\d+)h" # 1d17h + form2 = r"(\d+)w(\d+)d" # 8w5d + form3 = r"(\d+)y(\d+)w" # 1y28w match = re.search(form1, bgp_uptime) if match: days = int(match.group(1)) @@ -1092,128 +1182,188 @@ def bgp_time_conversion(bgp_uptime): years = int(match.group(1)) weeks = int(match.group(2)) return (years * YEAR_SECONDS) + (weeks * WEEK_SECONDS) - raise ValueError("Unexpected value for BGP uptime string: {}".format(bgp_uptime)) + raise ValueError( + "Unexpected value for BGP uptime string: {}".format(bgp_uptime) + ) def get_bgp_neighbors(self): """BGP neighbor information. Currently no VRF support. Supports both IPv4 and IPv6. 
""" - supported_afi = ['ipv4', 'ipv6'] + supported_afi = ["ipv4", "ipv6"] bgp_neighbor_data = dict() - bgp_neighbor_data['global'] = {} + bgp_neighbor_data["global"] = {} # get summary output from device - cmd_bgp_all_sum = 'show bgp all summary' + cmd_bgp_all_sum = "show bgp all summary" summary_output = self._send_command(cmd_bgp_all_sum).strip() # get neighbor output from device - neighbor_output = '' + neighbor_output = "" for afi in supported_afi: - cmd_bgp_neighbor = 'show bgp %s unicast neighbors' % afi + cmd_bgp_neighbor = "show bgp %s unicast neighbors" % afi neighbor_output += self._send_command(cmd_bgp_neighbor).strip() # trailing newline required for parsing neighbor_output += "\n" # Regular expressions used for parsing BGP summary parse_summary = { - 'patterns': [ + "patterns": [ # For address family: IPv4 Unicast - {'regexp': re.compile(r'^For address family: (?P\S+) '), - 'record': False}, + { + "regexp": re.compile(r"^For address family: (?P\S+) "), + "record": False, + }, # Capture router_id and local_as values, e.g.: # BGP router identifier 10.0.1.1, local AS number 65000 - {'regexp': re.compile(r'^.* router identifier (?P{}), ' - r'local AS number (?P{})'.format( - IPV4_ADDR_REGEX, ASN_REGEX - )), - 'record': False}, + { + "regexp": re.compile( + r"^.* router identifier (?P{}), " + r"local AS number (?P{})".format( + IPV4_ADDR_REGEX, ASN_REGEX + ) + ), + "record": False, + }, # Match neighbor summary row, capturing useful details and # discarding the 5 columns that we don't care about, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 10.0.0.2 4 65000 1336020 64337701 1011343614 0 0 8w0d 3143 - {'regexp': re.compile(r'^\*?(?P({})|({}))' - r'\s+\d+\s+(?P{})(\s+\S+){{5}}\s+' - r'(?P(never)|\d+\S+)' - r'\s+(?P\d+)'.format( - IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX - )), - 'record': True}, + { + "regexp": re.compile( + r"^\*?(?P({})|({}))" + r"\s+\d+\s+(?P{})(\s+\S+){{5}}\s+" + r"(?P(never)|\d+\S+)" + r"\s+(?P\d+)".format( + IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX + ) + ), + "record": True, + }, # Same as above, but for peer that are not Established, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 192.168.0.2 4 65002 0 0 1 0 0 never Active - {'regexp': re.compile(r'^\*?(?P({})|({}))' - r'\s+\d+\s+(?P{})(\s+\S+){{5}}\s+' - r'(?P(never)|\d+\S+)\s+(?P\D.*)'.format( - IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX - )), - 'record': True}, + { + "regexp": re.compile( + r"^\*?(?P({})|({}))" + r"\s+\d+\s+(?P{})(\s+\S+){{5}}\s+" + r"(?P(never)|\d+\S+)\s+(?P\D.*)".format( + IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX + ) + ), + "record": True, + }, # ipv6 peers often break accross rows because of the longer peer address, # match as above, but in separate expressions, e.g.: # Neighbor V AS MsgRcvd MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 2001:DB8::4 # 4 65004 9900690 612449 155362939 0 0 26w6d 36391 - {'regexp': re.compile(r'^\*?(?P({})|({}))'.format( - IPV4_ADDR_REGEX, IPV6_ADDR_REGEX - )), - 'record': False}, - {'regexp': re.compile(r'^\s+\d+\s+(?P{})(\s+\S+){{5}}\s+' - r'(?P(never)|\d+\S+)' - r'\s+(?P\d+)'.format( - ASN_REGEX - )), - 'record': True}, + { + "regexp": re.compile( + r"^\*?(?P({})|({}))".format( + IPV4_ADDR_REGEX, IPV6_ADDR_REGEX + ) + ), + "record": False, + }, + { + "regexp": re.compile( + r"^\s+\d+\s+(?P{})(\s+\S+){{5}}\s+" + r"(?P(never)|\d+\S+)" + r"\s+(?P\d+)".format(ASN_REGEX) + ), + "record": True, + }, # Same as above, but for peers that are not Established, e.g.: # Neighbor V AS MsgRcvd 
MsgSent TblVer InQ OutQ Up/Down State/PfxRcd # 2001:DB8::3 # 4 65003 0 0 1 0 0 never Idle (Admin) - {'regexp': re.compile(r'^\s+\d+\s+(?P{})(\s+\S+){{5}}\s+' - r'(?P(never)|\d+\S+)\s+(?P\D.*)'.format( - ASN_REGEX - )), - 'record': True} + { + "regexp": re.compile( + r"^\s+\d+\s+(?P{})(\s+\S+){{5}}\s+" + r"(?P(never)|\d+\S+)\s+(?P\D.*)".format( + ASN_REGEX + ) + ), + "record": True, + }, + ], + "no_fill_fields": [ + "accepted_prefixes", + "state", + "uptime", + "remote_as", + "remote_addr", ], - 'no_fill_fields': ['accepted_prefixes', 'state', 'uptime', 'remote_as', 'remote_addr'] } parse_neighbors = { - 'patterns': [ + "patterns": [ # Capture BGP neighbor is 10.0.0.2, remote AS 65000, internal link - {'regexp': re.compile(r'^BGP neighbor is (?P({})|({})),' - r'\s+remote AS (?P{}).*'.format( - IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX - )), - 'record': False}, + { + "regexp": re.compile( + r"^BGP neighbor is (?P({})|({}))," + r"\s+remote AS (?P{}).*".format( + IPV4_ADDR_REGEX, IPV6_ADDR_REGEX, ASN_REGEX + ) + ), + "record": False, + }, # Capture description - {'regexp': re.compile(r'^\s+Description: (?P.+)'), - 'record': False}, + { + "regexp": re.compile(r"^\s+Description: (?P.+)"), + "record": False, + }, # Capture remote_id, e.g.: # BGP version 4, remote router ID 10.0.1.2 - {'regexp': re.compile(r'^\s+BGP version \d+, remote router ID ' - r'(?P{})'.format(IPV4_ADDR_REGEX)), - 'record': False}, + { + "regexp": re.compile( + r"^\s+BGP version \d+, remote router ID " + r"(?P{})".format(IPV4_ADDR_REGEX) + ), + "record": False, + }, # Capture state - {'regexp': re.compile(r'^\s+BGP state = (?P\w+)'), - 'record': False}, + { + "regexp": re.compile(r"^\s+BGP state = (?P\w+)"), + "record": False, + }, # Capture AFI and SAFI names, e.g.: # For address family: IPv4 Unicast - {'regexp': re.compile(r'^\s+For address family: (?P\S+) '), - 'record': False}, + { + "regexp": re.compile(r"^\s+For address family: (?P\S+) "), + "record": False, + }, # Capture current sent and accepted prefixes, e.g.: # Prefixes Current: 637213 3142 (Consumes 377040 bytes) - {'regexp': re.compile(r'^\s+Prefixes Current:\s+(?P\d+)\s+' - r'(?P\d+).*'), - 'record': False}, + { + "regexp": re.compile( + r"^\s+Prefixes Current:\s+(?P\d+)\s+" + r"(?P\d+).*" + ), + "record": False, + }, # Capture received_prefixes if soft-reconfig is enabled for the peer - {'regexp': re.compile(r'^\s+Saved (soft-reconfig):.+(?P\d+).*'), - 'record': True}, + { + "regexp": re.compile( + r"^\s+Saved (soft-reconfig):.+(?P\d+).*" + ), + "record": True, + }, # Otherwise, use the following as an end of row marker - {'regexp': re.compile(r'^\s+Local Policy Denied Prefixes:.+'), - 'record': True} + { + "regexp": re.compile(r"^\s+Local Policy Denied Prefixes:.+"), + "record": True, + }, ], # fields that should not be "filled down" across table rows - 'no_fill_fields': ['received_prefixes', 'accepted_prefixes', 'sent_prefixes'] + "no_fill_fields": [ + "received_prefixes", + "accepted_prefixes", + "sent_prefixes", + ], } # Parse outputs into a list of dicts @@ -1222,17 +1372,17 @@ def get_bgp_neighbors(self): for line in summary_output.splitlines(): # check for matches against each pattern - for item in parse_summary['patterns']: - match = item['regexp'].match(line) + for item in parse_summary["patterns"]: + match = item["regexp"].match(line) if match: # a match was found, so update the temp entry with the match's groupdict summary_data_entry.update(match.groupdict()) - if item['record']: + if item["record"]: # Record indicates the last piece of data has 
been obtained; move # on to next entry summary_data.append(copy.deepcopy(summary_data_entry)) # remove keys that are listed in no_fill_fields before the next pass - for field in parse_summary['no_fill_fields']: + for field in parse_summary["no_fill_fields"]: try: del summary_data_entry[field] except KeyError: @@ -1243,17 +1393,17 @@ def get_bgp_neighbors(self): neighbor_data_entry = {} for line in neighbor_output.splitlines(): # check for matches against each pattern - for item in parse_neighbors['patterns']: - match = item['regexp'].match(line) + for item in parse_neighbors["patterns"]: + match = item["regexp"].match(line) if match: # a match was found, so update the temp entry with the match's groupdict neighbor_data_entry.update(match.groupdict()) - if item['record']: + if item["record"]: # Record indicates the last piece of data has been obtained; move # on to next entry neighbor_data.append(copy.deepcopy(neighbor_data_entry)) # remove keys that are listed in no_fill_fields before the next pass - for field in parse_neighbors['no_fill_fields']: + for field in parse_neighbors["no_fill_fields"]: try: del neighbor_data_entry[field] except KeyError: @@ -1264,39 +1414,43 @@ def get_bgp_neighbors(self): for entry in summary_data: if not router_id: - router_id = entry['router_id'] - elif entry['router_id'] != router_id: + router_id = entry["router_id"] + elif entry["router_id"] != router_id: raise ValueError # check the router_id looks like an ipv4 address router_id = napalm.base.helpers.ip(router_id, version=4) # add parsed data to output dict - bgp_neighbor_data['global']['router_id'] = router_id - bgp_neighbor_data['global']['peers'] = {} + bgp_neighbor_data["global"]["router_id"] = router_id + bgp_neighbor_data["global"]["peers"] = {} for entry in summary_data: - remote_addr = napalm.base.helpers.ip(entry['remote_addr']) - afi = entry['afi'].lower() + remote_addr = napalm.base.helpers.ip(entry["remote_addr"]) + afi = entry["afi"].lower() # check that we're looking at a supported afi if afi not in supported_afi: continue # get neighbor_entry out of neighbor data neighbor_entry = None for neighbor in neighbor_data: - if (neighbor['afi'].lower() == afi and - napalm.base.helpers.ip(neighbor['remote_addr']) == remote_addr): + if ( + neighbor["afi"].lower() == afi + and napalm.base.helpers.ip(neighbor["remote_addr"]) == remote_addr + ): neighbor_entry = neighbor break # check for proper session data for the afi if neighbor_entry is None: continue elif not isinstance(neighbor_entry, dict): - raise ValueError(msg="Couldn't find neighbor data for %s in afi %s" % - (remote_addr, afi)) + raise ValueError( + msg="Couldn't find neighbor data for %s in afi %s" + % (remote_addr, afi) + ) # check for admin down state try: - if "(Admin)" in entry['state']: + if "(Admin)" in entry["state"]: is_enabled = False else: is_enabled = True @@ -1304,14 +1458,14 @@ def get_bgp_neighbors(self): is_enabled = True # parse uptime value - uptime = self.bgp_time_conversion(entry['uptime']) + uptime = self.bgp_time_conversion(entry["uptime"]) # BGP is up if state is Established - is_up = 'Established' in neighbor_entry['state'] + is_up = "Established" in neighbor_entry["state"] # check whether session is up for address family and get prefix count try: - accepted_prefixes = int(entry['accepted_prefixes']) + accepted_prefixes = int(entry["accepted_prefixes"]) except (ValueError, KeyError): accepted_prefixes = -1 @@ -1319,15 +1473,17 @@ def get_bgp_neighbors(self): if is_up: try: # overide accepted_prefixes with neighbor data 
if possible (since that's newer) - accepted_prefixes = int(neighbor_entry['accepted_prefixes']) + accepted_prefixes = int(neighbor_entry["accepted_prefixes"]) except (ValueError, KeyError): pass # try to get received prefix count, otherwise set to accepted_prefixes - received_prefixes = neighbor_entry.get('received_prefixes', accepted_prefixes) + received_prefixes = neighbor_entry.get( + "received_prefixes", accepted_prefixes + ) # try to get sent prefix count and convert to int, otherwise set to -1 - sent_prefixes = int(neighbor_entry.get('sent_prefixes', -1)) + sent_prefixes = int(neighbor_entry.get("sent_prefixes", -1)) else: received_prefixes = -1 sent_prefixes = -1 @@ -1335,146 +1491,191 @@ def get_bgp_neighbors(self): # get description try: - description = py23_compat.text_type(neighbor_entry['description']) + description = py23_compat.text_type(neighbor_entry["description"]) except KeyError: - description = '' + description = "" # check the remote router_id looks like an ipv4 address - remote_id = napalm.base.helpers.ip(neighbor_entry['remote_id'], version=4) - - if remote_addr not in bgp_neighbor_data['global']['peers']: - bgp_neighbor_data['global']['peers'][remote_addr] = { - 'local_as': napalm.base.helpers.as_number(entry['local_as']), - 'remote_as': napalm.base.helpers.as_number(entry['remote_as']), - 'remote_id': remote_id, - 'is_up': is_up, - 'is_enabled': is_enabled, - 'description': description, - 'uptime': uptime, - 'address_family': { + remote_id = napalm.base.helpers.ip(neighbor_entry["remote_id"], version=4) + + if remote_addr not in bgp_neighbor_data["global"]["peers"]: + bgp_neighbor_data["global"]["peers"][remote_addr] = { + "local_as": napalm.base.helpers.as_number(entry["local_as"]), + "remote_as": napalm.base.helpers.as_number(entry["remote_as"]), + "remote_id": remote_id, + "is_up": is_up, + "is_enabled": is_enabled, + "description": description, + "uptime": uptime, + "address_family": { afi: { - 'received_prefixes': received_prefixes, - 'accepted_prefixes': accepted_prefixes, - 'sent_prefixes': sent_prefixes + "received_prefixes": received_prefixes, + "accepted_prefixes": accepted_prefixes, + "sent_prefixes": sent_prefixes, } - } + }, } else: # found previous data for matching remote_addr, but for different afi - existing = bgp_neighbor_data['global']['peers'][remote_addr] - assert afi not in existing['address_family'] + existing = bgp_neighbor_data["global"]["peers"][remote_addr] + assert afi not in existing["address_family"] # compare with existing values and croak if they don't match - assert existing['local_as'] == napalm.base.helpers.as_number(entry['local_as']) - assert existing['remote_as'] == napalm.base.helpers.as_number(entry['remote_as']) - assert existing['remote_id'] == remote_id - assert existing['is_enabled'] == is_enabled - assert existing['description'] == description + assert existing["local_as"] == napalm.base.helpers.as_number( + entry["local_as"] + ) + assert existing["remote_as"] == napalm.base.helpers.as_number( + entry["remote_as"] + ) + assert existing["remote_id"] == remote_id + assert existing["is_enabled"] == is_enabled + assert existing["description"] == description # merge other values in a sane manner - existing['is_up'] = existing['is_up'] or is_up - existing['uptime'] = max(existing['uptime'], uptime) - existing['address_family'][afi] = { - 'received_prefixes': received_prefixes, - 'accepted_prefixes': accepted_prefixes, - 'sent_prefixes': sent_prefixes + existing["is_up"] = existing["is_up"] or is_up + existing["uptime"] = 
max(existing["uptime"], uptime) + existing["address_family"][afi] = { + "received_prefixes": received_prefixes, + "accepted_prefixes": accepted_prefixes, + "sent_prefixes": sent_prefixes, } return bgp_neighbor_data - def get_bgp_neighbors_detail(self, neighbor_address=''): + def get_bgp_neighbors_detail(self, neighbor_address=""): bgp_detail = defaultdict(lambda: defaultdict(lambda: [])) - raw_bgp_sum = self._send_command('show ip bgp all sum').strip() + raw_bgp_sum = self._send_command("show ip bgp all sum").strip() bgp_sum = napalm.base.helpers.textfsm_extractor( - self, 'ip_bgp_all_sum', raw_bgp_sum) + self, "ip_bgp_all_sum", raw_bgp_sum + ) for neigh in bgp_sum: - if neighbor_address and neighbor_address != neigh['neighbor']: + if neighbor_address and neighbor_address != neigh["neighbor"]: continue - raw_bgp_neigh = self._send_command('show ip bgp {} neigh {}'.format( - AFI_COMMAND_MAP[neigh['addr_family']], neigh['neighbor'])) + raw_bgp_neigh = self._send_command( + "show ip bgp {} neigh {}".format( + AFI_COMMAND_MAP[neigh["addr_family"]], neigh["neighbor"] + ) + ) bgp_neigh = napalm.base.helpers.textfsm_extractor( - self, 'ip_bgp_neigh', raw_bgp_neigh)[0] + self, "ip_bgp_neigh", raw_bgp_neigh + )[0] details = { - 'up': neigh['up'] != 'never', - 'local_as': napalm.base.helpers.as_number(neigh['local_as']), - 'remote_as': napalm.base.helpers.as_number(neigh['remote_as']), - 'router_id': napalm.base.helpers.ip( - bgp_neigh['router_id']) if bgp_neigh['router_id'] else '', - 'local_address': napalm.base.helpers.ip( - bgp_neigh['local_address']) if bgp_neigh['local_address'] else '', - 'local_address_configured': False, - 'local_port': napalm.base.helpers.as_number( - bgp_neigh['local_port']) if bgp_neigh['local_port'] else 0, - 'routing_table': bgp_neigh['vrf'] if bgp_neigh['vrf'] else 'global', - 'remote_address': napalm.base.helpers.ip(bgp_neigh['neighbor']), - 'remote_port': napalm.base.helpers.as_number( - bgp_neigh['remote_port']) if bgp_neigh['remote_port'] else 0, - 'multihop': False, - 'multipath': False, - 'remove_private_as': False, - 'import_policy': '', - 'export_policy': '', - 'input_messages': napalm.base.helpers.as_number( - bgp_neigh['msg_total_in']) if bgp_neigh['msg_total_in'] else 0, - 'output_messages': napalm.base.helpers.as_number( - bgp_neigh['msg_total_out']) if bgp_neigh['msg_total_out'] else 0, - 'input_updates': napalm.base.helpers.as_number( - bgp_neigh['msg_update_in']) if bgp_neigh['msg_update_in'] else 0, - 'output_updates': napalm.base.helpers.as_number( - bgp_neigh['msg_update_out']) if bgp_neigh['msg_update_out'] else 0, - 'messages_queued_out': napalm.base.helpers.as_number(neigh['out_q']), - 'connection_state': bgp_neigh['bgp_state'], - 'previous_connection_state': '', - 'last_event': '', - 'suppress_4byte_as': ( - bgp_neigh['four_byte_as'] != 'advertised and received' if - bgp_neigh['four_byte_as'] else False), - 'local_as_prepend': False, - 'holdtime': napalm.base.helpers.as_number( - bgp_neigh['holdtime']) if bgp_neigh['holdtime'] else 0, - 'configured_holdtime': 0, - 'keepalive': napalm.base.helpers.as_number( - bgp_neigh['keepalive']) if bgp_neigh['keepalive'] else 0, - 'configured_keepalive': 0, - 'active_prefix_count': 0, - 'received_prefix_count': 0, - 'accepted_prefix_count': 0, - 'suppressed_prefix_count': 0, - 'advertised_prefix_count': 0, - 'flap_count': 0, + "up": neigh["up"] != "never", + "local_as": napalm.base.helpers.as_number(neigh["local_as"]), + "remote_as": napalm.base.helpers.as_number(neigh["remote_as"]), + "router_id": 
napalm.base.helpers.ip(bgp_neigh["router_id"]) + if bgp_neigh["router_id"] + else "", + "local_address": napalm.base.helpers.ip(bgp_neigh["local_address"]) + if bgp_neigh["local_address"] + else "", + "local_address_configured": False, + "local_port": napalm.base.helpers.as_number(bgp_neigh["local_port"]) + if bgp_neigh["local_port"] + else 0, + "routing_table": bgp_neigh["vrf"] if bgp_neigh["vrf"] else "global", + "remote_address": napalm.base.helpers.ip(bgp_neigh["neighbor"]), + "remote_port": napalm.base.helpers.as_number(bgp_neigh["remote_port"]) + if bgp_neigh["remote_port"] + else 0, + "multihop": False, + "multipath": False, + "remove_private_as": False, + "import_policy": "", + "export_policy": "", + "input_messages": napalm.base.helpers.as_number( + bgp_neigh["msg_total_in"] + ) + if bgp_neigh["msg_total_in"] + else 0, + "output_messages": napalm.base.helpers.as_number( + bgp_neigh["msg_total_out"] + ) + if bgp_neigh["msg_total_out"] + else 0, + "input_updates": napalm.base.helpers.as_number( + bgp_neigh["msg_update_in"] + ) + if bgp_neigh["msg_update_in"] + else 0, + "output_updates": napalm.base.helpers.as_number( + bgp_neigh["msg_update_out"] + ) + if bgp_neigh["msg_update_out"] + else 0, + "messages_queued_out": napalm.base.helpers.as_number(neigh["out_q"]), + "connection_state": bgp_neigh["bgp_state"], + "previous_connection_state": "", + "last_event": "", + "suppress_4byte_as": ( + bgp_neigh["four_byte_as"] != "advertised and received" + if bgp_neigh["four_byte_as"] + else False + ), + "local_as_prepend": False, + "holdtime": napalm.base.helpers.as_number(bgp_neigh["holdtime"]) + if bgp_neigh["holdtime"] + else 0, + "configured_holdtime": 0, + "keepalive": napalm.base.helpers.as_number(bgp_neigh["keepalive"]) + if bgp_neigh["keepalive"] + else 0, + "configured_keepalive": 0, + "active_prefix_count": 0, + "received_prefix_count": 0, + "accepted_prefix_count": 0, + "suppressed_prefix_count": 0, + "advertised_prefix_count": 0, + "flap_count": 0, } bgp_neigh_afi = napalm.base.helpers.textfsm_extractor( - self, 'ip_bgp_neigh_afi', raw_bgp_neigh) + self, "ip_bgp_neigh_afi", raw_bgp_neigh + ) if len(bgp_neigh_afi) > 1: bgp_neigh_afi = bgp_neigh_afi[1] - details.update({ - 'local_address_configured': bgp_neigh_afi['local_addr_conf'] != '', - 'multipath': bgp_neigh_afi['multipaths'] != '0', - 'import_policy': bgp_neigh_afi['policy_in'], - 'export_policy': bgp_neigh_afi['policy_out'], - 'last_event': ( - bgp_neigh_afi['last_event'] if - bgp_neigh_afi['last_event'] != 'never' else ''), - 'active_prefix_count': napalm.base.helpers.as_number( - bgp_neigh_afi['bestpaths']), - 'received_prefix_count': napalm.base.helpers.as_number( - bgp_neigh_afi['prefix_curr_in']) + napalm.base.helpers.as_number( - bgp_neigh_afi['rejected_prefix_in']), - 'accepted_prefix_count': napalm.base.helpers.as_number( - bgp_neigh_afi['prefix_curr_in']), - 'suppressed_prefix_count': napalm.base.helpers.as_number( - bgp_neigh_afi['rejected_prefix_in']), - 'advertised_prefix_count': napalm.base.helpers.as_number( - bgp_neigh_afi['prefix_curr_out']), - 'flap_count': napalm.base.helpers.as_number(bgp_neigh_afi['flap_count']) - }) + details.update( + { + "local_address_configured": bgp_neigh_afi["local_addr_conf"] + != "", + "multipath": bgp_neigh_afi["multipaths"] != "0", + "import_policy": bgp_neigh_afi["policy_in"], + "export_policy": bgp_neigh_afi["policy_out"], + "last_event": ( + bgp_neigh_afi["last_event"] + if bgp_neigh_afi["last_event"] != "never" + else "" + ), + "active_prefix_count": 
napalm.base.helpers.as_number( + bgp_neigh_afi["bestpaths"] + ), + "received_prefix_count": napalm.base.helpers.as_number( + bgp_neigh_afi["prefix_curr_in"] + ) + + napalm.base.helpers.as_number( + bgp_neigh_afi["rejected_prefix_in"] + ), + "accepted_prefix_count": napalm.base.helpers.as_number( + bgp_neigh_afi["prefix_curr_in"] + ), + "suppressed_prefix_count": napalm.base.helpers.as_number( + bgp_neigh_afi["rejected_prefix_in"] + ), + "advertised_prefix_count": napalm.base.helpers.as_number( + bgp_neigh_afi["prefix_curr_out"] + ), + "flap_count": napalm.base.helpers.as_number( + bgp_neigh_afi["flap_count"] + ), + } + ) else: bgp_neigh_afi = bgp_neigh_afi[0] - details.update({ - 'import_policy': bgp_neigh_afi['policy_in'], - 'export_policy': bgp_neigh_afi['policy_out'], - }) - bgp_detail[details['routing_table']][ - details['remote_as']].append(details) + details.update( + { + "import_policy": bgp_neigh_afi["policy_in"], + "export_policy": bgp_neigh_afi["policy_out"], + } + ) + bgp_detail[details["routing_table"]][details["remote_as"]].append(details) return bgp_detail def get_interfaces_counters(self): @@ -1497,14 +1698,14 @@ def get_interfaces_counters(self): Currently doesn't determine output broadcasts, multicasts """ counters = {} - command = 'show interfaces' + command = "show interfaces" output = self._send_command(command) - sh_int_sum_cmd = 'show interface summary' + sh_int_sum_cmd = "show interface summary" sh_int_sum_cmd_out = self._send_command(sh_int_sum_cmd) # Break output into per-interface sections - interface_strings = re.split(r'.* line protocol is .*', output, flags=re.M) - header_strings = re.findall(r'.* line protocol is .*', output, flags=re.M) + interface_strings = re.split(r".* line protocol is .*", output, flags=re.M) + header_strings = re.findall(r".* line protocol is .*", output, flags=re.M) empty = interface_strings.pop(0).strip() if empty: @@ -1523,56 +1724,67 @@ def get_interfaces_counters(self): for interface, interface_str in zip(intf, interface_strings): counters.setdefault(interface, {}) for line in interface_str.splitlines(): - if 'packets input' in line: + if "packets input" in line: # '0 packets input, 0 bytes, 0 no buffer' match = re.search(r"(\d+) packets input.* (\d+) bytes", line) - counters[interface]['rx_unicast_packets'] = int(match.group(1)) - counters[interface]['rx_octets'] = int(match.group(2)) - elif 'broadcast' in line: + counters[interface]["rx_unicast_packets"] = int(match.group(1)) + counters[interface]["rx_octets"] = int(match.group(2)) + elif "broadcast" in line: # 'Received 0 broadcasts (0 multicasts)' # 'Received 264071 broadcasts (39327 IP multicasts)' # 'Received 338 broadcasts, 0 runts, 0 giants, 0 throttles' - match = re.search(r"Received (\d+) broadcasts.*(\d+).*multicasts", line) + match = re.search( + r"Received (\d+) broadcasts.*(\d+).*multicasts", line + ) alt_match = re.search(r"Received (\d+) broadcasts.*", line) if match: - counters[interface]['rx_broadcast_packets'] = int(match.group(1)) - counters[interface]['rx_multicast_packets'] = int(match.group(2)) + counters[interface]["rx_broadcast_packets"] = int( + match.group(1) + ) + counters[interface]["rx_multicast_packets"] = int( + match.group(2) + ) elif alt_match: - counters[interface]['rx_broadcast_packets'] = int(alt_match.group(1)) - counters[interface]['rx_multicast_packets'] = -1 + counters[interface]["rx_broadcast_packets"] = int( + alt_match.group(1) + ) + counters[interface]["rx_multicast_packets"] = -1 else: - counters[interface]['rx_broadcast_packets'] = -1 - 
counters[interface]['rx_multicast_packets'] = -1 - elif 'packets output' in line: + counters[interface]["rx_broadcast_packets"] = -1 + counters[interface]["rx_multicast_packets"] = -1 + elif "packets output" in line: # '0 packets output, 0 bytes, 0 underruns' match = re.search(r"(\d+) packets output.* (\d+) bytes", line) - counters[interface]['tx_unicast_packets'] = int(match.group(1)) - counters[interface]['tx_octets'] = int(match.group(2)) - counters[interface]['tx_broadcast_packets'] = -1 - counters[interface]['tx_multicast_packets'] = -1 - elif 'input errors' in line: + counters[interface]["tx_unicast_packets"] = int(match.group(1)) + counters[interface]["tx_octets"] = int(match.group(2)) + counters[interface]["tx_broadcast_packets"] = -1 + counters[interface]["tx_multicast_packets"] = -1 + elif "input errors" in line: # '0 input errors, 0 CRC, 0 frame, 0 overrun, 0 ignored' match = re.search(r"(\d+) input errors", line) - counters[interface]['rx_errors'] = int(match.group(1)) - counters[interface]['rx_discards'] = -1 - elif 'output errors' in line: + counters[interface]["rx_errors"] = int(match.group(1)) + counters[interface]["rx_discards"] = -1 + elif "output errors" in line: # '0 output errors, 0 collisions, 1 interface resets' match = re.search(r"(\d+) output errors", line) - counters[interface]['tx_errors'] = int(match.group(1)) - counters[interface]['tx_discards'] = -1 + counters[interface]["tx_errors"] = int(match.group(1)) + counters[interface]["tx_discards"] = -1 for line in sh_int_sum_cmd_out.splitlines(): if interface in line: # Line is tabular output with columns # Interface IHQ IQD OHQ OQD RXBS RXPS TXBS TXPS TRTL # where columns (excluding interface) are integers - regex = r"\b" + interface + \ - r"\b\s+(\d+)\s+(?P\d+)\s+(\d+)" + \ - r"\s+(?P\d+)\s+(\d+)\s+(\d+)" + \ - r"\s+(\d+)\s+(\d+)\s+(\d+)" + regex = ( + r"\b" + + interface + + r"\b\s+(\d+)\s+(?P\d+)\s+(\d+)" + + r"\s+(?P\d+)\s+(\d+)\s+(\d+)" + + r"\s+(\d+)\s+(\d+)\s+(\d+)" + ) match = re.search(regex, line) if match: - counters[interface]['rx_discards'] = int(match.group("IQD")) - counters[interface]['tx_discards'] = int(match.group("OQD")) + counters[interface]["rx_discards"] = int(match.group("IQD")) + counters[interface]["tx_discards"] = int(match.group("OQD")) return counters @@ -1585,65 +1797,71 @@ def get_environment(self): cpu hard-coded to cpu0 (i.e. 
only a single CPU) """ environment = {} - cpu_cmd = 'show proc cpu' - mem_cmd = 'show memory statistics' - temp_cmd = 'show env temperature status' + cpu_cmd = "show proc cpu" + mem_cmd = "show memory statistics" + temp_cmd = "show env temperature status" output = self._send_command(cpu_cmd) - environment.setdefault('cpu', {}) - environment['cpu'][0] = {} - environment['cpu'][0]['%usage'] = 0.0 + environment.setdefault("cpu", {}) + environment["cpu"][0] = {} + environment["cpu"][0]["%usage"] = 0.0 for line in output.splitlines(): - if 'CPU utilization' in line: + if "CPU utilization" in line: # CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1% - cpu_regex = r'^.*one minute: (\d+)%; five.*$' + cpu_regex = r"^.*one minute: (\d+)%; five.*$" match = re.search(cpu_regex, line) - environment['cpu'][0]['%usage'] = float(match.group(1)) + environment["cpu"][0]["%usage"] = float(match.group(1)) break output = self._send_command(mem_cmd) for line in output.splitlines(): - if 'Processor' in line: + if "Processor" in line: _, _, proc_total_mem, proc_used_mem, _ = line.split()[:5] - elif 'I/O' in line or 'io' in line: + elif "I/O" in line or "io" in line: _, _, io_total_mem, io_used_mem, _ = line.split()[:5] total_mem = int(proc_total_mem) + int(io_total_mem) used_mem = int(proc_used_mem) + int(io_used_mem) - environment.setdefault('memory', {}) - environment['memory']['used_ram'] = used_mem - environment['memory']['available_ram'] = total_mem + environment.setdefault("memory", {}) + environment["memory"]["used_ram"] = used_mem + environment["memory"]["available_ram"] = total_mem - environment.setdefault('temperature', {}) - re_temp_value = re.compile('(.*) Temperature Value') + environment.setdefault("temperature", {}) + re_temp_value = re.compile("(.*) Temperature Value") # The 'show env temperature status' is not ubiquitous in Cisco IOS output = self._send_command(temp_cmd) - if '% Invalid' not in output: + if "% Invalid" not in output: for line in output.splitlines(): m = re_temp_value.match(line) if m is not None: temp_name = m.group(1).lower() - temp_value = float(line.split(':')[1].split()[0]) - env_value = {'is_alert': False, - 'is_critical': False, - 'temperature': temp_value} - environment['temperature'][temp_name] = env_value - elif 'Yellow Threshold' in line: - system_temp_alert = float(line.split(':')[1].split()[0]) + temp_value = float(line.split(":")[1].split()[0]) + env_value = { + "is_alert": False, + "is_critical": False, + "temperature": temp_value, + } + environment["temperature"][temp_name] = env_value + elif "Yellow Threshold" in line: + system_temp_alert = float(line.split(":")[1].split()[0]) if temp_value > system_temp_alert: - env_value['is_alert'] = True - elif 'Red Threshold' in line: - system_temp_crit = float(line.split(':')[1].split()[0]) + env_value["is_alert"] = True + elif "Red Threshold" in line: + system_temp_crit = float(line.split(":")[1].split()[0]) if temp_value > system_temp_crit: - env_value['is_critical'] = True + env_value["is_critical"] = True else: - env_value = {'is_alert': False, 'is_critical': False, 'temperature': -1.0} - environment['temperature']['invalid'] = env_value + env_value = {"is_alert": False, "is_critical": False, "temperature": -1.0} + environment["temperature"]["invalid"] = env_value # Initialize 'power' and 'fan' to default values (not implemented) - environment.setdefault('power', {}) - environment['power']['invalid'] = {'status': True, 'output': -1.0, 'capacity': -1.0} - environment.setdefault('fans', {}) - 
environment['fans']['invalid'] = {'status': True} + environment.setdefault("power", {}) + environment["power"]["invalid"] = { + "status": True, + "output": -1.0, + "capacity": -1.0, + } + environment.setdefault("fans", {}) + environment["fans"]["invalid"] = {"status": True} return environment @@ -1675,11 +1893,11 @@ def get_arp_table(self): """ arp_table = [] - command = 'show arp | exclude Incomplete' + command = "show arp | exclude Incomplete" output = self._send_command(command) # Skip the first line which is a header - output = output.split('\n') + output = output.split("\n") output = output[1:] for line in output: @@ -1688,7 +1906,7 @@ def get_arp_table(self): if len(line.split()) == 5: # Static ARP entries have no interface # Internet 10.0.0.1 - 0010.2345.1cda ARPA - interface = '' + interface = "" protocol, address, age, mac, eth_type = line.split() elif len(line.split()) == 6: protocol, address, age, mac, eth_type, interface = line.split() @@ -1696,7 +1914,7 @@ def get_arp_table(self): raise ValueError("Unexpected output from: {}".format(line.split())) try: - if age == '-': + if age == "-": age = 0 age = float(age) except ValueError: @@ -1708,10 +1926,10 @@ def get_arp_table(self): if not re.search(RE_MAC, mac): raise ValueError("Invalid MAC Address detected: {}".format(mac)) entry = { - 'interface': interface, - 'mac': napalm.base.helpers.mac(mac), - 'ip': address, - 'age': age + "interface": interface, + "mac": napalm.base.helpers.mac(mac), + "ip": address, + "age": age, } arp_table.append(entry) return arp_table @@ -1731,7 +1949,7 @@ def cli(self, commands): """ cli_output = dict() if type(commands) is not list: - raise TypeError('Please enter a valid list of commands!') + raise TypeError("Please enter a valid list of commands!") for command in commands: output = self._send_command(command) @@ -1744,7 +1962,11 @@ def get_ntp_peers(self): """Implementation of get_ntp_peers for IOS.""" ntp_stats = self.get_ntp_stats() - return {ntp_peer.get('remote'): {} for ntp_peer in ntp_stats if ntp_peer.get('remote')} + return { + ntp_peer.get("remote"): {} + for ntp_peer in ntp_stats + if ntp_peer.get("remote") + } def get_ntp_servers(self): """Implementation of get_ntp_servers for IOS. 
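The reformatted get_environment hunk above keeps the original parsing approach: the one-minute CPU figure is pulled out of the "show proc cpu" banner with a single regex. A minimal standalone sketch of that parse follows; the sample output line is invented for illustration, only the regex shape comes from the driver code shown above.

import re

# Hypothetical "show proc cpu" banner line; the regex mirrors the driver's pattern.
sample = "CPU utilization for five seconds: 2%/0%; one minute: 2%; five minutes: 1%"

cpu_usage = 0.0
for line in sample.splitlines():
    if "CPU utilization" in line:
        # Capture only the one-minute figure, as get_environment does.
        match = re.search(r"^.*one minute: (\d+)%; five.*$", line)
        if match:
            cpu_usage = float(match.group(1))
        break

print(cpu_usage)  # -> 2.0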
@@ -1761,7 +1983,7 @@ def get_ntp_servers(self): } """ ntp_servers = {} - command = 'show run | include ntp server' + command = "show run | include ntp server" output = self._send_command(command) for line in output.splitlines(): @@ -1777,34 +1999,38 @@ def get_ntp_stats(self): """Implementation of get_ntp_stats for IOS.""" ntp_stats = [] - command = 'show ntp associations' + command = "show ntp associations" output = self._send_command(command) for line in output.splitlines(): # Skip first two lines and last line of command output - if line == "" or 'address' in line or 'sys.peer' in line: + if line == "" or "address" in line or "sys.peer" in line: continue - if '%NTP is not enabled' in line: + if "%NTP is not enabled" in line: return [] elif len(line.split()) == 9: - address, ref_clock, st, when, poll, reach, delay, offset, disp = line.split() - address_regex = re.match(r'(\W*)([0-9.*]*)', address) + address, ref_clock, st, when, poll, reach, delay, offset, disp = ( + line.split() + ) + address_regex = re.match(r"(\W*)([0-9.*]*)", address) try: - ntp_stats.append({ - 'remote': py23_compat.text_type(address_regex.group(2)), - 'synchronized': ('*' in address_regex.group(1)), - 'referenceid': py23_compat.text_type(ref_clock), - 'stratum': int(st), - 'type': u'-', - 'when': py23_compat.text_type(when), - 'hostpoll': int(poll), - 'reachability': int(reach), - 'delay': float(delay), - 'offset': float(offset), - 'jitter': float(disp) - }) + ntp_stats.append( + { + "remote": py23_compat.text_type(address_regex.group(2)), + "synchronized": ("*" in address_regex.group(1)), + "referenceid": py23_compat.text_type(ref_clock), + "stratum": int(st), + "type": "-", + "when": py23_compat.text_type(when), + "hostpoll": int(poll), + "reachability": int(reach), + "delay": float(delay), + "offset": float(offset), + "jitter": float(disp), + } + ) except Exception: continue @@ -1853,64 +2079,84 @@ def get_mac_address_table(self): """ RE_MACTABLE_DEFAULT = r"^" + MAC_REGEX - RE_MACTABLE_6500_1 = r"^\*\s+{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 7 fields - RE_MACTABLE_6500_2 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 6 fields - RE_MACTABLE_6500_3 = r"^\s{51}\S+" # Fill down prior - RE_MACTABLE_6500_4 = r"^R\s+{}\s+.*Router".format(VLAN_REGEX, MAC_REGEX) # Router field - RE_MACTABLE_6500_5 = r"^R\s+N/A\s+{}.*Router".format(MAC_REGEX) # Router skipped - RE_MACTABLE_4500_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 5 fields - RE_MACTABLE_4500_2 = r"^\s{32,34}\S+" # Fill down prior - RE_MACTABLE_4500_3 = r"^{}\s+{}\s+".format(INT_REGEX, MAC_REGEX) # Matches PHY int + RE_MACTABLE_6500_1 = r"^\*\s+{}\s+{}\s+".format( + VLAN_REGEX, MAC_REGEX + ) # 7 fields + RE_MACTABLE_6500_2 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 6 fields + RE_MACTABLE_6500_3 = r"^\s{51}\S+" # Fill down prior + RE_MACTABLE_6500_4 = r"^R\s+{}\s+.*Router".format( + VLAN_REGEX, MAC_REGEX + ) # Router field + RE_MACTABLE_6500_5 = r"^R\s+N/A\s+{}.*Router".format( + MAC_REGEX + ) # Router skipped + RE_MACTABLE_4500_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 5 fields + RE_MACTABLE_4500_2 = r"^\s{32,34}\S+" # Fill down prior + RE_MACTABLE_4500_3 = r"^{}\s+{}\s+".format( + INT_REGEX, MAC_REGEX + ) # Matches PHY int RE_MACTABLE_2960_1 = r"^All\s+{}".format(MAC_REGEX) - RE_MACTABLE_GEN_1 = r"^{}\s+{}\s+".format(VLAN_REGEX, MAC_REGEX) # 4 fields-2960/4500 + RE_MACTABLE_GEN_1 = r"^{}\s+{}\s+".format( + VLAN_REGEX, MAC_REGEX + ) # 4 fields-2960/4500 def process_mac_fields(vlan, mac, mac_type, interface): """Return proper data for 
mac address fields.""" - if mac_type.lower() in ['self', 'static', 'system']: + if mac_type.lower() in ["self", "static", "system"]: static = True - if vlan.lower() == 'all': + if vlan.lower() == "all": vlan = 0 - if interface.lower() == 'cpu' or re.search(r'router', interface.lower()) or \ - re.search(r'switch', interface.lower()): - interface = '' + if ( + interface.lower() == "cpu" + or re.search(r"router", interface.lower()) + or re.search(r"switch", interface.lower()) + ): + interface = "" else: static = False return { - 'mac': napalm.base.helpers.mac(mac), - 'interface': self._canonical_int(interface), - 'vlan': int(vlan), - 'static': static, - 'active': True, - 'moves': -1, - 'last_move': -1.0 + "mac": napalm.base.helpers.mac(mac), + "interface": self._canonical_int(interface), + "vlan": int(vlan), + "static": static, + "active": True, + "moves": -1, + "last_move": -1.0, } mac_address_table = [] - command = IOS_COMMANDS['show_mac_address'] + command = IOS_COMMANDS["show_mac_address"] output = self._send_command(command) # Skip the header lines - output = re.split(r'^----.*', output, flags=re.M)[1:] + output = re.split(r"^----.*", output, flags=re.M)[1:] output = "\n".join(output).strip() # Strip any leading asterisks output = re.sub(r"^\*", "", output, flags=re.M) - fill_down_vlan = fill_down_mac = fill_down_mac_type = '' + fill_down_vlan = fill_down_mac = fill_down_mac_type = "" for line in output.splitlines(): # Cat6500 one off and 4500 multicast format - if (re.search(RE_MACTABLE_6500_3, line) or re.search(RE_MACTABLE_4500_2, line)): + if re.search(RE_MACTABLE_6500_3, line) or re.search( + RE_MACTABLE_4500_2, line + ): interface = line.strip() - if ',' in interface: - interfaces = interface.split(',') + if "," in interface: + interfaces = interface.split(",") else: interfaces = [interface] for single_interface in interfaces: - mac_address_table.append(process_mac_fields(fill_down_vlan, fill_down_mac, - fill_down_mac_type, - single_interface)) + mac_address_table.append( + process_mac_fields( + fill_down_vlan, + fill_down_mac, + fill_down_mac_type, + single_interface, + ) + ) continue line = line.strip() - if line == '': + if line == "": continue if re.search(r"^---", line): # Convert any '---' to VLAN 0 @@ -1920,73 +2166,97 @@ def process_mac_fields(vlan, mac, mac_type, interface): if re.search(RE_MACTABLE_DEFAULT, line): if len(line.split()) == 4: mac, mac_type, vlan, interface = line.split() - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) else: raise ValueError("Unexpected output from: {}".format(line.split())) # Cat6500 format - elif (re.search(RE_MACTABLE_6500_1, line) or re.search(RE_MACTABLE_6500_2, line)) and \ - len(line.split()) >= 6: + elif ( + re.search(RE_MACTABLE_6500_1, line) + or re.search(RE_MACTABLE_6500_2, line) + ) and len(line.split()) >= 6: if len(line.split()) == 7: _, vlan, mac, mac_type, _, _, interface = line.split() elif len(line.split()) == 6: vlan, mac, mac_type, _, _, interface = line.split() - if ',' in interface: - interfaces = interface.split(',') + if "," in interface: + interfaces = interface.split(",") fill_down_vlan = vlan fill_down_mac = mac fill_down_mac_type = mac_type for single_interface in interfaces: - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, - single_interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, single_interface) + ) else: - 
mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) # Cat4500 format elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 5: vlan, mac, mac_type, _, interface = line.split() - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) # Cat4500 w/PHY interface in Mac Table. Vlan will be -1. elif re.search(RE_MACTABLE_4500_3, line) and len(line.split()) == 5: interface, mac, mac_type, _, _ = line.split() interface = canonical_interface_name(interface) - vlan = '-1' - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + vlan = "-1" + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) # Cat2960 format - ignore extra header line elif re.search(r"^Vlan\s+Mac Address\s+", line): continue # Cat2960 format (Cat4500 format multicast entries) - elif (re.search(RE_MACTABLE_2960_1, line) or re.search(RE_MACTABLE_GEN_1, line)) and \ - len(line.split()) == 4: + elif ( + re.search(RE_MACTABLE_2960_1, line) + or re.search(RE_MACTABLE_GEN_1, line) + ) and len(line.split()) == 4: vlan, mac, mac_type, interface = line.split() - if ',' in interface: - interfaces = interface.split(',') + if "," in interface: + interfaces = interface.split(",") fill_down_vlan = vlan fill_down_mac = mac fill_down_mac_type = mac_type for single_interface in interfaces: - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, - single_interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, single_interface) + ) else: - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) # 4500 in case of unused Vlan 1. elif re.search(RE_MACTABLE_4500_1, line) and len(line.split()) == 3: vlan, mac, mac_type = line.split() - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface='')) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface="") + ) # 4500 w/PHY interface in Multicast table. Vlan will be -1. 
elif re.search(RE_MACTABLE_4500_3, line) and len(line.split()) == 4: vlan, mac, mac_type, interface = line.split() - vlan = '-1' - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + vlan = "-1" + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) elif re.search(RE_MACTABLE_6500_4, line) and len(line.split()) == 7: line = re.sub(r"^R\s+", "", line) vlan, mac, mac_type, _, _, interface = line.split() - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) continue elif re.search(RE_MACTABLE_6500_5, line): line = re.sub(r"^R\s+", "", line) vlan, mac, mac_type, _, _, interface = line.split() # Convert 'N/A' VLAN to to 0 vlan = re.sub(r"N/A", "0", vlan) - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) continue elif re.search(r"Total Mac Addresses", line): continue @@ -2001,17 +2271,17 @@ def process_mac_fields(vlan, mac, mac_type, interface): def get_probes_config(self): probes = {} - probes_regex = r"ip\s+sla\s+(?P\d+)\n" \ - r"\s+(?P\S+)\s+(?P.*\n).*" \ - r"\s+tag\s+(?P\S+)\n.*" \ - r"\s+history\s+buckets-kept\s+(?P\d+)\n.*" \ + probes_regex = ( + r"ip\s+sla\s+(?P\d+)\n" + r"\s+(?P\S+)\s+(?P.*\n).*" + r"\s+tag\s+(?P\S+)\n.*" + r"\s+history\s+buckets-kept\s+(?P\d+)\n.*" r"\s+frequency\s+(?P\d+)$" + ) probe_args = { - 'icmp-echo': r"^(?P\S+)\s+source-(?:ip|interface)\s+(?P\S+)$" - } - probe_type_map = { - 'icmp-echo': 'icmp-ping', + "icmp-echo": r"^(?P\S+)\s+source-(?:ip|interface)\s+(?P\S+)$" } + probe_type_map = {"icmp-echo": "icmp-ping"} command = "show run | include ip sla [0-9]" output = self._send_command(command) for match in re.finditer(probes_regex, output, re.M): @@ -2019,16 +2289,17 @@ def get_probes_config(self): if probe["probe_type"] not in probe_args: # Probe type not supported yet continue - probe_args_match = re.match(probe_args[probe["probe_type"]], - probe["probe_args"]) + probe_args_match = re.match( + probe_args[probe["probe_type"]], probe["probe_args"] + ) probe_data = probe_args_match.groupdict() probes[probe["id"]] = { probe["name"]: { - 'probe_type': probe_type_map[probe["probe_type"]], - 'target': probe_data["target"], - 'source': probe_data["source"], - 'probe_count': int(probe["probe_count"]), - 'test_interval': int(probe["interval"]) + "probe_type": probe_type_map[probe["probe_type"]], + "target": probe_data["target"], + "source": probe_data["source"], + "probe_count": int(probe["probe_count"]), + "test_interval": int(probe["interval"]), } } @@ -2052,39 +2323,39 @@ def get_snmp_information(self): """ # default values snmp_dict = { - 'chassis_id': u'unknown', - 'community': {}, - 'contact': u'unknown', - 'location': u'unknown' + "chassis_id": "unknown", + "community": {}, + "contact": "unknown", + "location": "unknown", } - command = 'show run | include snmp-server' + command = "show run | include snmp-server" output = self._send_command(command) for line in output.splitlines(): fields = line.split() - if 'snmp-server community' in line: + if "snmp-server community" in line: name = fields[2] - if 'community' not in snmp_dict.keys(): - snmp_dict.update({'community': {}}) - snmp_dict['community'].update({name: {}}) + if "community" not in snmp_dict.keys(): + snmp_dict.update({"community": {}}) + snmp_dict["community"].update({name: {}}) try: - snmp_dict['community'][name].update({'mode': 
fields[3].lower()}) + snmp_dict["community"][name].update({"mode": fields[3].lower()}) except IndexError: - snmp_dict['community'][name].update({'mode': u'N/A'}) + snmp_dict["community"][name].update({"mode": "N/A"}) try: - snmp_dict['community'][name].update({'acl': fields[4]}) + snmp_dict["community"][name].update({"acl": fields[4]}) except IndexError: - snmp_dict['community'][name].update({'acl': u'N/A'}) - elif 'snmp-server location' in line: - snmp_dict['location'] = ' '.join(fields[2:]) - elif 'snmp-server contact' in line: - snmp_dict['contact'] = ' '.join(fields[2:]) - elif 'snmp-server chassis-id' in line: - snmp_dict['chassis_id'] = ' '.join(fields[2:]) + snmp_dict["community"][name].update({"acl": "N/A"}) + elif "snmp-server location" in line: + snmp_dict["location"] = " ".join(fields[2:]) + elif "snmp-server contact" in line: + snmp_dict["contact"] = " ".join(fields[2:]) + elif "snmp-server chassis-id" in line: + snmp_dict["chassis_id"] = " ".join(fields[2:]) # If SNMP Chassis wasn't found; obtain using direct command - if snmp_dict['chassis_id'] == 'unknown': - command = 'show snmp chassis' + if snmp_dict["chassis_id"] == "unknown": + command = "show snmp chassis" snmp_chassis = self._send_command(command) - snmp_dict['chassis_id'] = snmp_chassis + snmp_dict["chassis_id"] = snmp_chassis return snmp_dict def get_users(self): @@ -2103,30 +2374,48 @@ def get_users(self): The level is an integer between 0 and 15, where 0 is the lowest access and 15 represents full access to the device. """ - username_regex = r"^username\s+(?P\S+)\s+(?:privilege\s+(?P\S+)" \ + username_regex = ( + r"^username\s+(?P\S+)\s+(?:privilege\s+(?P\S+)" r"\s+)?(?:secret \d+\s+(?P\S+))?$" - pub_keychain_regex = r"^\s+username\s+(?P\S+)(?P(?:\n\s+key-hash\s+" \ + ) + pub_keychain_regex = ( + r"^\s+username\s+(?P\S+)(?P(?:\n\s+key-hash\s+" r"(?P\S+)\s+(?P\S+)(?:\s+\S+)?)+)$" + ) users = {} command = "show run | section username" output = self._send_command(command) for match in re.finditer(username_regex, output, re.M): users[match.groupdict()["username"]] = { - 'level': int(match.groupdict()["priv_level"]) - if match.groupdict()["priv_level"] else 1, - 'password': match.groupdict()["pwd_hash"] - if match.groupdict()["pwd_hash"] else "", - 'sshkeys': [] + "level": int(match.groupdict()["priv_level"]) + if match.groupdict()["priv_level"] + else 1, + "password": match.groupdict()["pwd_hash"] + if match.groupdict()["pwd_hash"] + else "", + "sshkeys": [], } for match in re.finditer(pub_keychain_regex, output, re.M): if match.groupdict()["username"] not in users: continue - users[match.groupdict()["username"]]["sshkeys"] = list(map(lambda s: s.strip()[ - 9:], filter(None, match.groupdict()["keys"].splitlines()))) + users[match.groupdict()["username"]]["sshkeys"] = list( + map( + lambda s: s.strip()[9:], + filter(None, match.groupdict()["keys"].splitlines()), + ) + ) return users - def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL, timeout=C.PING_TIMEOUT, - size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF): + def ping( + self, + destination, + source=C.PING_SOURCE, + ttl=C.PING_TTL, + timeout=C.PING_TIMEOUT, + size=C.PING_SIZE, + count=C.PING_COUNT, + vrf=C.PING_VRF, + ): """ Execute ping on the device and returns a dictionary with the result. 
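The ping() body reformatted in the next hunk keeps its original parsing of the IOS summary line: the sent/received counts come from the "(x/y)" pair and the RTT statistics from the "min/avg/max" triplet. A minimal sketch of that parsing, assuming a made-up sample line:

import re

# Hypothetical IOS ping summary line; the two regexes mirror the driver's patterns.
line = "Success rate is 100 percent (5/5), round-trip min/avg/max = 1/2/4 ms"

# "(received/sent)" pair.
sent_and_received = re.search(r"\((\d*)/(\d*)\)", line)
probes_received = int(sent_and_received.group(1))
probes_sent = int(sent_and_received.group(2))

# "min/avg/max" triplet.
min_avg_max = re.search(r"(\d*)/(\d*)/(\d*)", line)
result = {
    "probes_sent": probes_sent,
    "packet_loss": probes_sent - probes_received,
    "rtt_min": float(min_avg_max.group(1)),
    "rtt_avg": float(min_avg_max.group(2)),
    "rtt_max": float(min_avg_max.group(3)),
}
print(result)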
@@ -2148,56 +2437,68 @@ def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL, timeout=C.PING ping_dict = {} # vrf needs to be right after the ping command if vrf: - command = 'ping vrf {} {}'.format(vrf, destination) + command = "ping vrf {} {}".format(vrf, destination) else: - command = 'ping {}'.format(destination) - command += ' timeout {}'.format(timeout) - command += ' size {}'.format(size) - command += ' repeat {}'.format(count) - if source != '': - command += ' source {}'.format(source) + command = "ping {}".format(destination) + command += " timeout {}".format(timeout) + command += " size {}".format(size) + command += " repeat {}".format(count) + if source != "": + command += " source {}".format(source) output = self._send_command(command) - if '%' in output: - ping_dict['error'] = output - elif 'Sending' in output: - ping_dict['success'] = { - 'probes_sent': 0, - 'packet_loss': 0, - 'rtt_min': 0.0, - 'rtt_max': 0.0, - 'rtt_avg': 0.0, - 'rtt_stddev': 0.0, - 'results': [] + if "%" in output: + ping_dict["error"] = output + elif "Sending" in output: + ping_dict["success"] = { + "probes_sent": 0, + "packet_loss": 0, + "rtt_min": 0.0, + "rtt_max": 0.0, + "rtt_avg": 0.0, + "rtt_stddev": 0.0, + "results": [], } for line in output.splitlines(): - if 'Success rate is' in line: - sent_and_received = re.search(r'\((\d*)/(\d*)\)', line) + if "Success rate is" in line: + sent_and_received = re.search(r"\((\d*)/(\d*)\)", line) probes_sent = int(sent_and_received.group(2)) probes_received = int(sent_and_received.group(1)) - ping_dict['success']['probes_sent'] = probes_sent - ping_dict['success']['packet_loss'] = probes_sent - probes_received + ping_dict["success"]["probes_sent"] = probes_sent + ping_dict["success"]["packet_loss"] = probes_sent - probes_received # If there were zero valid response packets, we are done - if 'Success rate is 0 ' in line: + if "Success rate is 0 " in line: break - min_avg_max = re.search(r'(\d*)/(\d*)/(\d*)', line) - ping_dict['success'].update({ - 'rtt_min': float(min_avg_max.group(1)), - 'rtt_avg': float(min_avg_max.group(2)), - 'rtt_max': float(min_avg_max.group(3)), - }) + min_avg_max = re.search(r"(\d*)/(\d*)/(\d*)", line) + ping_dict["success"].update( + { + "rtt_min": float(min_avg_max.group(1)), + "rtt_avg": float(min_avg_max.group(2)), + "rtt_max": float(min_avg_max.group(3)), + } + ) results_array = [] for i in range(probes_received): - results_array.append({'ip_address': py23_compat.text_type(destination), - 'rtt': 0.0}) - ping_dict['success'].update({'results': results_array}) + results_array.append( + { + "ip_address": py23_compat.text_type(destination), + "rtt": 0.0, + } + ) + ping_dict["success"].update({"results": results_array}) return ping_dict - def traceroute(self, destination, source=C.TRACEROUTE_SOURCE, - ttl=C.TRACEROUTE_TTL, timeout=C.TRACEROUTE_TIMEOUT, vrf=C.TRACEROUTE_VRF): + def traceroute( + self, + destination, + source=C.TRACEROUTE_SOURCE, + ttl=C.TRACEROUTE_TTL, + timeout=C.TRACEROUTE_TIMEOUT, + vrf=C.TRACEROUTE_VRF, + ): """ Executes traceroute on the device and returns a dictionary with the result. @@ -2236,21 +2537,21 @@ def traceroute(self, destination, source=C.TRACEROUTE_SOURCE, # Calculation to leave enough time for traceroute to complete assumes send_command # delay of .2 seconds. 
max_loops = (5 * ttl * timeout) + 150 - if max_loops < 500: # Make sure max_loops isn't set artificially low + if max_loops < 500: # Make sure max_loops isn't set artificially low max_loops = 500 output = self.device.send_command(command, max_loops=max_loops) # Prepare return dict traceroute_dict = dict() - if re.search('Unrecognized host or address', output): - traceroute_dict['error'] = 'unknown host %s' % destination + if re.search("Unrecognized host or address", output): + traceroute_dict["error"] = "unknown host %s" % destination return traceroute_dict else: - traceroute_dict['success'] = dict() + traceroute_dict["success"] = dict() results = dict() # Find all hops - hops = re.findall(r'\n\s+[0-9]{1,3}\s', output) + hops = re.findall(r"\n\s+[0-9]{1,3}\s", output) for hop in hops: # Search for hop in the output hop_match = re.search(hop, output) @@ -2267,98 +2568,108 @@ def traceroute(self, destination, source=C.TRACEROUTE_SOURCE, # Now you have the start and stop index for each hop # and you can parse the probes # Set the hop_variable, and remove spaces between msec for easier matching - hop_string = output[start_index:stop_index].replace(' msec', 'msec') + hop_string = output[start_index:stop_index].replace(" msec", "msec") hop_list = hop_string.split() current_hop = int(hop_list.pop(0)) # Prepare dictionary for each hop (assuming there are 3 probes in each hop) results[current_hop] = dict() - results[current_hop]['probes'] = dict() - results[current_hop]['probes'][1] = {'rtt': float(), - 'ip_address': '', - 'host_name': ''} - results[current_hop]['probes'][2] = {'rtt': float(), - 'ip_address': '', - 'host_name': ''} - results[current_hop]['probes'][3] = {'rtt': float(), - 'ip_address': '', - 'host_name': ''} + results[current_hop]["probes"] = dict() + results[current_hop]["probes"][1] = { + "rtt": float(), + "ip_address": "", + "host_name": "", + } + results[current_hop]["probes"][2] = { + "rtt": float(), + "ip_address": "", + "host_name": "", + } + results[current_hop]["probes"][3] = { + "rtt": float(), + "ip_address": "", + "host_name": "", + } current_probe = 1 - ip_address = '' - host_name = '' + ip_address = "" + host_name = "" while hop_list: current_element = hop_list.pop(0) # If current_element is * move index in dictionary to next probe - if current_element == '*': + if current_element == "*": current_probe += 1 # If current_element contains msec record the entry for probe - elif 'msec' in current_element: + elif "msec" in current_element: ip_address = py23_compat.text_type(ip_address) host_name = py23_compat.text_type(host_name) - rtt = float(current_element.replace('msec', '')) - results[current_hop]['probes'][current_probe]['ip_address'] = ip_address - results[current_hop]['probes'][current_probe]['host_name'] = host_name - results[current_hop]['probes'][current_probe]['rtt'] = rtt + rtt = float(current_element.replace("msec", "")) + results[current_hop]["probes"][current_probe][ + "ip_address" + ] = ip_address + results[current_hop]["probes"][current_probe][ + "host_name" + ] = host_name + results[current_hop]["probes"][current_probe]["rtt"] = rtt # After recording the entry move the index to next probe current_probe += 1 # If element contains '(' and ')', the output format is 'FQDN (IP_ADDRESS)' # Save the IP address - elif '(' in current_element: - ip_address = current_element.replace('(', '').replace(')', '') + elif "(" in current_element: + ip_address = current_element.replace("(", "").replace(")", "") # Save the probe's ip_address and host_name else: host_name = 
current_element ip_address = current_element - traceroute_dict['success'] = results + traceroute_dict["success"] = results return traceroute_dict - def get_network_instances(self, name=''): + def get_network_instances(self, name=""): instances = {} - sh_vrf_detail = self._send_command('show vrf detail') - show_ip_int_br = self._send_command('show ip interface brief') + sh_vrf_detail = self._send_command("show vrf detail") + show_ip_int_br = self._send_command("show ip interface brief") # retrieve all interfaces for the default VRF interface_dict = {} show_ip_int_br = show_ip_int_br.strip() for line in show_ip_int_br.splitlines(): - if 'Interface ' in line: + if "Interface " in line: continue interface = line.split()[0] interface_dict[interface] = {} - instances['default'] = { - 'name': 'default', - 'type': 'DEFAULT_INSTANCE', - 'state': {'route_distinguisher': ''}, - 'interfaces': {'interface': interface_dict} - } + instances["default"] = { + "name": "default", + "type": "DEFAULT_INSTANCE", + "state": {"route_distinguisher": ""}, + "interfaces": {"interface": interface_dict}, + } - for vrf in sh_vrf_detail.split('\n\n'): + for vrf in sh_vrf_detail.split("\n\n"): - first_part = vrf.split('Address family')[0] + first_part = vrf.split("Address family")[0] # retrieve the name of the VRF and the Route Distinguisher - vrf_name, RD = re.match(r'^VRF (\S+).*RD (.*);', first_part).groups() - if RD == '': - RD = '' + vrf_name, RD = re.match(r"^VRF (\S+).*RD (.*);", first_part).groups() + if RD == "": + RD = "" # retrieve the interfaces of the VRF - if_regex = re.match(r'.*Interfaces:(.*)', first_part, re.DOTALL) - if 'No interfaces' in first_part: + if_regex = re.match(r".*Interfaces:(.*)", first_part, re.DOTALL) + if "No interfaces" in first_part: interfaces = {} else: interfaces = {itf: {} for itf in if_regex.group(1).split()} instances[vrf_name] = { - 'name': vrf_name, - 'type': 'L3VRF', - 'state': {'route_distinguisher': RD}, - 'interfaces': {'interface': interfaces} - } + "name": vrf_name, + "type": "L3VRF", + "state": {"route_distinguisher": RD}, + "interfaces": {"interface": interfaces}, + } return instances if not name else instances[name] - def get_config(self, retrieve='all'): + def get_config(self, retrieve="all"): """Implementation of get_config for IOS. Returns the startup or/and running configuration as dictionary. @@ -2367,21 +2678,17 @@ def get_config(self, retrieve='all'): since IOS does not support candidate configuration. 
""" - configs = { - 'startup': '', - 'running': '', - 'candidate': '', - } + configs = {"startup": "", "running": "", "candidate": ""} - if retrieve in ('startup', 'all'): - command = 'show startup-config' + if retrieve in ("startup", "all"): + command = "show startup-config" output = self._send_command(command) - configs['startup'] = output + configs["startup"] = output - if retrieve in ('running', 'all'): - command = 'show running-config' + if retrieve in ("running", "all"): + command = "show running-config" output = self._send_command(command) - configs['running'] = output + configs["running"] = output return configs @@ -2414,10 +2721,10 @@ def get_ipv6_neighbors_table(self): """ ipv6_neighbors_table = [] - command = 'show ipv6 neighbors' + command = "show ipv6 neighbors" output = self._send_command(command) - ipv6_neighbors = '' + ipv6_neighbors = "" fields = re.split(r"^IPv6\s+Address.*Interface$", output, flags=(re.M | re.I)) if len(fields) == 2: ipv6_neighbors = fields[1].strip() @@ -2425,15 +2732,17 @@ def get_ipv6_neighbors_table(self): # typical format of an entry in the IOS IPv6 neighbors table: # 2002:FFFF:233::1 0 2894.0fed.be30 REACH Fa3/1/2.233 ip, age, mac, state, interface = entry.split() - mac = '' if mac == '-' else napalm.base.helpers.mac(mac) + mac = "" if mac == "-" else napalm.base.helpers.mac(mac) ip = napalm.base.helpers.ip(ip) - ipv6_neighbors_table.append({ - 'interface': interface, - 'mac': mac, - 'ip': ip, - 'age': float(age), - 'state': state - }) + ipv6_neighbors_table.append( + { + "interface": interface, + "mac": mac, + "ip": ip, + "age": float(age), + "state": state, + } + ) return ipv6_neighbors_table @property diff --git a/napalm/iosxr/__init__.py b/napalm/iosxr/__init__.py index a666be8dd..455b4a616 100644 --- a/napalm/iosxr/__init__.py +++ b/napalm/iosxr/__init__.py @@ -21,9 +21,9 @@ # Import local modules from napalm.iosxr.iosxr import IOSXRDriver # noqa -__all__ = ('IOSXRDriver',) +__all__ = ("IOSXRDriver",) try: - __version__ = pkg_resources.get_distribution('napalm-iosxr').version + __version__ = pkg_resources.get_distribution("napalm-iosxr").version except pkg_resources.DistributionNotFound: __version__ = "Not installed" diff --git a/napalm/iosxr/iosxr.py b/napalm/iosxr/iosxr.py index 491bf0cea..7b27fdd36 100644 --- a/napalm/iosxr/iosxr.py +++ b/napalm/iosxr/iosxr.py @@ -56,22 +56,24 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) self.replace = False if optional_args is None: optional_args = {} - self.lock_on_connect = optional_args.get('config_lock', False) + self.lock_on_connect = optional_args.get("config_lock", False) self.netmiko_optional_args = netmiko_args(optional_args) try: - self.port = self.netmiko_optional_args.pop('port') + self.port = self.netmiko_optional_args.pop("port") except KeyError: self.port = 22 - self.platform = 'iosxr' - self.device = IOSXR(hostname, - username, - password, - timeout=timeout, - port=self.port, - lock=self.lock_on_connect, - **self.netmiko_optional_args) + self.platform = "iosxr" + self.device = IOSXR( + hostname, + username, + password, + timeout=timeout, + port=self.port, + lock=self.lock_on_connect, + **self.netmiko_optional_args + ) def open(self): try: @@ -85,9 +87,9 @@ def close(self): def is_alive(self): """Returns a flag with the state of the connection.""" if self.device is None: - return {'is_alive': False} + return {"is_alive": False} # Simply returns the flag from pyIOSXR - return {'is_alive': self.device.is_alive()} + return {"is_alive": 
self.device.is_alive()} def load_replace_candidate(self, filename=None, config=None): self.pending_changes = True @@ -117,14 +119,14 @@ def load_merge_candidate(self, filename=None, config=None): def compare_config(self): if not self.pending_changes: - return '' + return "" elif self.replace: return self.device.compare_replace_config().strip() else: return self.device.compare_config().strip() def commit_config(self, message=""): - commit_args = {'comment': message} if message else {} + commit_args = {"comment": message} if message else {} if self.replace: self.device.commit_replace_config(**commit_args) else: @@ -145,22 +147,22 @@ def rollback(self): def get_facts(self): facts = { - 'vendor': u'Cisco', - 'os_version': u'', - 'hostname': u'', - 'uptime': -1, - 'serial_number': u'', - 'fqdn': u'', - 'model': u'', - 'interface_list': [] + "vendor": "Cisco", + "os_version": "", + "hostname": "", + "uptime": -1, + "serial_number": "", + "fqdn": "", + "model": "", + "interface_list": [], } - facts_rpc_request = '\ - ' + facts_rpc_request = "\ + " facts_rpc_reply = ETREE.fromstring(self.device.make_rpc_call(facts_rpc_request)) - system_time_xpath = './/SystemTime/Uptime' - platform_attr_xpath = './/RackTable/Rack/Attributes/BasicInfo' + system_time_xpath = ".//SystemTime/Uptime" + platform_attr_xpath = ".//RackTable/Rack/Attributes/BasicInfo" system_time_tree = facts_rpc_reply.xpath(system_time_xpath)[0] try: platform_attr_tree = facts_rpc_reply.xpath(platform_attr_xpath)[0] @@ -168,26 +170,34 @@ def get_facts(self): platform_attr_tree = facts_rpc_reply.xpath(platform_attr_xpath) hostname = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(system_time_tree, 'Hostname')) + text_type, napalm.base.helpers.find_txt(system_time_tree, "Hostname") + ) uptime = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(system_time_tree, 'Uptime'), -1) + int, napalm.base.helpers.find_txt(system_time_tree, "Uptime"), -1 + ) serial = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(platform_attr_tree, 'SerialNumber')) + text_type, napalm.base.helpers.find_txt(platform_attr_tree, "SerialNumber") + ) os_version = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(platform_attr_tree, 'SoftwareRevision')) + text_type, + napalm.base.helpers.find_txt(platform_attr_tree, "SoftwareRevision"), + ) model = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(platform_attr_tree, 'ModelName')) + text_type, napalm.base.helpers.find_txt(platform_attr_tree, "ModelName") + ) interface_list = sorted(list(self.get_interfaces().keys())) - facts.update({ - 'os_version': os_version, - 'hostname': hostname, - 'model': model, - 'uptime': uptime, - 'serial_number': serial, - 'fqdn': hostname, - 'interface_list': interface_list - }) + facts.update( + { + "os_version": os_version, + "hostname": hostname, + "model": model, + "uptime": uptime, + "serial_number": serial, + "fqdn": hostname, + "interface_list": interface_list, + } + ) return facts @@ -196,126 +206,153 @@ def get_interfaces(self): interfaces = {} INTERFACE_DEFAULTS = { - 'is_enabled': False, - 'is_up': False, - 'mac_address': u'', - 'description': u'', - 'speed': -1, - 'last_flapped': -1.0 + "is_enabled": False, + "is_up": False, + "mac_address": "", + "description": "", + "speed": -1, + "last_flapped": -1.0, } - interfaces_rpc_request = '' + interfaces_rpc_request = "" interfaces_rpc_reply = ETREE.fromstring( - self.device.make_rpc_call(interfaces_rpc_request)) + 
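As a rough sketch, the facts dictionary assembled above ends up looking like this (all values hypothetical):

    facts = {
        "vendor": "Cisco",
        "os_version": "6.1.3",
        "hostname": "edge-router-1",
        "fqdn": "edge-router-1",
        "uptime": 123456,
        "serial_number": "FOX0000A0AA",
        "model": "ASR-9001",
        "interface_list": ["GigabitEthernet0/0/0/0", "MgmtEth0/RSP0/CPU0/0"],
    }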
self.device.make_rpc_call(interfaces_rpc_request) + ) - for interface_tree in interfaces_rpc_reply.xpath('.//Interfaces/InterfaceTable/Interface'): - interface_name = napalm.base.helpers.find_txt(interface_tree, 'Naming/InterfaceName') + for interface_tree in interfaces_rpc_reply.xpath( + ".//Interfaces/InterfaceTable/Interface" + ): + interface_name = napalm.base.helpers.find_txt( + interface_tree, "Naming/InterfaceName" + ) if not interface_name: continue - is_up = (napalm.base.helpers.find_txt(interface_tree, 'LineState') == 'IM_STATE_UP') - enabled = (napalm.base.helpers.find_txt( - interface_tree, 'State') != 'IM_STATE_ADMINDOWN') - raw_mac = napalm.base.helpers.find_txt(interface_tree, 'MACAddress/Address') + is_up = ( + napalm.base.helpers.find_txt(interface_tree, "LineState") + == "IM_STATE_UP" + ) + enabled = ( + napalm.base.helpers.find_txt(interface_tree, "State") + != "IM_STATE_ADMINDOWN" + ) + raw_mac = napalm.base.helpers.find_txt(interface_tree, "MACAddress/Address") mac_address = napalm.base.helpers.convert( - napalm.base.helpers.mac, raw_mac, raw_mac) + napalm.base.helpers.mac, raw_mac, raw_mac + ) speed = napalm.base.helpers.convert( - int, napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(interface_tree, 'Bandwidth'), 0) * 1e-3) - description = napalm.base.helpers.find_txt(interface_tree, 'Description') + int, + napalm.base.helpers.convert( + int, napalm.base.helpers.find_txt(interface_tree, "Bandwidth"), 0 + ) + * 1e-3, + ) + description = napalm.base.helpers.find_txt(interface_tree, "Description") interfaces[interface_name] = copy.deepcopy(INTERFACE_DEFAULTS) - interfaces[interface_name].update({ - 'is_up': is_up, - 'speed': speed, - 'is_enabled': enabled, - 'mac_address': mac_address, - 'description': description - }) + interfaces[interface_name].update( + { + "is_up": is_up, + "speed": speed, + "is_enabled": enabled, + "mac_address": mac_address, + "description": description, + } + ) return interfaces def get_interfaces_counters(self): - rpc_command = '\ - ' + rpc_command = "\ + " result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) interface_counters = {} - for interface in result_tree.xpath('.//Interface'): - interface_name = napalm.base.helpers.find_txt(interface, 'InterfaceHandle') + for interface in result_tree.xpath(".//Interface"): + interface_name = napalm.base.helpers.find_txt(interface, "InterfaceHandle") interface_stats = {} - if not interface.xpath('InterfaceStatistics'): + if not interface.xpath("InterfaceStatistics"): continue else: interface_stats = {} - interface_stats['tx_multicast_packets'] = napalm.base.helpers.convert( + interface_stats["tx_multicast_packets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( interface, - 'InterfaceStatistics/FullInterfaceStats/MulticastPacketsSent' - ) + "InterfaceStatistics/FullInterfaceStats/MulticastPacketsSent", + ), ) - interface_stats['tx_discards'] = napalm.base.helpers.convert( + interface_stats["tx_discards"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( - interface, - 'InterfaceStatistics/FullInterfaceStats/OutputDrops' - ) + interface, "InterfaceStatistics/FullInterfaceStats/OutputDrops" + ), ) - interface_stats['tx_octets'] = napalm.base.helpers.convert( + interface_stats["tx_octets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( - interface, - 'InterfaceStatistics/FullInterfaceStats/BytesSent' - ) + interface, "InterfaceStatistics/FullInterfaceStats/BytesSent" + ), + ) + interface_stats["tx_errors"] = 
napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + interface, "InterfaceStatistics/FullInterfaceStats/OutputErrors" + ), ) - interface_stats['tx_errors'] = napalm.base.helpers.convert( + interface_stats["rx_octets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( interface, - 'InterfaceStatistics/FullInterfaceStats/OutputErrors' - ) + "InterfaceStatistics/FullInterfaceStats/BytesReceived", + ), + ) + interface_stats["tx_unicast_packets"] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + interface, "InterfaceStatistics/FullInterfaceStats/PacketsSent" + ), + ) + interface_stats["rx_errors"] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + interface, "InterfaceStatistics/FullInterfaceStats/InputErrors" + ), ) - interface_stats['rx_octets'] = napalm.base.helpers.convert( + interface_stats["tx_broadcast_packets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( interface, - 'InterfaceStatistics/FullInterfaceStats/BytesReceived' - ) + "InterfaceStatistics/FullInterfaceStats/BroadcastPacketsSent", + ), ) - interface_stats['tx_unicast_packets'] = napalm.base.helpers.convert( + interface_stats["rx_multicast_packets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( interface, - 'InterfaceStatistics/FullInterfaceStats/PacketsSent' - ) + "InterfaceStatistics/FullInterfaceStats/MulticastPacketsReceived", + ), ) - interface_stats['rx_errors'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - interface, 'InterfaceStatistics/FullInterfaceStats/InputErrors')) - interface_stats['tx_broadcast_packets'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - interface, 'InterfaceStatistics/FullInterfaceStats/BroadcastPacketsSent')) - interface_stats['rx_multicast_packets'] = napalm.base.helpers.convert( + interface_stats["rx_broadcast_packets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( interface, - 'InterfaceStatistics/FullInterfaceStats/MulticastPacketsReceived' - ) + "InterfaceStatistics/FullInterfaceStats/BroadcastPacketsReceived", + ), ) - interface_stats['rx_broadcast_packets'] = napalm.base.helpers.convert( + interface_stats["rx_discards"] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + interface, "InterfaceStatistics/FullInterfaceStats/InputDrops" + ), + ) + interface_stats["rx_unicast_packets"] = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( interface, - 'InterfaceStatistics/FullInterfaceStats/BroadcastPacketsReceived' - ) + "InterfaceStatistics/FullInterfaceStats/PacketsReceived", + ), ) - interface_stats['rx_discards'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - interface, 'InterfaceStatistics/FullInterfaceStats/InputDrops')) - interface_stats['rx_unicast_packets'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - interface, 'InterfaceStatistics/FullInterfaceStats/PacketsReceived')) interface_counters[interface_name] = interface_stats return interface_counters @@ -326,17 +363,19 @@ def generate_vrf_query(vrf_name): Helper to provide XML-query for the VRF-type we're interested in. 
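For reference, a sketch of the per-interface counters dictionary assembled above (counter values hypothetical):

    interface_counters = {
        "GigabitEthernet0/0/0/0": {
            "tx_unicast_packets": 1000,
            "rx_unicast_packets": 900,
            "tx_multicast_packets": 10,
            "rx_multicast_packets": 8,
            "tx_broadcast_packets": 5,
            "rx_broadcast_packets": 4,
            "tx_octets": 123456,
            "rx_octets": 654321,
            "tx_errors": 0,
            "rx_errors": 0,
            "tx_discards": 0,
            "rx_discards": 0,
        }
    }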
""" if vrf_name == "global": - rpc_command = '\ + rpc_command = "\ default\ \ - ' + " else: - rpc_command = '\ + rpc_command = "\ default\ {vrf_name}\ \ - '.format(vrf_name=vrf_name) + ".format( + vrf_name=vrf_name + ) return rpc_command """ @@ -348,14 +387,14 @@ def generate_vrf_query(vrf_name): active_vrfs = ["global"] - rpc_command = '\ + rpc_command = "\ default\ - ' + " result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for node in result_tree.xpath('.//ConfigVRF'): - active_vrfs.append(napalm.base.helpers.find_txt(node, 'Naming/VRFName')) + for node in result_tree.xpath(".//ConfigVRF"): + active_vrfs.append(napalm.base.helpers.find_txt(node, "Naming/VRFName")) result = {} @@ -364,137 +403,179 @@ def generate_vrf_query(vrf_name): result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) this_vrf = {} - this_vrf['peers'] = {} + this_vrf["peers"] = {} if vrf == "global": - this_vrf['router_id'] = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt( + this_vrf["router_id"] = napalm.base.helpers.convert( + text_type, + napalm.base.helpers.find_txt( result_tree, - 'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/DefaultVRF' - '/GlobalProcessInfo/VRF/RouterID' - ) + "Get/Operational/BGP/InstanceTable/Instance/InstanceActive/DefaultVRF" + "/GlobalProcessInfo/VRF/RouterID", + ), ) else: - this_vrf['router_id'] = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt( + this_vrf["router_id"] = napalm.base.helpers.convert( + text_type, + napalm.base.helpers.find_txt( result_tree, - 'Get/Operational/BGP/InstanceTable/Instance/InstanceActive/VRFTable/VRF' - '/GlobalProcessInfo/VRF/RouterID' - ) + "Get/Operational/BGP/InstanceTable/Instance/InstanceActive/VRFTable/VRF" + "/GlobalProcessInfo/VRF/RouterID", + ), ) neighbors = {} - for neighbor in result_tree.xpath('.//Neighbor'): + for neighbor in result_tree.xpath(".//Neighbor"): this_neighbor = {} - this_neighbor['local_as'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'LocalAS')) - this_neighbor['remote_as'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'RemoteAS')) - this_neighbor['remote_id'] = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'RouterID')) - - if napalm.base.helpers.find_txt(neighbor, 'ConnectionAdminStatus') is "1": - this_neighbor['is_enabled'] = True + this_neighbor["local_as"] = napalm.base.helpers.convert( + int, napalm.base.helpers.find_txt(neighbor, "LocalAS") + ) + this_neighbor["remote_as"] = napalm.base.helpers.convert( + int, napalm.base.helpers.find_txt(neighbor, "RemoteAS") + ) + this_neighbor["remote_id"] = napalm.base.helpers.convert( + text_type, napalm.base.helpers.find_txt(neighbor, "RouterID") + ) + + if ( + napalm.base.helpers.find_txt(neighbor, "ConnectionAdminStatus") + is "1" + ): + this_neighbor["is_enabled"] = True try: - this_neighbor['description'] = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'Description')) + this_neighbor["description"] = napalm.base.helpers.convert( + text_type, napalm.base.helpers.find_txt(neighbor, "Description") + ) except AttributeError: - this_neighbor['description'] = u'' + this_neighbor["description"] = "" - this_neighbor['is_enabled'] = ( - napalm.base.helpers.find_txt(neighbor, 'ConnectionAdminStatus') == "1") + this_neighbor["is_enabled"] = ( + napalm.base.helpers.find_txt(neighbor, "ConnectionAdminStatus") + == "1" + ) - if 
str(napalm.base.helpers.find_txt(neighbor, 'ConnectionAdminStatus')) is "1": - this_neighbor['is_enabled'] = True + if ( + str(napalm.base.helpers.find_txt(neighbor, "ConnectionAdminStatus")) + is "1" + ): + this_neighbor["is_enabled"] = True else: - this_neighbor['is_enabled'] = False - - if str(napalm.base.helpers.find_txt(neighbor, 'ConnectionState')) == "BGP_ST_ESTAB": - this_neighbor['is_up'] = True - this_neighbor['uptime'] = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'ConnectionEstablishedTime')) + this_neighbor["is_enabled"] = False + + if ( + str(napalm.base.helpers.find_txt(neighbor, "ConnectionState")) + == "BGP_ST_ESTAB" + ): + this_neighbor["is_up"] = True + this_neighbor["uptime"] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + neighbor, "ConnectionEstablishedTime" + ), + ) else: - this_neighbor['is_up'] = False - this_neighbor['uptime'] = -1 + this_neighbor["is_up"] = False + this_neighbor["uptime"] = -1 - this_neighbor['address_family'] = {} + this_neighbor["address_family"] = {} - if napalm.base.helpers.find_txt(neighbor, - 'ConnectionRemoteAddress/AFI') == "IPv4": + if ( + napalm.base.helpers.find_txt( + neighbor, "ConnectionRemoteAddress/AFI" + ) + == "IPv4" + ): this_afi = "ipv4" - elif napalm.base.helpers.find_txt(neighbor, - 'ConnectionRemoteAddress/AFI') == "IPv6": + elif ( + napalm.base.helpers.find_txt( + neighbor, "ConnectionRemoteAddress/AFI" + ) + == "IPv6" + ): this_afi = "ipv6" else: - this_afi = napalm.base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/AFI') + this_afi = napalm.base.helpers.find_txt( + neighbor, "ConnectionRemoteAddress/AFI" + ) - this_neighbor['address_family'][this_afi] = {} + this_neighbor["address_family"][this_afi] = {} try: - this_neighbor['address_family'][this_afi]["received_prefixes"] = \ - napalm.base.helpers.convert( - int, - napalm.base.helpers.find_txt(neighbor, 'AFData/Entry/PrefixesAccepted'), - 0 - ) + napalm.base.helpers.convert( - int, - napalm.base.helpers.find_txt(neighbor, 'AFData/Entry/PrefixesDenied'), - 0 - ) - this_neighbor['address_family'][this_afi]["accepted_prefixes"] = \ - napalm.base.helpers.convert( - int, - napalm.base.helpers.find_txt(neighbor, - 'AFData/Entry/PrefixesAccepted'), - 0 - ) - this_neighbor['address_family'][this_afi]["sent_prefixes"] = \ - napalm.base.helpers.convert( - int, - napalm.base.helpers.find_txt( - neighbor, - 'AFData/Entry/PrefixesAdvertised' - ), - 0 - ) + this_neighbor["address_family"][this_afi][ + "received_prefixes" + ] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesAccepted" + ), + 0, + ) + napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesDenied" + ), + 0, + ) + this_neighbor["address_family"][this_afi][ + "accepted_prefixes" + ] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesAccepted" + ), + 0, + ) + this_neighbor["address_family"][this_afi][ + "sent_prefixes" + ] = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesAdvertised" + ), + 0, + ) except AttributeError: - this_neighbor['address_family'][this_afi]["received_prefixes"] = -1 - this_neighbor['address_family'][this_afi]["accepted_prefixes"] = -1 - this_neighbor['address_family'][this_afi]["sent_prefixes"] = -1 + this_neighbor["address_family"][this_afi]["received_prefixes"] = -1 + this_neighbor["address_family"][this_afi]["accepted_prefixes"] = -1 
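A short sketch of the per-address-family entry this block produces for one neighbor (counts hypothetical; every counter falls back to -1 when the AFData entry is missing):

    this_neighbor["address_family"]["ipv4"] = {
        "received_prefixes": 12,  # PrefixesAccepted + PrefixesDenied
        "accepted_prefixes": 10,
        "sent_prefixes": 5,
    }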
+ this_neighbor["address_family"][this_afi]["sent_prefixes"] = -1 neighbor_ip = napalm.base.helpers.ip( napalm.base.helpers.find_txt( - neighbor, 'Naming/NeighborAddress/IPV4Address') or - napalm.base.helpers.find_txt( - neighbor, 'Naming/NeighborAddress/IPV6Address') + neighbor, "Naming/NeighborAddress/IPV4Address" + ) + or napalm.base.helpers.find_txt( + neighbor, "Naming/NeighborAddress/IPV6Address" + ) ) neighbors[neighbor_ip] = this_neighbor - this_vrf['peers'] = neighbors + this_vrf["peers"] = neighbors result[vrf] = this_vrf return result def get_environment(self): def get_module_xml_query(module, selection): - return '\ + return "\ 0{slot}{name}\ \ - '.format(slot=module, name=selection) + ".format( + slot=module, name=selection + ) environment_status = {} - environment_status['fans'] = {} - environment_status['temperature'] = {} - environment_status['power'] = {} - environment_status['cpu'] = {} - environment_status['memory'] = 0.0 + environment_status["fans"] = {} + environment_status["temperature"] = {} + environment_status["power"] = {} + environment_status["cpu"] = {} + environment_status["memory"] = 0.0 # finding slots with equipment we're interested in - rpc_command = '\ + rpc_command = "\ 0\ - ' + " result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) @@ -503,9 +584,13 @@ def get_module_xml_query(module, selection): for slot in result_tree.xpath(".//Slot"): for card in slot.xpath(".//CardTable"): # find enabled slots, figoure out type and save for later - if napalm.base.helpers.find_txt( - card, 'Card/Attributes/FRUInfo/ModuleAdministrativeState') == "ADMIN_UP": - slot_name = napalm.base.helpers.find_txt(slot, 'Naming/Name') + if ( + napalm.base.helpers.find_txt( + card, "Card/Attributes/FRUInfo/ModuleAdministrativeState" + ) + == "ADMIN_UP" + ): + slot_name = napalm.base.helpers.find_txt(slot, "Naming/Name") module_type = re.sub(r"\d+", "", slot_name) if len(module_type) > 0: active_modules[module_type].append(slot_name) @@ -516,105 +601,125 @@ def get_module_xml_query(module, selection): # PSU's # - for psu in active_modules['PM']: - if psu in ["PM6", "PM7"]: # Cisco bug, chassis difference V01<->V02 + for psu in active_modules["PM"]: + if psu in ["PM6", "PM7"]: # Cisco bug, chassis difference V01<->V02 continue - rpc_command = get_module_xml_query(psu, '') + rpc_command = get_module_xml_query(psu, "") result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) psu_status = {} - psu_status['status'] = False - psu_status['capacity'] = 0.0 - psu_status['output'] = 0.0 + psu_status["status"] = False + psu_status["capacity"] = 0.0 + psu_status["output"] = 0.0 - for sensor in result_tree.xpath('.//SensorName'): - if napalm.base.helpers.find_txt(sensor, 'Naming/Name') == "host__VOLT": + for sensor in result_tree.xpath(".//SensorName"): + if napalm.base.helpers.find_txt(sensor, "Naming/Name") == "host__VOLT": this_psu_voltage = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(sensor, 'ValueBrief')) - elif napalm.base.helpers.find_txt(sensor, 'Naming/Name') == "host__CURR": + float, napalm.base.helpers.find_txt(sensor, "ValueBrief") + ) + elif ( + napalm.base.helpers.find_txt(sensor, "Naming/Name") == "host__CURR" + ): this_psu_current = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(sensor, 'ValueBrief')) - elif napalm.base.helpers.find_txt(sensor, 'Naming/Name') == "host__PM": + float, napalm.base.helpers.find_txt(sensor, "ValueBrief") + ) + elif napalm.base.helpers.find_txt(sensor, "Naming/Name") == "host__PM": 
this_psu_capacity = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(sensor, 'ValueBrief')) + float, napalm.base.helpers.find_txt(sensor, "ValueBrief") + ) if this_psu_capacity > 0: - psu_status['capacity'] = this_psu_capacity - psu_status['status'] = True + psu_status["capacity"] = this_psu_capacity + psu_status["status"] = True if this_psu_current and this_psu_voltage: - psu_status['output'] = (this_psu_voltage * this_psu_current) / 1000000.0 + psu_status["output"] = (this_psu_voltage * this_psu_current) / 1000000.0 - environment_status['power'][psu] = psu_status + environment_status["power"][psu] = psu_status # # Memory # facts = self.get_facts() - router_model = facts.get('model') - is_xrv = router_model.lower().startswith('xrv') - environment_status['memory'] = { - 'available_ram': 0.0, - 'used_ram': 0.0 - } + router_model = facts.get("model") + is_xrv = router_model.lower().startswith("xrv") + environment_status["memory"] = {"available_ram": 0.0, "used_ram": 0.0} if not is_xrv: - rpc_command = '\ - ' + rpc_command = "\ + " result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for node in result_tree.xpath('.//Node'): - if napalm.base.helpers.find_txt(node, - 'Naming/NodeName/Slot') == active_modules['RSP'][0]: + for node in result_tree.xpath(".//Node"): + if ( + napalm.base.helpers.find_txt(node, "Naming/NodeName/Slot") + == active_modules["RSP"][0] + ): available_ram = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(node, 'Summary/SystemRAMMemory')) + int, + napalm.base.helpers.find_txt(node, "Summary/SystemRAMMemory"), + ) free_ram = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(node, 'Summary/FreeApplicationMemory')) - break # we're only looking at one of the RSP's + int, + napalm.base.helpers.find_txt( + node, "Summary/FreeApplicationMemory" + ), + ) + break # we're only looking at one of the RSP's if available_ram and free_ram: used_ram = available_ram - free_ram memory = {} - memory['available_ram'] = available_ram - memory['used_ram'] = used_ram - environment_status['memory'] = memory + memory["available_ram"] = available_ram + memory["used_ram"] = used_ram + environment_status["memory"] = memory # # Fans # - for fan in active_modules['FT']: - rpc_command = get_module_xml_query(fan, '') + for fan in active_modules["FT"]: + rpc_command = get_module_xml_query(fan, "") result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for module in result_tree.xpath('.//Module'): - for sensortype in module.xpath('.//SensorType'): - for sensorname in sensortype.xpath('.//SensorNameTable'): - if napalm.base.helpers.find_txt( - sensorname, - 'SensorName/Naming/Name') == "host__FanSpeed_0": - environment_status['fans'][fan] = { - 'status': napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - sensorname, 'SensorName/ValueDetailed/Status')) == 1} + for module in result_tree.xpath(".//Module"): + for sensortype in module.xpath(".//SensorType"): + for sensorname in sensortype.xpath(".//SensorNameTable"): + if ( + napalm.base.helpers.find_txt( + sensorname, "SensorName/Naming/Name" + ) + == "host__FanSpeed_0" + ): + environment_status["fans"][fan] = { + "status": napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + sensorname, + "SensorName/ValueDetailed/Status", + ), + ) + == 1 + } # # CPU # cpu = {} - rpc_command = '' + rpc_command = "" result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for module in result_tree.xpath('.//CPUUtilization'): + for module in 
result_tree.xpath(".//CPUUtilization"): this_cpu = {} this_cpu["%usage"] = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(module, 'TotalCPUFiveMinute')) + float, napalm.base.helpers.find_txt(module, "TotalCPUFiveMinute") + ) - rack = napalm.base.helpers.find_txt(module, 'Naming/NodeName/Rack') - slot = napalm.base.helpers.find_txt(module, 'Naming/NodeName/Slot') - instance = napalm.base.helpers.find_txt(module, 'Naming/NodeName/Instance') + rack = napalm.base.helpers.find_txt(module, "Naming/NodeName/Rack") + slot = napalm.base.helpers.find_txt(module, "Naming/NodeName/Slot") + instance = napalm.base.helpers.find_txt(module, "Naming/NodeName/Instance") position = "%s/%s/%s" % (rack, slot, instance) cpu[position] = this_cpu @@ -631,23 +736,33 @@ def get_module_xml_query(module, selection): if not is_xrv: for slot in slot_list: - rpc_command = get_module_xml_query(slot, '') + rpc_command = get_module_xml_query(slot, "") result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) for sensor in result_tree.xpath(".//SensorName"): - if not napalm.base.helpers.find_txt(sensor, 'Naming/Name') == "host__Inlet0": + if ( + not napalm.base.helpers.find_txt(sensor, "Naming/Name") + == "host__Inlet0" + ): continue this_reading = {} - this_reading['temperature'] = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(sensor, 'ValueBrief')) + this_reading["temperature"] = napalm.base.helpers.convert( + float, napalm.base.helpers.find_txt(sensor, "ValueBrief") + ) threshold_value = [ napalm.base.helpers.convert(float, x.text) for x in sensor.xpath("ThresholdTable/Threshold/ValueBrief") ] - this_reading['is_alert'] = \ - threshold_value[2] <= this_reading['temperature'] <= threshold_value[3] - this_reading['is_critical'] = \ - threshold_value[4] <= this_reading['temperature'] <= threshold_value[5] - this_reading['temperature'] = this_reading['temperature']/10 + this_reading["is_alert"] = ( + threshold_value[2] + <= this_reading["temperature"] + <= threshold_value[3] + ) + this_reading["is_critical"] = ( + threshold_value[4] + <= this_reading["temperature"] + <= threshold_value[5] + ) + this_reading["temperature"] = this_reading["temperature"] / 10 environment_status["temperature"][slot] = this_reading return environment_status @@ -663,56 +778,73 @@ def get_lldp_neighbors(self): if local_interface not in lldp.keys(): lldp[local_interface] = [] - lldp[local_interface].append({ - 'hostname': napalm.base.helpers.convert( - text_type, n.split()[0]), - 'port': napalm.base.helpers.convert( - text_type, n.split()[4]) - }) + lldp[local_interface].append( + { + "hostname": napalm.base.helpers.convert(text_type, n.split()[0]), + "port": napalm.base.helpers.convert(text_type, n.split()[4]), + } + ) return lldp - def get_lldp_neighbors_detail(self, interface=''): + def get_lldp_neighbors_detail(self, interface=""): lldp_neighbors = {} - rpc_command = '' + rpc_command = "" result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for neighbor in result_tree.xpath('.//Neighbors/DetailTable/Detail/Entry'): + for neighbor in result_tree.xpath(".//Neighbors/DetailTable/Detail/Entry"): interface_name = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'ReceivingInterfaceName')) + text_type, + napalm.base.helpers.find_txt(neighbor, "ReceivingInterfaceName"), + ) parent_interface = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'ReceivingParentInterfaceName')) - chassis_id_raw = 
napalm.base.helpers.find_txt(neighbor, 'ChassisID') + text_type, + napalm.base.helpers.find_txt(neighbor, "ReceivingParentInterfaceName"), + ) + chassis_id_raw = napalm.base.helpers.find_txt(neighbor, "ChassisID") chassis_id = napalm.base.helpers.convert( - napalm.base.helpers.mac, chassis_id_raw, chassis_id_raw) + napalm.base.helpers.mac, chassis_id_raw, chassis_id_raw + ) port_id = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'PortIDDetail')) + text_type, napalm.base.helpers.find_txt(neighbor, "PortIDDetail") + ) port_descr = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'Detail/PortDescription')) + text_type, + napalm.base.helpers.find_txt(neighbor, "Detail/PortDescription"), + ) system_name = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'Detail/SystemName')) + text_type, napalm.base.helpers.find_txt(neighbor, "Detail/SystemName") + ) system_descr = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'Detail/SystemDescription')) + text_type, + napalm.base.helpers.find_txt(neighbor, "Detail/SystemDescription"), + ) system_capabilities = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'Detail/SystemCapabilities')) + text_type, + napalm.base.helpers.find_txt(neighbor, "Detail/SystemCapabilities"), + ) enabled_capabilities = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(neighbor, 'Detail/EnabledCapabilities')) + text_type, + napalm.base.helpers.find_txt(neighbor, "Detail/EnabledCapabilities"), + ) if interface_name not in lldp_neighbors.keys(): lldp_neighbors[interface_name] = [] - lldp_neighbors[interface_name].append({ - 'parent_interface': parent_interface, - 'remote_chassis_id': chassis_id, - 'remote_port': port_id, - 'remote_port_description': port_descr, - 'remote_system_name': system_name, - 'remote_system_description': system_descr, - 'remote_system_capab': system_capabilities, - 'remote_system_enable_capab': enabled_capabilities - }) + lldp_neighbors[interface_name].append( + { + "parent_interface": parent_interface, + "remote_chassis_id": chassis_id, + "remote_port": port_id, + "remote_port_description": port_descr, + "remote_system_name": system_name, + "remote_system_description": system_descr, + "remote_system_capab": system_capabilities, + "remote_system_enable_capab": enabled_capabilities, + } + ) return lldp_neighbors @@ -721,19 +853,25 @@ def cli(self, commands): cli_output = {} if type(commands) is not list: - raise TypeError('Please enter a valid list of commands!') + raise TypeError("Please enter a valid list of commands!") for command in commands: try: - cli_output[text_type(command)] = text_type(self.device._execute_show(command)) + cli_output[text_type(command)] = text_type( + self.device._execute_show(command) + ) except TimeoutError: - cli_output[text_type(command)] = 'Execution of command \ - "{command}" took too long! Please adjust your params!'.format(command=command) + cli_output[ + text_type(command) + ] = 'Execution of command \ + "{command}" took too long! 
Please adjust your params!'.format( + command=command + ) raise CommandTimeoutException(str(cli_output)) return cli_output - def get_bgp_config(self, group='', neighbor=''): + def get_bgp_config(self, group="", neighbor=""): bgp_config = {} @@ -742,21 +880,21 @@ def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout): prefix_limit = {} inet = False inet6 = False - preifx_type = 'inet' - if 'ipv4' in af_table.lower(): + preifx_type = "inet" + if "ipv4" in af_table.lower(): inet = True - if 'ipv6' in af_table.lower(): + if "ipv6" in af_table.lower(): inet6 = True - preifx_type = 'inet6' + preifx_type = "inet6" if inet or inet6: prefix_limit = { preifx_type: { af_table[4:].lower(): { - 'limit': limit, - 'teardown': { - 'threshold': prefix_percent, - 'timeout': prefix_timeout - } + "limit": limit, + "teardown": { + "threshold": prefix_percent, + "timeout": prefix_timeout, + }, } } } @@ -764,350 +902,503 @@ def build_prefix_limit(af_table, limit, prefix_percent, prefix_timeout): # here begins actual method... - rpc_command = '\ - default' + rpc_command = "\ + default" result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) if not group: - neighbor = '' + neighbor = "" bgp_group_neighbors = {} - for bgp_neighbor in result_tree.xpath('.//Neighbor'): - group_name = napalm.base.helpers.find_txt(bgp_neighbor, 'NeighborGroupAddMember') + for bgp_neighbor in result_tree.xpath(".//Neighbor"): + group_name = napalm.base.helpers.find_txt( + bgp_neighbor, "NeighborGroupAddMember" + ) peer = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(bgp_neighbor, 'Naming/NeighborAddress/IPV4Address') or - napalm.base.helpers.find_txt(bgp_neighbor, 'Naming/NeighborAddress/IPV6Address') + napalm.base.helpers.find_txt( + bgp_neighbor, "Naming/NeighborAddress/IPV4Address" + ) + or napalm.base.helpers.find_txt( + bgp_neighbor, "Naming/NeighborAddress/IPV6Address" + ) ) if neighbor and peer != neighbor: continue - description = napalm.base.helpers.find_txt(bgp_neighbor, 'Description') + description = napalm.base.helpers.find_txt(bgp_neighbor, "Description") peer_as = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(bgp_neighbor, 'RemoteAS/AS_YY'), 0) + int, napalm.base.helpers.find_txt(bgp_neighbor, "RemoteAS/AS_YY"), 0 + ) local_as = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(bgp_neighbor, 'LocalAS/AS_YY'), 0) + int, napalm.base.helpers.find_txt(bgp_neighbor, "LocalAS/AS_YY"), 0 + ) af_table = napalm.base.helpers.find_txt( - bgp_neighbor, 'NeighborAFTable/NeighborAF/Naming/AFName') - prefix_limit = napalm.base.helpers.convert(int, napalm.base.helpers.find_txt( - bgp_neighbor, 'NeighborAFTable/NeighborAF/MaximumPrefixes/PrefixLimit'), 0) - prefix_percent = napalm.base.helpers.convert(int, napalm.base.helpers.find_txt( - bgp_neighbor, 'NeighborAFTable/NeighborAF/MaximumPrefixes/WarningPercentage'), 0) - prefix_timeout = napalm.base.helpers.convert(int, napalm.base.helpers.find_txt( - bgp_neighbor, 'NeighborAFTable/NeighborAF/MaximumPrefixes/RestartTime'), 0) - import_policy = napalm.base.helpers.find_txt( - bgp_neighbor, 'NeighborAFTable/NeighborAF/RoutePolicyIn') - export_policy = napalm.base.helpers.find_txt( - bgp_neighbor, 'NeighborAFTable/NeighborAF/RoutePolicyOut') - local_addr_raw = ( + bgp_neighbor, "NeighborAFTable/NeighborAF/Naming/AFName" + ) + prefix_limit = napalm.base.helpers.convert( + int, napalm.base.helpers.find_txt( bgp_neighbor, - 'LocalAddress/LocalIPAddress/IPV4Address' - ) or napalm.base.helpers.find_txt( + 
"NeighborAFTable/NeighborAF/MaximumPrefixes/PrefixLimit", + ), + 0, + ) + prefix_percent = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( bgp_neighbor, - 'LocalAddress/LocalIPAddress/IPV6Address' - ) + "NeighborAFTable/NeighborAF/MaximumPrefixes/WarningPercentage", + ), + 0, + ) + prefix_timeout = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + bgp_neighbor, + "NeighborAFTable/NeighborAF/MaximumPrefixes/RestartTime", + ), + 0, + ) + import_policy = napalm.base.helpers.find_txt( + bgp_neighbor, "NeighborAFTable/NeighborAF/RoutePolicyIn" + ) + export_policy = napalm.base.helpers.find_txt( + bgp_neighbor, "NeighborAFTable/NeighborAF/RoutePolicyOut" + ) + local_addr_raw = napalm.base.helpers.find_txt( + bgp_neighbor, "LocalAddress/LocalIPAddress/IPV4Address" + ) or napalm.base.helpers.find_txt( + bgp_neighbor, "LocalAddress/LocalIPAddress/IPV6Address" + ) + local_address = napalm.base.helpers.convert( + napalm.base.helpers.ip, local_addr_raw, local_addr_raw + ) + password = napalm.base.helpers.find_txt( + bgp_neighbor, "Password/Password/Password" ) - local_address = napalm.base.helpers.convert(napalm.base.helpers.ip, - local_addr_raw, local_addr_raw) - password = napalm.base.helpers.find_txt(bgp_neighbor, 'Password/Password/Password') nhs = False route_reflector = False if group_name not in bgp_group_neighbors.keys(): bgp_group_neighbors[group_name] = {} bgp_group_neighbors[group_name][peer] = { - 'description': description, - 'remote_as': peer_as, - 'prefix_limit': build_prefix_limit( - af_table, prefix_limit, prefix_percent, prefix_timeout), - 'export_policy': export_policy, - 'import_policy': import_policy, - 'local_address': local_address, - 'local_as': local_as, - 'authentication_key': password, - 'nhs': nhs, - 'route_reflector_client': route_reflector + "description": description, + "remote_as": peer_as, + "prefix_limit": build_prefix_limit( + af_table, prefix_limit, prefix_percent, prefix_timeout + ), + "export_policy": export_policy, + "import_policy": import_policy, + "local_address": local_address, + "local_as": local_as, + "authentication_key": password, + "nhs": nhs, + "route_reflector_client": route_reflector, } if neighbor and peer == neighbor: break - for bgp_group in result_tree.xpath('.//NeighborGroup'): - group_name = napalm.base.helpers.find_txt(bgp_group, 'Naming/NeighborGroupName') + for bgp_group in result_tree.xpath(".//NeighborGroup"): + group_name = napalm.base.helpers.find_txt( + bgp_group, "Naming/NeighborGroupName" + ) if group and group != group_name: continue - bgp_type = 'external' # by default external + bgp_type = "external" # by default external # must check - description = napalm.base.helpers.find_txt(bgp_group, 'Description') + description = napalm.base.helpers.find_txt(bgp_group, "Description") import_policy = napalm.base.helpers.find_txt( - bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/RoutePolicyIn') + bgp_group, "NeighborGroupAFTable/NeighborGroupAF/RoutePolicyIn" + ) export_policy = napalm.base.helpers.find_txt( - bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/RoutePolicyOut') - multipath = napalm.base.helpers.find_txt( - bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/Multipath') == 'true' + bgp_group, "NeighborGroupAFTable/NeighborGroupAF/RoutePolicyOut" + ) + multipath = ( + napalm.base.helpers.find_txt( + bgp_group, "NeighborGroupAFTable/NeighborGroupAF/Multipath" + ) + == "true" + ) peer_as = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(bgp_group, 'RemoteAS/AS_YY'), 0) + int, 
napalm.base.helpers.find_txt(bgp_group, "RemoteAS/AS_YY"), 0 + ) local_as = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(bgp_group, 'LocalAS/AS_YY'), 0) + int, napalm.base.helpers.find_txt(bgp_group, "LocalAS/AS_YY"), 0 + ) multihop_ttl = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(bgp_group, 'EBGPMultihop/MaxHopCount'), 0) - local_addr_raw = (napalm.base.helpers.find_txt( - bgp_group, 'LocalAddress/LocalIPAddress/IPV4Address') or - napalm.base.helpers.find_txt( - bgp_group, 'LocalAddress/LocalIPAddress/IPV6Address')) + int, + napalm.base.helpers.find_txt(bgp_group, "EBGPMultihop/MaxHopCount"), + 0, + ) + local_addr_raw = napalm.base.helpers.find_txt( + bgp_group, "LocalAddress/LocalIPAddress/IPV4Address" + ) or napalm.base.helpers.find_txt( + bgp_group, "LocalAddress/LocalIPAddress/IPV6Address" + ) local_address = napalm.base.helpers.convert( - napalm.base.helpers.ip, local_addr_raw, local_addr_raw) + napalm.base.helpers.ip, local_addr_raw, local_addr_raw + ) af_table = napalm.base.helpers.find_txt( - bgp_group, 'NeighborAFTable/NeighborAF/Naming/AFName') + bgp_group, "NeighborAFTable/NeighborAF/Naming/AFName" + ) prefix_limit = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( + int, + napalm.base.helpers.find_txt( bgp_group, - 'NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/PrefixLimit'), 0) + "NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/PrefixLimit", + ), + 0, + ) prefix_percent = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( bgp_group, - 'NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/WarningPercentage' + "NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/WarningPercentage", ), - 0 + 0, + ) + prefix_timeout = napalm.base.helpers.convert( + int, + napalm.base.helpers.find_txt( + bgp_group, + "NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/RestartTime", + ), + 0, ) - prefix_timeout = napalm.base.helpers.convert(int, napalm.base.helpers.find_txt( - bgp_group, 'NeighborGroupAFTable/NeighborGroupAF/MaximumPrefixes/RestartTime'), 0) remove_private = True # is it specified in the XML? bgp_config[group_name] = { - 'apply_groups': [], # on IOS-XR will always be empty list! - 'description': description, - 'local_as': local_as, - 'type': text_type(bgp_type), - 'import_policy': import_policy, - 'export_policy': export_policy, - 'local_address': local_address, - 'multipath': multipath, - 'multihop_ttl': multihop_ttl, - 'remote_as': peer_as, - 'remove_private_as': remove_private, - 'prefix_limit': build_prefix_limit( - af_table, prefix_limit, prefix_percent, prefix_timeout), - 'neighbors': bgp_group_neighbors.get(group_name, {}) + "apply_groups": [], # on IOS-XR will always be empty list! 
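For example, the build_prefix_limit helper defined earlier, called with an IPv4 unicast address family and hypothetical thresholds, returns roughly:

    build_prefix_limit("IPv4Unicast", 100, 90, 30)
    # {
    #     "inet": {
    #         "unicast": {
    #             "limit": 100,
    #             "teardown": {"threshold": 90, "timeout": 30},
    #         }
    #     }
    # }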
+ "description": description, + "local_as": local_as, + "type": text_type(bgp_type), + "import_policy": import_policy, + "export_policy": export_policy, + "local_address": local_address, + "multipath": multipath, + "multihop_ttl": multihop_ttl, + "remote_as": peer_as, + "remove_private_as": remove_private, + "prefix_limit": build_prefix_limit( + af_table, prefix_limit, prefix_percent, prefix_timeout + ), + "neighbors": bgp_group_neighbors.get(group_name, {}), } if group and group == group_name: break - if '' in bgp_group_neighbors.keys(): - bgp_config['_'] = { - 'apply_groups': [], - 'description': '', - 'local_as': 0, - 'type': '', - 'import_policy': '', - 'export_policy': '', - 'local_address': '', - 'multipath': False, - 'multihop_ttl': 0, - 'remote_as': 0, - 'remove_private_as': False, - 'prefix_limit': {}, - 'neighbors': bgp_group_neighbors.get('', {}) + if "" in bgp_group_neighbors.keys(): + bgp_config["_"] = { + "apply_groups": [], + "description": "", + "local_as": 0, + "type": "", + "import_policy": "", + "export_policy": "", + "local_address": "", + "multipath": False, + "multihop_ttl": 0, + "remote_as": 0, + "remove_private_as": False, + "prefix_limit": {}, + "neighbors": bgp_group_neighbors.get("", {}), } return bgp_config - def get_bgp_neighbors_detail(self, neighbor_address=''): + def get_bgp_neighbors_detail(self, neighbor_address=""): bgp_neighbors_detail = {} - active_vrfs = ['default'] + active_vrfs = ["default"] - active_vrfs_rpc_request = '\ + active_vrfs_rpc_request = "\ default\ - ' + " active_vrfs_rpc_reply = ETREE.fromstring( - self.device.make_rpc_call(active_vrfs_rpc_request)) - active_vrfs_tree = active_vrfs_rpc_reply.xpath('.//ConfigVRF') + self.device.make_rpc_call(active_vrfs_rpc_request) + ) + active_vrfs_tree = active_vrfs_rpc_reply.xpath(".//ConfigVRF") for active_vrf_tree in active_vrfs_tree: - active_vrfs.append(napalm.base.helpers.find_txt(active_vrf_tree, 'Naming/VRFName')) + active_vrfs.append( + napalm.base.helpers.find_txt(active_vrf_tree, "Naming/VRFName") + ) unique_active_vrfs = sorted(set(active_vrfs)) - bgp_neighbors_vrf_all_rpc = '\ - default' + bgp_neighbors_vrf_all_rpc = "\ + default" for active_vrf in unique_active_vrfs: - vrf_rpc = '{vrf_name}\ - ' + vrf_rpc = "{vrf_name}\ + " bgp_neighbors_vrf_all_rpc += vrf_rpc.format(vrf_name=active_vrf) - bgp_neighbors_vrf_all_rpc += '' + bgp_neighbors_vrf_all_rpc += ( + "" + ) bgp_neighbors_vrf_all_tree = ETREE.fromstring( - self.device.make_rpc_call(bgp_neighbors_vrf_all_rpc)) + self.device.make_rpc_call(bgp_neighbors_vrf_all_rpc) + ) _BGP_STATE_ = { - '0': 'Unknown', - '1': 'Idle', - '2': 'Connect', - '3': 'OpenSent', - '4': 'OpenConfirm', - '5': 'Active', - '6': 'Established' + "0": "Unknown", + "1": "Idle", + "2": "Connect", + "3": "OpenSent", + "4": "OpenConfirm", + "5": "Active", + "6": "Established", } instance_active_list = bgp_neighbors_vrf_all_tree.xpath( - './/InstanceTable/Instance/InstanceActive/VRFTable/VRF') + ".//InstanceTable/Instance/InstanceActive/VRFTable/VRF" + ) for vrf_tree in instance_active_list: - vrf_name = napalm.base.helpers.find_txt(vrf_tree, 'Naming/VRFName') + vrf_name = napalm.base.helpers.find_txt(vrf_tree, "Naming/VRFName") vrf_keepalive = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - instance_active_list, 'GlobalProcessInfo/VRF/KeepAliveTime')) + int, + napalm.base.helpers.find_txt( + instance_active_list, "GlobalProcessInfo/VRF/KeepAliveTime" + ), + ) vrf_holdtime = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - 
instance_active_list, 'GlobalProcessInfo/VRF/HoldTime')) + int, + napalm.base.helpers.find_txt( + instance_active_list, "GlobalProcessInfo/VRF/HoldTime" + ), + ) if vrf_name not in bgp_neighbors_detail.keys(): bgp_neighbors_detail[vrf_name] = {} - for neighbor in vrf_tree.xpath('NeighborTable/Neighbor'): - up = (napalm.base.helpers.find_txt(neighbor, 'ConnectionState') == 'BGP_ST_ESTAB') + for neighbor in vrf_tree.xpath("NeighborTable/Neighbor"): + up = ( + napalm.base.helpers.find_txt(neighbor, "ConnectionState") + == "BGP_ST_ESTAB" + ) local_as = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'LocalAS'), 0) + int, napalm.base.helpers.find_txt(neighbor, "LocalAS"), 0 + ) remote_as = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'RemoteAS'), 0) + int, napalm.base.helpers.find_txt(neighbor, "RemoteAS"), 0 + ) router_id = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(neighbor, 'RouterID')) + napalm.base.helpers.find_txt(neighbor, "RouterID") + ) remote_address = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(neighbor, 'Naming/NeighborAddress/IPV4Address') or - napalm.base.helpers.find_txt(neighbor, 'Naming/NeighborAddress/IPV6Address') + napalm.base.helpers.find_txt( + neighbor, "Naming/NeighborAddress/IPV4Address" + ) + or napalm.base.helpers.find_txt( + neighbor, "Naming/NeighborAddress/IPV6Address" + ) + ) + local_address_configured = ( + napalm.base.helpers.find_txt(neighbor, "IsLocalAddressConfigured") + == "true" ) - local_address_configured = napalm.base.helpers.find_txt( - neighbor, 'IsLocalAddressConfigured') == 'true' local_address = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(neighbor, 'ConnectionLocalAddress/IPV4Address') or - napalm.base.helpers.find_txt(neighbor, 'ConnectionLocalAddress/IPV6Address') + napalm.base.helpers.find_txt( + neighbor, "ConnectionLocalAddress/IPV4Address" + ) + or napalm.base.helpers.find_txt( + neighbor, "ConnectionLocalAddress/IPV6Address" + ) ) local_port = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'ConnectionLocalPort')) + int, napalm.base.helpers.find_txt(neighbor, "ConnectionLocalPort") + ) remote_address = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/IPV4Address') or - napalm.base.helpers.find_txt(neighbor, 'ConnectionRemoteAddress/IPV6Address') + napalm.base.helpers.find_txt( + neighbor, "ConnectionRemoteAddress/IPV4Address" + ) + or napalm.base.helpers.find_txt( + neighbor, "ConnectionRemoteAddress/IPV6Address" + ) ) remote_port = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'ConnectionRemotePort')) - multihop = napalm.base.helpers.find_txt( - neighbor, 'IsExternalNeighborNotDirectlyConnected') == 'true' - remove_private_as = napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/RemovePrivateASFromUpdates') == 'true' - multipath = napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/SelectiveMultipathEligible') == 'true' + int, napalm.base.helpers.find_txt(neighbor, "ConnectionRemotePort") + ) + multihop = ( + napalm.base.helpers.find_txt( + neighbor, "IsExternalNeighborNotDirectlyConnected" + ) + == "true" + ) + remove_private_as = ( + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/RemovePrivateASFromUpdates" + ) + == "true" + ) + multipath = ( + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/SelectiveMultipathEligible" + ) + == "true" + ) import_policy = napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/RoutePolicyIn') + 
neighbor, "AFData/Entry/RoutePolicyIn" + ) export_policy = napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/RoutePolicyOut') + neighbor, "AFData/Entry/RoutePolicyOut" + ) input_messages = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'MessgesReceived'), 0) + int, napalm.base.helpers.find_txt(neighbor, "MessgesReceived"), 0 + ) output_messages = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'MessagesSent'), 0) + int, napalm.base.helpers.find_txt(neighbor, "MessagesSent"), 0 + ) connection_down_count = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'ConnectionDownCount'), 0) + int, + napalm.base.helpers.find_txt(neighbor, "ConnectionDownCount"), + 0, + ) messages_queued_out = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'MessagesQueuedOut'), 0) - connection_state = napalm.base.helpers.find_txt( - neighbor, 'ConnectionState').replace('BGP_ST_', '').title() - if connection_state == u'Estab': - connection_state = u'Established' + int, napalm.base.helpers.find_txt(neighbor, "MessagesQueuedOut"), 0 + ) + connection_state = ( + napalm.base.helpers.find_txt(neighbor, "ConnectionState") + .replace("BGP_ST_", "") + .title() + ) + if connection_state == "Estab": + connection_state = "Established" previous_connection_state = napalm.base.helpers.convert( - text_type, _BGP_STATE_.get(napalm.base.helpers.find_txt( - neighbor, 'PreviousConnectionState', '0'))) + text_type, + _BGP_STATE_.get( + napalm.base.helpers.find_txt( + neighbor, "PreviousConnectionState", "0" + ) + ), + ) active_prefix_count = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/NumberOfBestpaths'), 0) + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/NumberOfBestpaths" + ), + 0, + ) accepted_prefix_count = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/PrefixesAccepted'), 0) + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesAccepted" + ), + 0, + ) suppressed_prefix_count = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'AFData/Entry/PrefixesDenied'), 0) + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesDenied" + ), + 0, + ) received_prefix_count = accepted_prefix_count + suppressed_prefix_count advertised_prefix_count = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - neighbor, 'AFData/Entry/PrefixesAdvertised'), 0) - suppress_4byte_as = napalm.base.helpers.find_txt( - neighbor, 'Suppress4ByteAs') == 'true' - local_as_prepend = napalm.base.helpers.find_txt( - neighbor, 'LocalASNoPrepend') != 'true' - holdtime = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'HoldTime'), 0) or vrf_holdtime + int, + napalm.base.helpers.find_txt( + neighbor, "AFData/Entry/PrefixesAdvertised" + ), + 0, + ) + suppress_4byte_as = ( + napalm.base.helpers.find_txt(neighbor, "Suppress4ByteAs") == "true" + ) + local_as_prepend = ( + napalm.base.helpers.find_txt(neighbor, "LocalASNoPrepend") != "true" + ) + holdtime = ( + napalm.base.helpers.convert( + int, napalm.base.helpers.find_txt(neighbor, "HoldTime"), 0 + ) + or vrf_holdtime + ) configured_holdtime = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'ConfiguredHoldTime'), 0) - keepalive = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'KeepAliveTime'), 0) \ + int, napalm.base.helpers.find_txt(neighbor, 
"ConfiguredHoldTime"), 0 + ) + keepalive = ( + napalm.base.helpers.convert( + int, napalm.base.helpers.find_txt(neighbor, "KeepAliveTime"), 0 + ) or vrf_keepalive + ) configured_keepalive = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(neighbor, 'ConfiguredKeepalive'), 0) + int, + napalm.base.helpers.find_txt(neighbor, "ConfiguredKeepalive"), + 0, + ) flap_count = int(connection_down_count / 2) if up: flap_count -= 1 if remote_as not in bgp_neighbors_detail[vrf_name].keys(): bgp_neighbors_detail[vrf_name][remote_as] = [] - bgp_neighbors_detail[vrf_name][remote_as].append({ - 'up': up, - 'local_as': local_as, - 'remote_as': remote_as, - 'router_id': router_id, - 'local_address': local_address, - 'routing_table': vrf_name, - 'local_address_configured': local_address_configured, - 'local_port': local_port, - 'remote_address': remote_address, - 'remote_port': remote_port, - 'multihop': multihop, - 'multipath': multipath, - 'import_policy': import_policy, - 'export_policy': export_policy, - 'input_messages': input_messages, - 'output_messages': output_messages, - 'input_updates': 0, - 'output_updates': 0, - 'messages_queued_out': messages_queued_out, - 'connection_state': connection_state, - 'previous_connection_state': previous_connection_state, - 'last_event': u'', - 'remove_private_as': remove_private_as, - 'suppress_4byte_as': suppress_4byte_as, - 'local_as_prepend': local_as_prepend, - 'holdtime': holdtime, - 'configured_holdtime': configured_holdtime, - 'keepalive': keepalive, - 'configured_keepalive': configured_keepalive, - 'active_prefix_count': active_prefix_count, - 'received_prefix_count': received_prefix_count, - 'accepted_prefix_count': accepted_prefix_count, - 'suppressed_prefix_count': suppressed_prefix_count, - 'advertised_prefix_count': advertised_prefix_count, - 'flap_count': flap_count - }) - bgp_neighbors_detail['global'] = bgp_neighbors_detail.pop('default') + bgp_neighbors_detail[vrf_name][remote_as].append( + { + "up": up, + "local_as": local_as, + "remote_as": remote_as, + "router_id": router_id, + "local_address": local_address, + "routing_table": vrf_name, + "local_address_configured": local_address_configured, + "local_port": local_port, + "remote_address": remote_address, + "remote_port": remote_port, + "multihop": multihop, + "multipath": multipath, + "import_policy": import_policy, + "export_policy": export_policy, + "input_messages": input_messages, + "output_messages": output_messages, + "input_updates": 0, + "output_updates": 0, + "messages_queued_out": messages_queued_out, + "connection_state": connection_state, + "previous_connection_state": previous_connection_state, + "last_event": "", + "remove_private_as": remove_private_as, + "suppress_4byte_as": suppress_4byte_as, + "local_as_prepend": local_as_prepend, + "holdtime": holdtime, + "configured_holdtime": configured_holdtime, + "keepalive": keepalive, + "configured_keepalive": configured_keepalive, + "active_prefix_count": active_prefix_count, + "received_prefix_count": received_prefix_count, + "accepted_prefix_count": accepted_prefix_count, + "suppressed_prefix_count": suppressed_prefix_count, + "advertised_prefix_count": advertised_prefix_count, + "flap_count": flap_count, + } + ) + bgp_neighbors_detail["global"] = bgp_neighbors_detail.pop("default") return bgp_neighbors_detail def get_arp_table(self): arp_table = [] - rpc_command = '' + rpc_command = "" result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for arp_entry in result_tree.xpath('.//EntryTable/Entry'): + 
for arp_entry in result_tree.xpath(".//EntryTable/Entry"): interface = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(arp_entry, './/InterfaceName')) + text_type, napalm.base.helpers.find_txt(arp_entry, ".//InterfaceName") + ) ip = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(arp_entry, './/Address')) + text_type, napalm.base.helpers.find_txt(arp_entry, ".//Address") + ) age = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - arp_entry, './/Age'), 0.0) - mac_raw = napalm.base.helpers.find_txt(arp_entry, './/HardwareAddress') + float, napalm.base.helpers.find_txt(arp_entry, ".//Age"), 0.0 + ) + mac_raw = napalm.base.helpers.find_txt(arp_entry, ".//HardwareAddress") arp_table.append( { - 'interface': interface, - 'mac': napalm.base.helpers.mac(mac_raw), - 'ip': napalm.base.helpers.ip(ip), - 'age': age + "interface": interface, + "mac": napalm.base.helpers.mac(mac_raw), + "ip": napalm.base.helpers.ip(ip), + "age": age, } ) @@ -1117,19 +1408,21 @@ def get_ntp_peers(self): ntp_peers = {} - rpc_command = '' + rpc_command = "" result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for version in ['IPV4', 'IPV6']: - xpath = './/Peer{version}Table/Peer{version}'.format(version=version) + for version in ["IPV4", "IPV6"]: + xpath = ".//Peer{version}Table/Peer{version}".format(version=version) for peer in result_tree.xpath(xpath): peer_type = napalm.base.helpers.find_txt( - peer, 'PeerType{version}/Naming/PeerType'.format(version=version)) - if peer_type != 'Peer': + peer, "PeerType{version}/Naming/PeerType".format(version=version) + ) + if peer_type != "Peer": continue peer_address = napalm.base.helpers.find_txt( - peer, 'Naming/Address{version}'.format(version=version)) + peer, "Naming/Address{version}".format(version=version) + ) if not peer_address: continue ntp_peers[peer_address] = {} @@ -1140,19 +1433,21 @@ def get_ntp_servers(self): ntp_servers = {} - rpc_command = '' + rpc_command = "" result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for version in ['IPV4', 'IPV6']: - xpath = './/Peer{version}Table/Peer{version}'.format(version=version) + for version in ["IPV4", "IPV6"]: + xpath = ".//Peer{version}Table/Peer{version}".format(version=version) for peer in result_tree.xpath(xpath): peer_type = napalm.base.helpers.find_txt( - peer, 'PeerType{version}/Naming/PeerType'.format(version=version)) - if peer_type != 'Server': + peer, "PeerType{version}/Naming/PeerType".format(version=version) + ) + if peer_type != "Server": continue server_address = napalm.base.helpers.find_txt( - peer, 'Naming/Address{version}'.format(version=version)) + peer, "Naming/Address{version}".format(version=version) + ) if not server_address: continue ntp_servers[server_address] = {} @@ -1163,42 +1458,52 @@ def get_ntp_stats(self): ntp_stats = [] - rpc_command = '' + rpc_command = ( + "" + ) result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - xpath = './/NodeTable/Node/Associations/PeerSummaryInfo/Entry/PeerInfoCommon' + xpath = ".//NodeTable/Node/Associations/PeerSummaryInfo/Entry/PeerInfoCommon" for node in result_tree.xpath(xpath): - synchronized = napalm.base.helpers.find_txt(node, 'IsSysPeer') == 'true' - address = napalm.base.helpers.find_txt(node, 'Address') - if address == 'DLRSC node': + synchronized = napalm.base.helpers.find_txt(node, "IsSysPeer") == "true" + address = napalm.base.helpers.find_txt(node, "Address") + if address == "DLRSC node": continue - referenceid = 
napalm.base.helpers.find_txt(node, 'ReferenceID') + referenceid = napalm.base.helpers.find_txt(node, "ReferenceID") hostpoll = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(node, 'HostPoll', '0')) + int, napalm.base.helpers.find_txt(node, "HostPoll", "0") + ) reachability = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(node, 'Reachability', '0')) + int, napalm.base.helpers.find_txt(node, "Reachability", "0") + ) stratum = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(node, 'Stratum', '0')) + int, napalm.base.helpers.find_txt(node, "Stratum", "0") + ) delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(node, 'Delay', '0.0')) + float, napalm.base.helpers.find_txt(node, "Delay", "0.0") + ) offset = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(node, 'Offset', '0.0')) + float, napalm.base.helpers.find_txt(node, "Offset", "0.0") + ) jitter = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(node, 'Dispersion', '0.0')) - ntp_stats.append({ - 'remote': address, - 'synchronized': synchronized, - 'referenceid': referenceid, - 'stratum': stratum, - 'type': u'', - 'when': u'', - 'hostpoll': hostpoll, - 'reachability': reachability, - 'delay': delay, - 'offset': offset, - 'jitter': jitter - }) + float, napalm.base.helpers.find_txt(node, "Dispersion", "0.0") + ) + ntp_stats.append( + { + "remote": address, + "synchronized": synchronized, + "referenceid": referenceid, + "stratum": stratum, + "type": "", + "when": "", + "hostpoll": hostpoll, + "reachability": reachability, + "delay": delay, + "offset": offset, + "jitter": jitter, + } + ) return ntp_stats @@ -1206,60 +1511,81 @@ def get_interfaces_ip(self): interfaces_ip = {} - rpc_command_ipv4_ipv6 = '\ - ' + rpc_command_ipv4_ipv6 = "\ + " # only one request ipv4_ipv6_tree = ETREE.fromstring( - self.device.make_rpc_call(rpc_command_ipv4_ipv6)) + self.device.make_rpc_call(rpc_command_ipv4_ipv6) + ) # parsing IPv4 - ipv4_xpath = './/IPV4Network/InterfaceTable/Interface' + ipv4_xpath = ".//IPV4Network/InterfaceTable/Interface" for interface in ipv4_ipv6_tree.xpath(ipv4_xpath): interface_name = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(interface, 'Naming/InterfaceName')) - primary_ip = napalm.base.helpers.ip(napalm.base.helpers.find_txt( - interface, 'VRFTable/VRF/Detail/PrimaryAddress')) + text_type, + napalm.base.helpers.find_txt(interface, "Naming/InterfaceName"), + ) + primary_ip = napalm.base.helpers.ip( + napalm.base.helpers.find_txt( + interface, "VRFTable/VRF/Detail/PrimaryAddress" + ) + ) primary_prefix = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(interface, 'VRFTable/VRF/Detail/PrefixLength')) + int, + napalm.base.helpers.find_txt( + interface, "VRFTable/VRF/Detail/PrefixLength" + ), + ) if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if u'ipv4' not in interfaces_ip[interface_name].keys(): - interfaces_ip[interface_name][u'ipv4'] = {} - if primary_ip not in interfaces_ip[interface_name].get(u'ipv4', {}).keys(): - interfaces_ip[interface_name][u'ipv4'][primary_ip] = { - u'prefix_length': primary_prefix + if "ipv4" not in interfaces_ip[interface_name].keys(): + interfaces_ip[interface_name]["ipv4"] = {} + if primary_ip not in interfaces_ip[interface_name].get("ipv4", {}).keys(): + interfaces_ip[interface_name]["ipv4"][primary_ip] = { + "prefix_length": primary_prefix } - for secondary_address in 
interface.xpath('VRFTable/VRF/Detail/SecondaryAddress/Entry'): + for secondary_address in interface.xpath( + "VRFTable/VRF/Detail/SecondaryAddress/Entry" + ): secondary_ip = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(secondary_address, 'Address')) + napalm.base.helpers.find_txt(secondary_address, "Address") + ) secondary_prefix = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(secondary_address, 'PrefixLength')) + int, napalm.base.helpers.find_txt(secondary_address, "PrefixLength") + ) if secondary_ip not in interfaces_ip[interface_name]: - interfaces_ip[interface_name][u'ipv4'][secondary_ip] = { - u'prefix_length': secondary_prefix + interfaces_ip[interface_name]["ipv4"][secondary_ip] = { + "prefix_length": secondary_prefix } # parsing IPv6 ipv6_xpath = ( - './/IPV6Network/NodeTable/Node/InterfaceData' - '/VRFTable/VRF/GlobalDetailTable/GlobalDetail' + ".//IPV6Network/NodeTable/Node/InterfaceData" + "/VRFTable/VRF/GlobalDetailTable/GlobalDetail" ) for interface in ipv4_ipv6_tree.xpath(ipv6_xpath): interface_name = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(interface, 'Naming/InterfaceName')) + text_type, + napalm.base.helpers.find_txt(interface, "Naming/InterfaceName"), + ) if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if u'ipv6' not in interfaces_ip[interface_name].keys(): - interfaces_ip[interface_name][u'ipv6'] = {} - for address in interface.xpath('AddressList/Entry'): + if "ipv6" not in interfaces_ip[interface_name].keys(): + interfaces_ip[interface_name]["ipv6"] = {} + for address in interface.xpath("AddressList/Entry"): address_ip = napalm.base.helpers.ip( - napalm.base.helpers.find_txt(address, 'Address')) + napalm.base.helpers.find_txt(address, "Address") + ) address_prefix = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(address, 'PrefixLength')) - if address_ip not in interfaces_ip[interface_name].get(u'ipv6', {}).keys(): - interfaces_ip[interface_name][u'ipv6'][address_ip] = { - u'prefix_length': address_prefix + int, napalm.base.helpers.find_txt(address, "PrefixLength") + ) + if ( + address_ip + not in interfaces_ip[interface_name].get("ipv6", {}).keys() + ): + interfaces_ip[interface_name]["ipv6"][address_ip] = { + "prefix_length": address_prefix } return interfaces_ip @@ -1268,202 +1594,220 @@ def get_mac_address_table(self): mac_table = [] - rpc_command = '' + rpc_command = ( + "" + ) result_tree = ETREE.fromstring(self.device.make_rpc_call(rpc_command)) - for mac_entry in result_tree.xpath('.//L2FIBMACDetailTable/L2FIBMACDetail'): - mac_raw = napalm.base.helpers.find_txt(mac_entry, 'Naming/Address') + for mac_entry in result_tree.xpath(".//L2FIBMACDetailTable/L2FIBMACDetail"): + mac_raw = napalm.base.helpers.find_txt(mac_entry, "Naming/Address") vlan = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt( - mac_entry, 'Naming/Name', '').replace('vlan', ''), 0) + int, + napalm.base.helpers.find_txt(mac_entry, "Naming/Name", "").replace( + "vlan", "" + ), + 0, + ) interface = napalm.base.helpers.find_txt( - mac_entry, 'Segment/AC/InterfaceHandle', u'') - - mac_table.append({ - 'mac': napalm.base.helpers.mac(mac_raw), - 'interface': interface, - 'vlan': vlan, - 'active': True, - 'static': False, - 'moves': 0, - 'last_move': 0.0 - }) + mac_entry, "Segment/AC/InterfaceHandle", "" + ) + + mac_table.append( + { + "mac": napalm.base.helpers.mac(mac_raw), + "interface": interface, + "vlan": vlan, + "active": True, + "static": False, + "moves": 0, + 
"last_move": 0.0, + } + ) return mac_table - def get_route_to(self, destination='', protocol=''): + def get_route_to(self, destination="", protocol=""): routes = {} if not isinstance(destination, py23_compat.string_types): - raise TypeError('Please specify a valid destination!') + raise TypeError("Please specify a valid destination!") protocol = protocol.lower() - if protocol == 'direct': - protocol = 'connected' + if protocol == "direct": + protocol = "connected" - dest_split = destination.split('/') + dest_split = destination.split("/") network = dest_split[0] - prefix_tag = '' + prefix_tag = "" if len(dest_split) == 2: - prefix_tag = '{prefix_length}'.format( - prefix_length=dest_split[1]) + prefix_tag = "{prefix_length}".format( + prefix_length=dest_split[1] + ) ipv = 4 try: ipv = IPAddress(network).version except AddrFormatError: - raise TypeError('Wrong destination IP Address!') + raise TypeError("Wrong destination IP Address!") if ipv == 6: route_info_rpc_command = ( - '' - 'defaultIPv6' - '' - 'Unicast' - '' - 'default' - '
' - '{network}
{prefix}
' - '
' - '
' + "" + "defaultIPv6" + "" + "Unicast" + "" + "default" + "
" + "{network}
{prefix}
" + "
" + "
" ).format(network=network, prefix=prefix_tag) else: route_info_rpc_command = ( - '' - 'default' - 'IPv4' - '' - 'Unicast' - '' - 'default' - '
' - '{network}
{prefix}
' - '
' - '
' + "" + "default" + "IPv4" + "" + "Unicast" + "" + "default" + "
" + "{network}
{prefix}
" + "
" + "
" ).format(network=network, prefix=prefix_tag) - routes_tree = ETREE.fromstring(self.device.make_rpc_call(route_info_rpc_command)) + routes_tree = ETREE.fromstring( + self.device.make_rpc_call(route_info_rpc_command) + ) - for route in routes_tree.xpath('.//Route'): + for route in routes_tree.xpath(".//Route"): route_protocol = napalm.base.helpers.convert( - text_type, napalm.base.helpers.find_txt(route, 'ProtocolName').lower()) + text_type, napalm.base.helpers.find_txt(route, "ProtocolName").lower() + ) if protocol and route_protocol != protocol: continue # ignore routes learned via a different protocol # only in case the user requested a certain protocol route_details = {} - address = napalm.base.helpers.find_txt(route, 'Prefix') - length = napalm.base.helpers.find_txt(route, 'PrefixLength') + address = napalm.base.helpers.find_txt(route, "Prefix") + length = napalm.base.helpers.find_txt(route, "PrefixLength") priority = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(route, 'Priority')) + int, napalm.base.helpers.find_txt(route, "Priority") + ) age = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(route, 'RouteAge')) + int, napalm.base.helpers.find_txt(route, "RouteAge") + ) destination = napalm.base.helpers.convert( - text_type, - '{prefix}/{length}'.format( - prefix=address, - length=length - ) + text_type, "{prefix}/{length}".format(prefix=address, length=length) ) if destination not in routes.keys(): routes[destination] = [] route_details = { - 'current_active': False, - 'last_active': False, - 'age': age, - 'next_hop': u'', - 'protocol': route_protocol, - 'outgoing_interface': u'', - 'preference': priority, - 'selected_next_hop': False, - 'inactive_reason': u'', - 'routing_table': u'default', - 'protocol_attributes': {} + "current_active": False, + "last_active": False, + "age": age, + "next_hop": "", + "protocol": route_protocol, + "outgoing_interface": "", + "preference": priority, + "selected_next_hop": False, + "inactive_reason": "", + "routing_table": "default", + "protocol_attributes": {}, } # from BGP will try to get some more information - if route_protocol == 'bgp' and C.SR_638170159_SOLVED: + if route_protocol == "bgp" and C.SR_638170159_SOLVED: # looks like IOS-XR does not filter correctly # !IMPORTANT - bgp_route_info_rpc_command = '\ + bgp_route_info_rpc_command = "\ IPv4Unicast\ {network}{prefix_len}\ \ - '.format( - network=network, - prefix_len=dest_split[-1] + ".format( + network=network, prefix_len=dest_split[-1] ) bgp_route_tree = ETREE.fromstring( - self.device.make_rpc_call(bgp_route_info_rpc_command)) - for bgp_path in bgp_route_tree.xpath('.//Path'): + self.device.make_rpc_call(bgp_route_info_rpc_command) + ) + for bgp_path in bgp_route_tree.xpath(".//Path"): single_route_details = route_details.copy() - if 'NotFound' not in bgp_path.keys(): - best_path = napalm.base.helpers.find_txt( - bgp_path, 'PathInformation/IsBestPath') == 'true' + if "NotFound" not in bgp_path.keys(): + best_path = ( + napalm.base.helpers.find_txt( + bgp_path, "PathInformation/IsBestPath" + ) + == "true" + ) local_preference = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( bgp_path, - 'AttributesAfterPolicyIn/CommonAttributes/LocalPreference' + "AttributesAfterPolicyIn/CommonAttributes/LocalPreference", ), - 0 + 0, ) local_preference = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( bgp_path, - 'AttributesAfterPolicyIn/CommonAttributes/LocalPreference', + "AttributesAfterPolicyIn/CommonAttributes/LocalPreference", ), - 
0 + 0, ) remote_as = napalm.base.helpers.convert( int, napalm.base.helpers.find_txt( bgp_path, - 'AttributesAfterPolicyIn/CommonAttributes/NeighborAS', + "AttributesAfterPolicyIn/CommonAttributes/NeighborAS", ), - 0 + 0, ) remote_address = napalm.base.helpers.ip( napalm.base.helpers.find_txt( - bgp_path, 'PathInformation/NeighborAddress/IPV4Address') or - napalm.base.helpers.find_txt( - bgp_path, 'PathInformation/NeighborAddress/IPV6Address') + bgp_path, "PathInformation/NeighborAddress/IPV4Address" + ) + or napalm.base.helpers.find_txt( + bgp_path, "PathInformation/NeighborAddress/IPV6Address" + ) ) - as_path = ' '.join( + as_path = " ".join( [ bgp_as.text for bgp_as in bgp_path.xpath( - 'AttributesAfterPolicyIn/CommonAttributes/NeighborAS/Entry') + "AttributesAfterPolicyIn/CommonAttributes/NeighborAS/Entry" + ) ] ) next_hop = napalm.base.helpers.find_txt( - bgp_path, 'PathInformation/NextHop/IPV4Address') \ - or napalm.base.helpers.find_txt( - bgp_path, 'PathInformation/NextHop/IPV6Address') - single_route_details['current_active'] = best_path - single_route_details['next_hop'] = next_hop - single_route_details['protocol_attributes'] = { - 'local_preference': local_preference, - 'as_path': as_path, - 'remote_as': remote_as, - 'remote_address': remote_address + bgp_path, "PathInformation/NextHop/IPV4Address" + ) or napalm.base.helpers.find_txt( + bgp_path, "PathInformation/NextHop/IPV6Address" + ) + single_route_details["current_active"] = best_path + single_route_details["next_hop"] = next_hop + single_route_details["protocol_attributes"] = { + "local_preference": local_preference, + "as_path": as_path, + "remote_as": remote_as, + "remote_address": remote_address, } routes[destination].append(single_route_details) else: first_route = True - for route_entry in route.xpath('RoutePath/Entry'): + for route_entry in route.xpath("RoutePath/Entry"): # get all possible entries - next_hop = napalm.base.helpers.find_txt(route_entry, 'Address') + next_hop = napalm.base.helpers.find_txt(route_entry, "Address") single_route_details = {} single_route_details.update(route_details) - single_route_details.update({ - 'current_active': first_route, - 'next_hop': next_hop - }) + single_route_details.update( + {"current_active": first_route, "next_hop": next_hop} + ) routes[destination].append(single_route_details) first_route = False @@ -1473,29 +1817,28 @@ def get_snmp_information(self): snmp_information = {} - snmp_rpc_command = '' + snmp_rpc_command = "" snmp_result_tree = ETREE.fromstring(self.device.make_rpc_call(snmp_rpc_command)) - _PRIVILEGE_MODE_MAP_ = { - 'ReadOnly': u'ro', - 'ReadWrite': u'rw' - } + _PRIVILEGE_MODE_MAP_ = {"ReadOnly": "ro", "ReadWrite": "rw"} snmp_information = { - 'chassis_id': napalm.base.helpers.find_txt(snmp_result_tree, './/ChassisID'), - 'contact': napalm.base.helpers.find_txt(snmp_result_tree, './/Contact'), - 'location': napalm.base.helpers.find_txt(snmp_result_tree, './/Location'), - 'community': {} + "chassis_id": napalm.base.helpers.find_txt( + snmp_result_tree, ".//ChassisID" + ), + "contact": napalm.base.helpers.find_txt(snmp_result_tree, ".//Contact"), + "location": napalm.base.helpers.find_txt(snmp_result_tree, ".//Location"), + "community": {}, } - for community in snmp_result_tree.xpath('.//DefaultCommunity'): - name = napalm.base.helpers.find_txt(community, 'Naming/CommunityName') - privilege = napalm.base.helpers.find_txt(community, 'Priviledge') - acl = napalm.base.helpers.find_txt(community, 'AccessList') - snmp_information['community'][name] = { - 'mode': 
_PRIVILEGE_MODE_MAP_.get(privilege, u''), - 'acl': acl + for community in snmp_result_tree.xpath(".//DefaultCommunity"): + name = napalm.base.helpers.find_txt(community, "Naming/CommunityName") + privilege = napalm.base.helpers.find_txt(community, "Priviledge") + acl = napalm.base.helpers.find_txt(community, "AccessList") + snmp_information["community"][name] = { + "mode": _PRIVILEGE_MODE_MAP_.get(privilege, ""), + "acl": acl, } return snmp_information @@ -1505,40 +1848,45 @@ def get_probes_config(self): sla_config = {} _PROBE_TYPE_XML_TAG_MAP_ = { - 'ICMPEcho': u'icmp-ping', - 'UDPEcho': u'udp-ping', - 'ICMPJitter': u'icmp-ping-timestamp', - 'UDPJitter': u'udp-ping-timestamp' + "ICMPEcho": "icmp-ping", + "UDPEcho": "udp-ping", + "ICMPJitter": "icmp-ping-timestamp", + "UDPJitter": "udp-ping-timestamp", } - sla_config_rpc_command = '' + sla_config_rpc_command = ( + "" + ) sla_config_result_tree = ETREE.fromstring( - self.device.make_rpc_call(sla_config_rpc_command)) + self.device.make_rpc_call(sla_config_rpc_command) + ) - for probe in sla_config_result_tree.xpath('.//Definition'): - probe_name = napalm.base.helpers.find_txt(probe, 'Naming/OperationID') - operation_type = probe.xpath('OperationType')[0].getchildren()[0].tag - probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(operation_type, u'') - operation_xpath = 'OperationType/{op_type}'.format(op_type=operation_type) + for probe in sla_config_result_tree.xpath(".//Definition"): + probe_name = napalm.base.helpers.find_txt(probe, "Naming/OperationID") + operation_type = probe.xpath("OperationType")[0].getchildren()[0].tag + probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(operation_type, "") + operation_xpath = "OperationType/{op_type}".format(op_type=operation_type) operation = probe.xpath(operation_xpath)[0] - test_name = napalm.base.helpers.find_txt(operation, 'Tag') - source = napalm.base.helpers.find_txt(operation, 'SourceAddress') - target = napalm.base.helpers.find_txt(operation, 'DestAddress') + test_name = napalm.base.helpers.find_txt(operation, "Tag") + source = napalm.base.helpers.find_txt(operation, "SourceAddress") + target = napalm.base.helpers.find_txt(operation, "DestAddress") test_interval = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(operation, 'Frequency', '0')) + int, napalm.base.helpers.find_txt(operation, "Frequency", "0") + ) probe_count = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(operation, 'History/Buckets', '0')) + int, napalm.base.helpers.find_txt(operation, "History/Buckets", "0") + ) if probe_name not in sla_config.keys(): sla_config[probe_name] = {} if test_name not in sla_config[probe_name]: sla_config[probe_name][test_name] = {} sla_config[probe_name][test_name] = { - 'probe_type': probe_type, - 'source': source, - 'target': target, - 'probe_count': probe_count, - 'test_interval': test_interval + "probe_type": probe_type, + "source": source, + "target": target, + "probe_count": probe_count, + "test_interval": test_interval, } return sla_config @@ -1548,128 +1896,186 @@ def get_probes_results(self): sla_results = {} _PROBE_TYPE_XML_TAG_MAP_ = { - 'ICMPEcho': u'icmp-ping', - 'UDPEcho': u'udp-ping', - 'ICMPJitter': u'icmp-ping-timestamp', - 'UDPJitter': u'udp-ping-timestamp' + "ICMPEcho": "icmp-ping", + "UDPEcho": "udp-ping", + "ICMPJitter": "icmp-ping-timestamp", + "UDPJitter": "udp-ping-timestamp", } - sla_results_rpc_command = '' + sla_results_rpc_command = ( + "" + ) sla_results_tree = ETREE.fromstring( - self.device.make_rpc_call(sla_results_rpc_command)) + 
self.device.make_rpc_call(sla_results_rpc_command) + ) - probes_config = self.get_probes_config() # need to retrieve also the configuration + probes_config = ( + self.get_probes_config() + ) # need to retrieve also the configuration # source and tag/test_name not provided - for probe in sla_results_tree.xpath('.//Operation'): - probe_name = napalm.base.helpers.find_txt(probe, 'Naming/OperationID') + for probe in sla_results_tree.xpath(".//Operation"): + probe_name = napalm.base.helpers.find_txt(probe, "Naming/OperationID") test_name = list(probes_config.get(probe_name).keys())[0] target = napalm.base.helpers.find_txt( - probe, 'History/Target/LifeTable/Life/BucketTable/Bucket[0]/TargetAddress\ - /IPv4AddressTarget') - source = probes_config.get(probe_name).get(test_name, {}).get('source', '') - probe_type = _PROBE_TYPE_XML_TAG_MAP_.get(napalm.base.helpers.find_txt( - probe, 'Statistics/Latest/Target/SpecificStats/op_type')) - probe_count = probes_config.get(probe_name).get(test_name, {}).get('probe_count', 0) + probe, + "History/Target/LifeTable/Life/BucketTable/Bucket[0]/TargetAddress\ + /IPv4AddressTarget", + ) + source = probes_config.get(probe_name).get(test_name, {}).get("source", "") + probe_type = _PROBE_TYPE_XML_TAG_MAP_.get( + napalm.base.helpers.find_txt( + probe, "Statistics/Latest/Target/SpecificStats/op_type" + ) + ) + probe_count = ( + probes_config.get(probe_name).get(test_name, {}).get("probe_count", 0) + ) response_times = probe.xpath( - 'History/Target/LifeTable/Life[last()]/BucketTable/Bucket/ResponseTime') + "History/Target/LifeTable/Life[last()]/BucketTable/Bucket/ResponseTime" + ) response_times = [ napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(response_time, '.', '0')) + int, napalm.base.helpers.find_txt(response_time, ".", "0") + ) for response_time in response_times ] rtt = 0.0 if len(response_times): - rtt = sum(response_times, 0.0)/len(response_times) + rtt = sum(response_times, 0.0) / len(response_times) return_codes = probe.xpath( - 'History/Target/LifeTable/Life[last()]/BucketTable/Bucket/ReturnCode') + "History/Target/LifeTable/Life[last()]/BucketTable/Bucket/ReturnCode" + ) return_codes = [ - napalm.base.helpers.find_txt(return_code, '.') + napalm.base.helpers.find_txt(return_code, ".") for return_code in return_codes ] last_test_loss = 0.0 if len(return_codes): last_test_loss = napalm.base.helpers.convert( - int, 100*(1-return_codes.count('ipslaRetCodeOK')/napalm.base.helpers.convert( - float, len(return_codes)))) + int, + 100 + * ( + 1 + - return_codes.count("ipslaRetCodeOK") + / napalm.base.helpers.convert(float, len(return_codes)) + ), + ) rms = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(probe, 'Statistics/Aggregated/HourTable/Hour\ + float, + napalm.base.helpers.find_txt( + probe, + "Statistics/Aggregated/HourTable/Hour\ /Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats\ - /Sum2ResponseTime')) + /Sum2ResponseTime", + ), + ) global_test_updates = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(probe, 'Statistics/Aggregated/HourTable/Hour\ + float, + napalm.base.helpers.find_txt( + probe, + "Statistics/Aggregated/HourTable/Hour\ /Distributed/Target/DistributionIntervalTable/DistributionInterval/CommonStats\ - /UpdateCount')) + /UpdateCount", + ), + ) jitter = 0.0 if global_test_updates: - jitter = rtt-(rms/global_test_updates)**0.5 + jitter = rtt - (rms / global_test_updates) ** 0.5 # jitter = max(rtt - max(response_times), rtt - min(response_times)) 
current_test_min_delay = 0.0 # no stats for undergoing test :( current_test_max_delay = 0.0 current_test_avg_delay = 0.0 last_test_min_delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe, 'Statistics/Latest/Target/CommonStats/MinResponseTime')) + float, + napalm.base.helpers.find_txt( + probe, "Statistics/Latest/Target/CommonStats/MinResponseTime" + ), + ) last_test_max_delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe, 'Statistics/Latest/Target/CommonStats/MaxResponseTime')) + float, + napalm.base.helpers.find_txt( + probe, "Statistics/Latest/Target/CommonStats/MaxResponseTime" + ), + ) last_test_sum_delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe, 'Statistics/Latest/Target/CommonStats/SumResponseTime')) + float, + napalm.base.helpers.find_txt( + probe, "Statistics/Latest/Target/CommonStats/SumResponseTime" + ), + ) last_test_updates = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe, 'Statistics/Latest/Target/CommonStats/UpdateCount')) + float, + napalm.base.helpers.find_txt( + probe, "Statistics/Latest/Target/CommonStats/UpdateCount" + ), + ) last_test_avg_delay = 0.0 if last_test_updates: - last_test_avg_delay = last_test_sum_delay/last_test_updates + last_test_avg_delay = last_test_sum_delay / last_test_updates global_test_min_delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(probe, - 'Statistics/Aggregated/HourTable/Hour/Distributed/Target\ - /DistributionIntervalTable/DistributionInterval/CommonStats/MinResponseTime')) + float, + napalm.base.helpers.find_txt( + probe, + "Statistics/Aggregated/HourTable/Hour/Distributed/Target\ + /DistributionIntervalTable/DistributionInterval/CommonStats/MinResponseTime", + ), + ) global_test_max_delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(probe, - 'Statistics/Aggregated/HourTable/Hour/Distributed/Target\ - /DistributionIntervalTable/DistributionInterval/CommonStats/MaxResponseTime')) + float, + napalm.base.helpers.find_txt( + probe, + "Statistics/Aggregated/HourTable/Hour/Distributed/Target\ + /DistributionIntervalTable/DistributionInterval/CommonStats/MaxResponseTime", + ), + ) global_test_sum_delay = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(probe, 'Statistics/Aggregated/HourTable/Hour\ + float, + napalm.base.helpers.find_txt( + probe, + "Statistics/Aggregated/HourTable/Hour\ /Distributed/Target/DistributionIntervalTable/DistributionInterval\ - /CommonStats/SumResponseTime')) + /CommonStats/SumResponseTime", + ), + ) global_test_avg_delay = 0.0 if global_test_updates: - global_test_avg_delay = global_test_sum_delay/global_test_updates + global_test_avg_delay = global_test_sum_delay / global_test_updates if probe_name not in sla_results.keys(): sla_results[probe_name] = {} sla_results[probe_name][test_name] = { - 'target': target, - 'source': source, - 'probe_type': probe_type, - 'probe_count': probe_count, - 'rtt': rtt, - 'round_trip_jitter': jitter, - 'last_test_loss': last_test_loss, - 'current_test_min_delay': current_test_min_delay, - 'current_test_max_delay': current_test_max_delay, - 'current_test_avg_delay': current_test_avg_delay, - 'last_test_min_delay': last_test_min_delay, - 'last_test_max_delay': last_test_max_delay, - 'last_test_avg_delay': last_test_avg_delay, - 'global_test_min_delay': global_test_min_delay, - 'global_test_max_delay': global_test_max_delay, - 'global_test_avg_delay': global_test_avg_delay + "target": target, 
+ "source": source, + "probe_type": probe_type, + "probe_count": probe_count, + "rtt": rtt, + "round_trip_jitter": jitter, + "last_test_loss": last_test_loss, + "current_test_min_delay": current_test_min_delay, + "current_test_max_delay": current_test_max_delay, + "current_test_avg_delay": current_test_avg_delay, + "last_test_min_delay": last_test_min_delay, + "last_test_max_delay": last_test_max_delay, + "last_test_avg_delay": last_test_avg_delay, + "global_test_min_delay": global_test_min_delay, + "global_test_max_delay": global_test_max_delay, + "global_test_avg_delay": global_test_avg_delay, } return sla_results - def traceroute(self, - destination, - source=C.TRACEROUTE_SOURCE, - ttl=C.TRACEROUTE_TTL, - timeout=C.TRACEROUTE_TIMEOUT, - vrf=C.TRACEROUTE_VRF): + def traceroute( + self, + destination, + source=C.TRACEROUTE_SOURCE, + ttl=C.TRACEROUTE_TTL, + timeout=C.TRACEROUTE_TIMEOUT, + vrf=C.TRACEROUTE_VRF, + ): traceroute_result = {} @@ -1677,93 +2083,99 @@ def traceroute(self, try: ipv = IPAddress(destination).version except AddrFormatError: - return {'error': 'Wrong destination IP Address!'} + return {"error": "Wrong destination IP Address!"} - source_tag = '' - ttl_tag = '' - timeout_tag = '' - vrf_tag = '' + source_tag = "" + ttl_tag = "" + timeout_tag = "" + vrf_tag = "" if source: - source_tag = '{source}'.format(source=source) + source_tag = "{source}".format(source=source) if ttl: - ttl_tag = '{maxttl}'.format(maxttl=ttl) + ttl_tag = "{maxttl}".format(maxttl=ttl) if timeout: - timeout_tag = '{timeout}'.format(timeout=timeout) + timeout_tag = "{timeout}".format(timeout=timeout) if vrf: - vrf_tag = '{vrf}'.format(vrf=vrf) + vrf_tag = "{vrf}".format(vrf=vrf) - traceroute_rpc_command = '{destination}\ + traceroute_rpc_command = "{destination}\ {vrf_tag}{source_tag}{ttl_tag}{timeout_tag}\ - '.format( + ".format( version=ipv, destination=destination, vrf_tag=vrf_tag, source_tag=source_tag, ttl_tag=ttl_tag, - timeout_tag=timeout_tag + timeout_tag=timeout_tag, ) xml_tree_txt = self.device.make_rpc_call(traceroute_rpc_command) traceroute_tree = ETREE.fromstring(xml_tree_txt) - results_tree = traceroute_tree.xpath('.//Results') + results_tree = traceroute_tree.xpath(".//Results") if results_tree is None or not len(results_tree): - return {'error': 'Device returned empty results.'} + return {"error": "Device returned empty results."} - results_error = napalm.base.helpers.find_txt(results_tree[0], 'Error') + results_error = napalm.base.helpers.find_txt(results_tree[0], "Error") if results_error: - return {'error': results_error} + return {"error": results_error} - traceroute_result['success'] = {} + traceroute_result["success"] = {} last_hop_index = 1 last_probe_index = 1 - last_probe_ip_address = '*' - last_probe_host_name = '' - last_hop_dict = {'probes': {}} + last_probe_ip_address = "*" + last_probe_host_name = "" + last_hop_dict = {"probes": {}} for thanks_cisco in results_tree[0].getchildren(): tag_name = thanks_cisco.tag tag_value = thanks_cisco.text - if tag_name == 'HopIndex': + if tag_name == "HopIndex": new_hop_index = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(thanks_cisco, '.', '-1')) + int, napalm.base.helpers.find_txt(thanks_cisco, ".", "-1") + ) if last_hop_index and last_hop_index != new_hop_index: - traceroute_result['success'][last_hop_index] = copy.deepcopy(last_hop_dict) - last_hop_dict = {'probes': {}} - last_probe_ip_address = '*' - last_probe_host_name = '' + traceroute_result["success"][last_hop_index] = copy.deepcopy( + last_hop_dict + ) + 
last_hop_dict = {"probes": {}} + last_probe_ip_address = "*" + last_probe_host_name = "" last_hop_index = new_hop_index continue - tag_value = napalm.base.helpers.find_txt(thanks_cisco, '.', '') - if tag_name == 'ProbeIndex': + tag_value = napalm.base.helpers.find_txt(thanks_cisco, ".", "") + if tag_name == "ProbeIndex": last_probe_index = napalm.base.helpers.convert(int, tag_value, 0) + 1 - if last_probe_index not in last_hop_dict.get('probes').keys(): - last_hop_dict['probes'][last_probe_index] = {} + if last_probe_index not in last_hop_dict.get("probes").keys(): + last_hop_dict["probes"][last_probe_index] = {} if not last_probe_host_name: last_probe_host_name = last_probe_ip_address - last_hop_dict['probes'][last_probe_index] = { - 'ip_address': napalm.base.helpers.convert( - text_type, last_probe_ip_address), - 'host_name': napalm.base.helpers.convert( - text_type, last_probe_host_name), - 'rtt': timeout * 1000.0 + last_hop_dict["probes"][last_probe_index] = { + "ip_address": napalm.base.helpers.convert( + text_type, last_probe_ip_address + ), + "host_name": napalm.base.helpers.convert( + text_type, last_probe_host_name + ), + "rtt": timeout * 1000.0, } continue - if tag_name == 'HopAddress': + if tag_name == "HopAddress": last_probe_ip_address = tag_value continue - if tag_name == 'HopHostName': + if tag_name == "HopHostName": last_probe_host_name = tag_value continue - if tag_name == 'DeltaTime': - last_hop_dict['probes'][last_probe_index]['rtt'] = napalm.base.helpers.convert( - float, tag_value, 0.0) + if tag_name == "DeltaTime": + last_hop_dict["probes"][last_probe_index][ + "rtt" + ] = napalm.base.helpers.convert(float, tag_value, 0.0) continue if last_hop_index: - traceroute_result['success'][last_hop_index] = last_hop_dict + traceroute_result["success"][last_hop_index] = last_hop_dict return traceroute_result @@ -1772,51 +2184,45 @@ def get_users(self): users = {} _CISCO_GROUP_TO_CISCO_PRIVILEGE_MAP = { - 'root-system': 15, - 'operator': 5, - 'sysadmin': 1, - 'serviceadmin': 1, - 'root-lr': 15 + "root-system": 15, + "operator": 5, + "sysadmin": 1, + "serviceadmin": 1, + "root-lr": 15, } - _DEFAULT_USER_DETAILS = { - 'level': 0, - 'password': '', - 'sshkeys': [] - } + _DEFAULT_USER_DETAILS = {"level": 0, "password": "", "sshkeys": []} - users_xml_req = '' + users_xml_req = "" users_xml_reply = ETREE.fromstring(self.device.make_rpc_call(users_xml_req)) - for user_entry in users_xml_reply.xpath('.//Username'): - username = napalm.base.helpers.find_txt(user_entry, 'Naming/Name') + for user_entry in users_xml_reply.xpath(".//Username"): + username = napalm.base.helpers.find_txt(user_entry, "Naming/Name") group = napalm.base.helpers.find_txt( - user_entry, 'UsergroupsUnderUsername/UsergroupUnderUsername/Naming/Name') + user_entry, "UsergroupsUnderUsername/UsergroupUnderUsername/Naming/Name" + ) level = _CISCO_GROUP_TO_CISCO_PRIVILEGE_MAP.get(group, 0) - password = napalm.base.helpers.find_txt(user_entry, 'Password/Password') + password = napalm.base.helpers.find_txt(user_entry, "Password/Password") user_details = _DEFAULT_USER_DETAILS.copy() - user_details.update({ - 'level': level, - 'password': py23_compat.text_type(password) - }) + user_details.update( + {"level": level, "password": py23_compat.text_type(password)} + ) users[username] = user_details return users - def get_config(self, retrieve='all'): + def get_config(self, retrieve="all"): - config = { - 'startup': '', - 'running': '', - 'candidate': '' - } # default values + config = {"startup": "", "running": "", "candidate": 
""} # default values - if retrieve.lower() in ['running', 'all']: - config['running'] = py23_compat.text_type( - self.device._execute_config_show('show running-config')) - if retrieve.lower() in ['candidate', 'all']: - config['candidate'] = py23_compat.text_type( - self.device._execute_config_show('show configuration merge')) + if retrieve.lower() in ["running", "all"]: + config["running"] = py23_compat.text_type( + self.device._execute_config_show("show running-config") + ) + if retrieve.lower() in ["candidate", "all"]: + config["candidate"] = py23_compat.text_type( + self.device._execute_config_show("show configuration merge") + ) return config diff --git a/napalm/junos/constants.py b/napalm/junos/constants.py index 448dd7936..ea8d85193 100644 --- a/napalm/junos/constants.py +++ b/napalm/junos/constants.py @@ -7,14 +7,14 @@ # OpenConfig mapping # ref: https://github.com/openconfig/public/blob/master/release/models/network-instance/openconfig-network-instance-types.yang # noqa OC_NETWORK_INSTANCE_TYPE_MAP = { - 'default': 'DEFAULT_INSTANCE', - 'l2vpn': 'L2VPN', - 'vrf': 'L3VRF', - 'evpn': 'BGP_EVPN', - 'vpls': 'BGP_VPLS', - 'forwarding': 'L2P2P' + "default": "DEFAULT_INSTANCE", + "l2vpn": "L2VPN", + "vrf": "L3VRF", + "evpn": "BGP_EVPN", + "vpls": "BGP_VPLS", + "forwarding": "L2P2P", } # OPTICS_NULL_LEVEL_SPC matches infinite light level '- Inf' # reading on some versions of JUNOS # https://github.com/napalm-automation/napalm/issues/491 -OPTICS_NULL_LEVEL_SPC = '- Inf' +OPTICS_NULL_LEVEL_SPC = "- Inf" diff --git a/napalm/junos/junos.py b/napalm/junos/junos.py index eee54be79..134336764 100644 --- a/napalm/junos/junos.py +++ b/napalm/junos/junos.py @@ -82,26 +82,30 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) if optional_args is None: optional_args = {} - self.config_lock = optional_args.get('config_lock', False) - self.port = optional_args.get('port', 22) - self.key_file = optional_args.get('key_file', None) - self.keepalive = optional_args.get('keepalive', 30) - self.ssh_config_file = optional_args.get('ssh_config_file', None) - self.ignore_warning = optional_args.get('ignore_warning', False) + self.config_lock = optional_args.get("config_lock", False) + self.port = optional_args.get("port", 22) + self.key_file = optional_args.get("key_file", None) + self.keepalive = optional_args.get("keepalive", 30) + self.ssh_config_file = optional_args.get("ssh_config_file", None) + self.ignore_warning = optional_args.get("ignore_warning", False) if self.key_file: - self.device = Device(hostname, - user=username, - password=password, - ssh_private_key_file=self.key_file, - ssh_config=self.ssh_config_file, - port=self.port) + self.device = Device( + hostname, + user=username, + password=password, + ssh_private_key_file=self.key_file, + ssh_config=self.ssh_config_file, + port=self.port, + ) else: - self.device = Device(hostname, - user=username, - password=password, - port=self.port, - ssh_config=self.ssh_config_file) + self.device = Device( + hostname, + user=username, + password=password, + port=self.port, + ssh_config=self.ssh_config_file, + ) self.platform = "junos" self.profile = [self.platform] @@ -167,7 +171,8 @@ def is_alive(self): # evaluate the state of the underlying SSH connection # and also the NETCONF status from PyEZ return { - 'is_alive': self.device._conn._session.transport.is_active() and self.device.connected + "is_alive": self.device._conn._session.transport.is_active() + and self.device.connected } @staticmethod @@ -179,27 +184,27 @@ def 
_is_json_format(config): return True def _detect_config_format(self, config): - fmt = 'text' + fmt = "text" set_action_matches = [ - 'set', - 'activate', - 'deactivate', - 'annotate', - 'copy', - 'delete', - 'insert', - 'protect', - 'rename', - 'unprotect', - 'edit', - 'top', + "set", + "activate", + "deactivate", + "annotate", + "copy", + "delete", + "insert", + "protect", + "rename", + "unprotect", + "edit", + "top", ] - if config.strip().startswith('<'): - return 'xml' - elif config.strip().split(' ')[0] in set_action_matches: - return 'set' + if config.strip().startswith("<"): + return "xml" + elif config.strip().split(" ")[0] in set_action_matches: + return "set" elif self._is_json_format(config): - return 'json' + return "json" return fmt def _load_candidate(self, filename, config, overwrite): @@ -221,8 +226,12 @@ def _load_candidate(self, filename, config, overwrite): if fmt == "xml": configuration = etree.XML(configuration) - self.device.cu.load(configuration, format=fmt, overwrite=overwrite, - ignore_warning=self.ignore_warning) + self.device.cu.load( + configuration, + format=fmt, + overwrite=overwrite, + ignore_warning=self.ignore_warning, + ) except ConfigLoadError as e: if self.config_replace: raise ReplaceConfigException(e.errs) @@ -244,13 +253,13 @@ def compare_config(self): diff = self.device.cu.diff() if diff is None: - return '' + return "" else: return diff.strip() def commit_config(self, message=""): """Commit configuration.""" - commit_args = {'comment': message} if message else {} + commit_args = {"comment": message} if message else {} self.device.cu.commit(ignore_warning=self.ignore_warning, **commit_args) if not self.config_lock: self._unlock() @@ -277,14 +286,14 @@ def get_facts(self): interface_list = interfaces.keys() return { - 'vendor': u'Juniper', - 'model': py23_compat.text_type(output['model']), - 'serial_number': py23_compat.text_type(output['serialnumber']), - 'os_version': py23_compat.text_type(output['version']), - 'hostname': py23_compat.text_type(output['hostname']), - 'fqdn': py23_compat.text_type(output['fqdn']), - 'uptime': uptime, - 'interface_list': interface_list + "vendor": "Juniper", + "model": py23_compat.text_type(output["model"]), + "serial_number": py23_compat.text_type(output["serialnumber"]), + "os_version": py23_compat.text_type(output["version"]), + "hostname": py23_compat.text_type(output["hostname"]), + "fqdn": py23_compat.text_type(output["fqdn"]), + "uptime": uptime, + "interface_list": interface_list, } def get_interfaces(self): @@ -300,33 +309,37 @@ def get_interfaces(self): def _convert_to_dict(interfaces): for iface in interfaces.keys(): result[iface] = { - 'is_up': interfaces[iface]['is_up'], + "is_up": interfaces[iface]["is_up"], # For physical interfaces will always be there, so just # return the value interfaces[iface]['is_enabled'] # For logical interfaces if is present interface is disabled, # otherwise interface is enabled - 'is_enabled': (True if interfaces[iface]['is_enabled'] is None - else interfaces[iface]['is_enabled']), - 'description': (interfaces[iface]['description'] or u''), - 'last_flapped': float((interfaces[iface]['last_flapped'] or -1)), - 'mac_address': napalm.base.helpers.convert( + "is_enabled": ( + True + if interfaces[iface]["is_enabled"] is None + else interfaces[iface]["is_enabled"] + ), + "description": (interfaces[iface]["description"] or ""), + "last_flapped": float((interfaces[iface]["last_flapped"] or -1)), + "mac_address": napalm.base.helpers.convert( napalm.base.helpers.mac, - 
interfaces[iface]['mac_address'], - py23_compat.text_type(interfaces[iface]['mac_address'])), - 'speed': -1 + interfaces[iface]["mac_address"], + py23_compat.text_type(interfaces[iface]["mac_address"]), + ), + "speed": -1, } # result[iface]['last_flapped'] = float(result[iface]['last_flapped']) - match = re.search(r'(\d+)(\w*)', interfaces[iface]['speed'] or u'') + match = re.search(r"(\d+)(\w*)", interfaces[iface]["speed"] or "") if match is None: continue speed_value = napalm.base.helpers.convert(int, match.group(1), -1) if speed_value == -1: continue speed_unit = match.group(2) - if speed_unit.lower() == 'gbps': + if speed_unit.lower() == "gbps": speed_value *= 1000 - result[iface]['speed'] = speed_value + result[iface]["speed"] = speed_value return result @@ -340,7 +353,9 @@ def get_interfaces_counters(self): query.get() interface_counters = {} for interface, counters in query.items(): - interface_counters[interface] = {k: v if v is not None else -1 for k, v in counters} + interface_counters[interface] = { + k: v if v is not None else -1 for k, v in counters + } return interface_counters def get_environment(self): @@ -358,76 +373,87 @@ def get_environment(self): for sensor_object, object_data in environment.items(): structured_object_data = {k: v for k, v in object_data} - if structured_object_data['class']: + if structured_object_data["class"]: # If current object has a 'class' defined, store it for use # on subsequent unlabeled lines. - current_class = structured_object_data['class'] + current_class = structured_object_data["class"] else: # Juniper doesn't label the 2nd+ lines of a given class with a # class name. In that case, we use the most recent class seen. - structured_object_data['class'] = current_class + structured_object_data["class"] = current_class - if structured_object_data['class'] == 'Power': + if structured_object_data["class"] == "Power": # Create a dict for the 'power' key try: - environment_data['power'][sensor_object] = {} + environment_data["power"][sensor_object] = {} except KeyError: - environment_data['power'] = {} - environment_data['power'][sensor_object] = {} + environment_data["power"] = {} + environment_data["power"][sensor_object] = {} - environment_data['power'][sensor_object]['capacity'] = -1.0 - environment_data['power'][sensor_object]['output'] = -1.0 + environment_data["power"][sensor_object]["capacity"] = -1.0 + environment_data["power"][sensor_object]["output"] = -1.0 - if structured_object_data['class'] == 'Fans': + if structured_object_data["class"] == "Fans": # Create a dict for the 'fans' key try: - environment_data['fans'][sensor_object] = {} + environment_data["fans"][sensor_object] = {} except KeyError: - environment_data['fans'] = {} - environment_data['fans'][sensor_object] = {} + environment_data["fans"] = {} + environment_data["fans"][sensor_object] = {} - status = structured_object_data['status'] - env_class = structured_object_data['class'] - if (status == 'OK' and env_class == 'Power'): + status = structured_object_data["status"] + env_class = structured_object_data["class"] + if status == "OK" and env_class == "Power": # If status is Failed, Absent or Testing, set status to False. 
- environment_data['power'][sensor_object]['status'] = True + environment_data["power"][sensor_object]["status"] = True - elif (status != 'OK' and env_class == 'Power'): - environment_data['power'][sensor_object]['status'] = False + elif status != "OK" and env_class == "Power": + environment_data["power"][sensor_object]["status"] = False - elif (status == 'OK' and env_class == 'Fans'): + elif status == "OK" and env_class == "Fans": # If status is Failed, Absent or Testing, set status to False. - environment_data['fans'][sensor_object]['status'] = True + environment_data["fans"][sensor_object]["status"] = True - elif (status != 'OK' and env_class == 'Fans'): - environment_data['fans'][sensor_object]['status'] = False + elif status != "OK" and env_class == "Fans": + environment_data["fans"][sensor_object]["status"] = False for temperature_object, temperature_data in temperature_thresholds.items(): structured_temperature_data = {k: v for k, v in temperature_data} - if structured_object_data['class'] == 'Temp': + if structured_object_data["class"] == "Temp": # Create a dict for the 'temperature' key try: - environment_data['temperature'][sensor_object] = {} + environment_data["temperature"][sensor_object] = {} except KeyError: - environment_data['temperature'] = {} - environment_data['temperature'][sensor_object] = {} + environment_data["temperature"] = {} + environment_data["temperature"][sensor_object] = {} # Check we have a temperature field in this class (See #66) - if structured_object_data['temperature']: - environment_data['temperature'][sensor_object]['temperature'] = \ - float(structured_object_data['temperature']) + if structured_object_data["temperature"]: + environment_data["temperature"][sensor_object][ + "temperature" + ] = float(structured_object_data["temperature"]) # Set a default value (False) to the key is_critical and is_alert - environment_data['temperature'][sensor_object]['is_alert'] = False - environment_data['temperature'][sensor_object]['is_critical'] = False + environment_data["temperature"][sensor_object]["is_alert"] = False + environment_data["temperature"][sensor_object][ + "is_critical" + ] = False # Check if the working temperature is equal to or higher than alerting threshold - temp = structured_object_data['temperature'] + temp = structured_object_data["temperature"] if temp is not None: - if structured_temperature_data['red-alarm'] <= temp: - environment_data['temperature'][sensor_object]['is_critical'] = True - environment_data['temperature'][sensor_object]['is_alert'] = True - elif structured_temperature_data['yellow-alarm'] <= temp: - environment_data['temperature'][sensor_object]['is_alert'] = True + if structured_temperature_data["red-alarm"] <= temp: + environment_data["temperature"][sensor_object][ + "is_critical" + ] = True + environment_data["temperature"][sensor_object][ + "is_alert" + ] = True + elif structured_temperature_data["yellow-alarm"] <= temp: + environment_data["temperature"][sensor_object][ + "is_alert" + ] = True else: - environment_data['temperature'][sensor_object]['temperature'] = 0.0 + environment_data["temperature"][sensor_object][ + "temperature" + ] = 0.0 # Try to correct Power Supply information pem_table = dict() @@ -438,55 +464,66 @@ def get_environment(self): pass else: # Format PEM information and correct capacity and output values - if 'power' not in environment_data.keys(): + if "power" not in environment_data.keys(): # Power supplies were not included from the environment table above # Need to initialize data - 
environment_data['power'] = {} + environment_data["power"] = {} for pem in power_supplies.items(): pem_name = pem[0].replace("PEM", "Power Supply") - environment_data['power'][pem_name] = {} - environment_data['power'][pem_name]['output'] = -1.0 - environment_data['power'][pem_name]['capacity'] = -1.0 - environment_data['power'][pem_name]['status'] = False + environment_data["power"][pem_name] = {} + environment_data["power"][pem_name]["output"] = -1.0 + environment_data["power"][pem_name]["capacity"] = -1.0 + environment_data["power"][pem_name]["status"] = False for pem in power_supplies.items(): pem_name = pem[0].replace("PEM", "Power Supply") pem_table[pem_name] = dict(pem[1]) - if pem_table[pem_name]['capacity'] is not None: - environment_data['power'][pem_name]['capacity'] = \ - pem_table[pem_name]['capacity'] - if pem_table[pem_name]['output'] is not None: - environment_data['power'][pem_name]['output'] = pem_table[pem_name]['output'] - environment_data['power'][pem_name]['status'] = pem_table[pem_name]['status'] + if pem_table[pem_name]["capacity"] is not None: + environment_data["power"][pem_name]["capacity"] = pem_table[ + pem_name + ]["capacity"] + if pem_table[pem_name]["output"] is not None: + environment_data["power"][pem_name]["output"] = pem_table[pem_name][ + "output" + ] + environment_data["power"][pem_name]["status"] = pem_table[pem_name][ + "status" + ] for routing_engine_object, routing_engine_data in routing_engine.items(): structured_routing_engine_data = {k: v for k, v in routing_engine_data} # Create dicts for 'cpu' and 'memory'. try: - environment_data['cpu'][routing_engine_object] = {} - environment_data['memory'] = {} + environment_data["cpu"][routing_engine_object] = {} + environment_data["memory"] = {} except KeyError: - environment_data['cpu'] = {} - environment_data['cpu'][routing_engine_object] = {} - environment_data['memory'] = {} + environment_data["cpu"] = {} + environment_data["cpu"][routing_engine_object] = {} + environment_data["memory"] = {} # Calculate the CPU usage by using the CPU idle value. - environment_data['cpu'][routing_engine_object]['%usage'] = \ - 100.0 - structured_routing_engine_data['cpu-idle'] + environment_data["cpu"][routing_engine_object]["%usage"] = ( + 100.0 - structured_routing_engine_data["cpu-idle"] + ) try: - environment_data['memory']['available_ram'] = \ - int(structured_routing_engine_data['memory-dram-size']) + environment_data["memory"]["available_ram"] = int( + structured_routing_engine_data["memory-dram-size"] + ) except ValueError: - environment_data['memory']['available_ram'] = \ - int( - ''.join( - i for i in structured_routing_engine_data['memory-dram-size'] - if i.isdigit() - ) + environment_data["memory"]["available_ram"] = int( + "".join( + i + for i in structured_routing_engine_data["memory-dram-size"] + if i.isdigit() ) + ) # Junos gives us RAM in %, so calculation has to be made. # Sadly, bacause of this, results are not 100% accurate to the truth. 
- environment_data['memory']['used_ram'] = \ - int(round(environment_data['memory']['available_ram'] / 100.0 * - structured_routing_engine_data['memory-buffer-utilization'])) + environment_data["memory"]["used_ram"] = int( + round( + environment_data["memory"]["available_ram"] + / 100.0 + * structured_routing_engine_data["memory-buffer-utilization"] + ) + ) return environment_data @@ -498,15 +535,11 @@ def _get_address_family(table, instance): :params table: The name of the routing table :returns: address family """ - address_family_mapping = { - 'inet': 'ipv4', - 'inet6': 'ipv6', - 'inetflow': 'flow' - } + address_family_mapping = {"inet": "ipv4", "inet6": "ipv6", "inetflow": "flow"} if instance == "master": - family = table.rsplit('.', 1)[-2] + family = table.rsplit(".", 1)[-2] else: - family = table.split('.')[-2] + family = table.split(".")[-2] try: address_family = address_family_mapping[family] except KeyError: @@ -515,45 +548,45 @@ def _get_address_family(table, instance): def _parse_route_stats(self, neighbor, instance): data = { - 'ipv4': { - 'received_prefixes': -1, - 'accepted_prefixes': -1, - 'sent_prefixes': -1 + "ipv4": { + "received_prefixes": -1, + "accepted_prefixes": -1, + "sent_prefixes": -1, + }, + "ipv6": { + "received_prefixes": -1, + "accepted_prefixes": -1, + "sent_prefixes": -1, }, - 'ipv6': { - 'received_prefixes': -1, - 'accepted_prefixes': -1, - 'sent_prefixes': -1 - } } - if not neighbor['is_up']: + if not neighbor["is_up"]: return data - elif isinstance(neighbor['tables'], list): - if isinstance(neighbor['sent_prefixes'], int): + elif isinstance(neighbor["tables"], list): + if isinstance(neighbor["sent_prefixes"], int): # We expect sent_prefixes to be a list, but sometimes it # is of type int. Therefore convert attribute to list - neighbor['sent_prefixes'] = [neighbor['sent_prefixes']] - for idx, table in enumerate(neighbor['tables']): + neighbor["sent_prefixes"] = [neighbor["sent_prefixes"]] + for idx, table in enumerate(neighbor["tables"]): family = self._get_address_family(table, instance) if family is None: # Need to remove counter from sent_prefixes list anyway - if 'in sync' in neighbor['send-state'][idx]: - neighbor['sent_prefixes'].pop(0) + if "in sync" in neighbor["send-state"][idx]: + neighbor["sent_prefixes"].pop(0) continue data[family] = {} - data[family]['received_prefixes'] = neighbor['received_prefixes'][idx] - data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'][idx] - if 'in sync' in neighbor['send-state'][idx]: - data[family]['sent_prefixes'] = neighbor['sent_prefixes'].pop(0) + data[family]["received_prefixes"] = neighbor["received_prefixes"][idx] + data[family]["accepted_prefixes"] = neighbor["accepted_prefixes"][idx] + if "in sync" in neighbor["send-state"][idx]: + data[family]["sent_prefixes"] = neighbor["sent_prefixes"].pop(0) else: - data[family]['sent_prefixes'] = 0 + data[family]["sent_prefixes"] = 0 else: - family = self._get_address_family(neighbor['tables'], instance) + family = self._get_address_family(neighbor["tables"], instance) if family is not None: data[family] = {} - data[family]['received_prefixes'] = neighbor['received_prefixes'] - data[family]['accepted_prefixes'] = neighbor['accepted_prefixes'] - data[family]['sent_prefixes'] = neighbor['sent_prefixes'] + data[family]["received_prefixes"] = neighbor["received_prefixes"] + data[family]["accepted_prefixes"] = neighbor["accepted_prefixes"] + data[family]["sent_prefixes"] = neighbor["sent_prefixes"] return data @staticmethod @@ -561,7 +594,7 @@ def 
_parse_value(value): if isinstance(value, py23_compat.string_types): return py23_compat.text_type(value) elif value is None: - return u'' + return "" else: return value @@ -569,14 +602,14 @@ def get_bgp_neighbors(self): """Return BGP neighbors details.""" bgp_neighbor_data = {} default_neighbor_details = { - 'local_as': 0, - 'remote_as': 0, - 'remote_id': '', - 'is_up': False, - 'is_enabled': False, - 'description': '', - 'uptime': 0, - 'address_family': {} + "local_as": 0, + "remote_as": 0, + "remote_id": "", + "is_up": False, + "is_enabled": False, + "description": "", + "uptime": 0, + "address_family": {}, } keys = default_neighbor_details.keys() @@ -587,60 +620,77 @@ def get_bgp_neighbors(self): def _get_uptime_table(instance): if instance not in uptime_table_lookup: - uptime_table_lookup[instance] = uptime_table.get(instance=instance).items() + uptime_table_lookup[instance] = uptime_table.get( + instance=instance + ).items() return uptime_table_lookup[instance] - def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=None): - ''' + def _get_bgp_neighbors_core( + neighbor_data, instance=None, uptime_table_items=None + ): + """ Make sure to execute a simple request whenever using junos > 13. This is a helper used to avoid code redundancy and reuse the function also when iterating through the list BGP neighbors under a specific routing instance, also when the device is capable to return the routing instance name at the BGP neighbor level. - ''' + """ for bgp_neighbor in neighbor_data: - peer_ip = napalm.base.helpers.ip(bgp_neighbor[0].split('+')[0]) + peer_ip = napalm.base.helpers.ip(bgp_neighbor[0].split("+")[0]) neighbor_details = deepcopy(default_neighbor_details) neighbor_details.update( - {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None} + { + elem[0]: elem[1] + for elem in bgp_neighbor[1] + if elem[1] is not None + } ) if not instance: # not instance, means newer Junos version, # as we request everything in a single request - peer_fwd_rti = neighbor_details.pop('peer_fwd_rti') + peer_fwd_rti = neighbor_details.pop("peer_fwd_rti") instance = peer_fwd_rti else: # instance is explicitly requests, # thus it's an old Junos, so we retrieve the BGP neighbors # under a certain routing instance - peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '') - instance_name = 'global' if instance == 'master' else instance + peer_fwd_rti = neighbor_details.pop("peer_fwd_rti", "") + instance_name = "global" if instance == "master" else instance if instance_name not in bgp_neighbor_data: bgp_neighbor_data[instance_name] = {} - if 'router_id' not in bgp_neighbor_data[instance_name]: + if "router_id" not in bgp_neighbor_data[instance_name]: # we only need to set this once - bgp_neighbor_data[instance_name]['router_id'] = \ - py23_compat.text_type(neighbor_details.get('local_id', '')) + bgp_neighbor_data[instance_name][ + "router_id" + ] = py23_compat.text_type(neighbor_details.get("local_id", "")) peer = { key: self._parse_value(value) for key, value in neighbor_details.items() if key in keys } - peer['local_as'] = napalm.base.helpers.as_number(peer['local_as']) - peer['remote_as'] = napalm.base.helpers.as_number(peer['remote_as']) - peer['address_family'] = self._parse_route_stats(neighbor_details, instance) - if 'peers' not in bgp_neighbor_data[instance_name]: - bgp_neighbor_data[instance_name]['peers'] = {} - bgp_neighbor_data[instance_name]['peers'][peer_ip] = peer + peer["local_as"] = napalm.base.helpers.as_number(peer["local_as"]) + peer["remote_as"] = 
napalm.base.helpers.as_number(peer["remote_as"]) + peer["address_family"] = self._parse_route_stats( + neighbor_details, instance + ) + if "peers" not in bgp_neighbor_data[instance_name]: + bgp_neighbor_data[instance_name]["peers"] = {} + bgp_neighbor_data[instance_name]["peers"][peer_ip] = peer if not uptime_table_items: uptime_table_items = _get_uptime_table(instance) for neighbor, uptime in uptime_table_items: normalized_neighbor = napalm.base.helpers.ip(neighbor) - if normalized_neighbor not in bgp_neighbor_data[instance_name]['peers']: - bgp_neighbor_data[instance_name]['peers'][normalized_neighbor] = {} - bgp_neighbor_data[instance_name]['peers'][normalized_neighbor]['uptime'] = \ - uptime[0][1] + if ( + normalized_neighbor + not in bgp_neighbor_data[instance_name]["peers"] + ): + bgp_neighbor_data[instance_name]["peers"][ + normalized_neighbor + ] = {} + bgp_neighbor_data[instance_name]["peers"][normalized_neighbor][ + "uptime" + ] = uptime[0][1] # Commenting out the following sections, till Junos # will provide a way to identify the routing instance name @@ -657,15 +707,17 @@ def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=Non # if old_junos: instances = junos_views.junos_route_instance_table(self.device).get() for instance, instance_data in instances.items(): - if instance.startswith('__'): + if instance.startswith("__"): # junos internal instances continue - bgp_neighbor_data[instance] = {'peers': {}} + bgp_neighbor_data[instance] = {"peers": {}} instance_neighbors = bgp_neighbors_table.get(instance=instance).items() uptime_table_items = uptime_table.get(instance=instance).items() - _get_bgp_neighbors_core(instance_neighbors, - instance=instance, - uptime_table_items=uptime_table_items) + _get_bgp_neighbors_core( + instance_neighbors, + instance=instance, + uptime_table_items=uptime_table_items, + ) # If the OS provides the `peer_fwd_rti` or any way to identify the # rotuing instance name (see above), the performances of this getter # can be significantly improved, as we won't execute one request @@ -677,7 +729,7 @@ def _get_bgp_neighbors_core(neighbor_data, instance=None, uptime_table_items=Non # _get_bgp_neighbors_core(instance_neighbors) bgp_tmp_dict = {} for k, v in bgp_neighbor_data.items(): - if bgp_neighbor_data[k]['peers']: + if bgp_neighbor_data[k]["peers"]: bgp_tmp_dict[k] = v return bgp_tmp_dict @@ -690,7 +742,7 @@ def get_lldp_neighbors(self): # this assumes the library runs in an environment # able to handle logs # otherwise, the user just won't see this happening - log.error('Unable to retrieve the LLDP neighbors information:') + log.error("Unable to retrieve the LLDP neighbors information:") log.error(py23_compat.text_type(rpcerr)) return {} result = lldp.items() @@ -699,11 +751,13 @@ def get_lldp_neighbors(self): for neigh in result: if neigh[0] not in neighbors.keys(): neighbors[neigh[0]] = [] - neighbors[neigh[0]].append({x[0]: py23_compat.text_type(x[1]) for x in neigh[1]}) + neighbors[neigh[0]].append( + {x[0]: py23_compat.text_type(x[1]) for x in neigh[1]} + ) return neighbors - def get_lldp_neighbors_detail(self, interface=''): + def get_lldp_neighbors_detail(self, interface=""): """Detailed view of the LLDP neighbors.""" lldp_neighbors = {} @@ -714,12 +768,12 @@ def get_lldp_neighbors_detail(self, interface=''): # this assumes the library runs in an environment # able to handle logs # otherwise, the user just won't see this happening - log.error('Unable to retrieve the LLDP neighbors information:') + log.error("Unable to 
retrieve the LLDP neighbors information:") log.error(py23_compat.text_type(rpcerr)) return {} interfaces = lldp_table.get().keys() - if self.device.facts.get('switch_style') == "VLAN": + if self.device.facts.get("switch_style") == "VLAN": lldp_table.GET_RPC = "get-lldp-interface-neighbors-information" interface_variable = "interface_name" alt_rpc = "get-lldp-interface-neighbors" @@ -747,18 +801,24 @@ def get_lldp_neighbors_detail(self, interface=''): for item in lldp_table: if interface not in lldp_neighbors.keys(): lldp_neighbors[interface] = [] - lldp_neighbors[interface].append({ - 'parent_interface': item.parent_interface, - 'remote_port': item.remote_port, - 'remote_chassis_id': napalm.base.helpers.convert( - napalm.base.helpers.mac, item.remote_chassis_id, item.remote_chassis_id), - 'remote_port_description': napalm.base.helpers.convert( - py23_compat.text_type, item.remote_port_description), - 'remote_system_name': item.remote_system_name, - 'remote_system_description': item.remote_system_description, - 'remote_system_capab': item.remote_system_capab, - 'remote_system_enable_capab': item.remote_system_enable_capab - }) + lldp_neighbors[interface].append( + { + "parent_interface": item.parent_interface, + "remote_port": item.remote_port, + "remote_chassis_id": napalm.base.helpers.convert( + napalm.base.helpers.mac, + item.remote_chassis_id, + item.remote_chassis_id, + ), + "remote_port_description": napalm.base.helpers.convert( + py23_compat.text_type, item.remote_port_description + ), + "remote_system_name": item.remote_system_name, + "remote_system_description": item.remote_system_description, + "remote_system_capab": item.remote_system_capab, + "remote_system_enable_capab": item.remote_system_enable_capab, + } + ) return lldp_neighbors @@ -767,92 +827,86 @@ def cli(self, commands): cli_output = {} def _count(txt, none): # Second arg for consistency only. noqa - ''' + """ Return the exact output, as Junos displays e.g.: > show system processes extensive | match root | count Count: 113 lines - ''' + """ count = len(txt.splitlines()) - return 'Count: {count} lines'.format(count=count) + return "Count: {count} lines".format(count=count) def _trim(txt, length): - ''' + """ Trim specified number of columns from start of line. - ''' + """ try: newlines = [] for line in txt.splitlines(): - newlines.append(line[int(length):]) - return '\n'.join(newlines) + newlines.append(line[int(length) :]) + return "\n".join(newlines) except ValueError: return txt def _except(txt, pattern): - ''' + """ Show only text that does not match a pattern. - ''' - rgx = '^.*({pattern}).*$'.format(pattern=pattern) + """ + rgx = "^.*({pattern}).*$".format(pattern=pattern) unmatched = [ - line for line in txt.splitlines() - if not re.search(rgx, line, re.I) + line for line in txt.splitlines() if not re.search(rgx, line, re.I) ] - return '\n'.join(unmatched) + return "\n".join(unmatched) def _last(txt, length): - ''' + """ Display end of output only. - ''' + """ try: - return '\n'.join( - txt.splitlines()[(-1) * int(length):] - ) + return "\n".join(txt.splitlines()[(-1) * int(length) :]) except ValueError: return txt def _match(txt, pattern): - ''' + """ Show only text that matches a pattern. 
- ''' - rgx = '^.*({pattern}).*$'.format(pattern=pattern) - matched = [ - line for line in txt.splitlines() - if re.search(rgx, line, re.I) - ] - return '\n'.join(matched) + """ + rgx = "^.*({pattern}).*$".format(pattern=pattern) + matched = [line for line in txt.splitlines() if re.search(rgx, line, re.I)] + return "\n".join(matched) def _find(txt, pattern): - ''' + """ Search for first occurrence of pattern. - ''' - rgx = '^.*({pattern})(.*)$'.format(pattern=pattern) + """ + rgx = "^.*({pattern})(.*)$".format(pattern=pattern) match = re.search(rgx, txt, re.I | re.M | re.DOTALL) if match: - return '{pattern}{rest}'.format(pattern=pattern, rest=match.group(2)) + return "{pattern}{rest}".format(pattern=pattern, rest=match.group(2)) else: - return '\nPattern not found' + return "\nPattern not found" def _process_pipe(cmd, txt): - ''' + """ Process CLI output from Juniper device that doesn't allow piping the output. - ''' + """ if txt is None: return txt _OF_MAP = OrderedDict() - _OF_MAP['except'] = _except - _OF_MAP['match'] = _match - _OF_MAP['last'] = _last - _OF_MAP['trim'] = _trim - _OF_MAP['count'] = _count - _OF_MAP['find'] = _find + _OF_MAP["except"] = _except + _OF_MAP["match"] = _match + _OF_MAP["last"] = _last + _OF_MAP["trim"] = _trim + _OF_MAP["count"] = _count + _OF_MAP["find"] = _find # the operations order matter in this case! - exploded_cmd = cmd.split('|') + exploded_cmd = cmd.split("|") pipe_oper_args = {} for pipe in exploded_cmd[1:]: exploded_pipe = pipe.split() pipe_oper = exploded_pipe[0] # always there - pipe_args = ''.join(exploded_pipe[1:2]) + pipe_args = "".join(exploded_pipe[1:2]) # will not throw error when there's no arg pipe_oper_args[pipe_oper] = pipe_args for oper in _OF_MAP.keys(): @@ -863,30 +917,38 @@ def _process_pipe(cmd, txt): return txt if not isinstance(commands, list): - raise TypeError('Please enter a valid list of commands!') - _PIPE_BLACKLIST = ['save'] + raise TypeError("Please enter a valid list of commands!") + _PIPE_BLACKLIST = ["save"] # Preprocessing to avoid forbidden commands for command in commands: - exploded_cmd = command.split('|') + exploded_cmd = command.split("|") command_safe_parts = [] for pipe in exploded_cmd[1:]: exploded_pipe = pipe.split() pipe_oper = exploded_pipe[0] # always there if pipe_oper in _PIPE_BLACKLIST: continue - pipe_args = ''.join(exploded_pipe[1:2]) - safe_pipe = pipe_oper if not pipe_args else '{fun} {args}'.format(fun=pipe_oper, - args=pipe_args) + pipe_args = "".join(exploded_pipe[1:2]) + safe_pipe = ( + pipe_oper + if not pipe_args + else "{fun} {args}".format(fun=pipe_oper, args=pipe_args) + ) command_safe_parts.append(safe_pipe) - safe_command = exploded_cmd[0] if not command_safe_parts else \ - '{base} | {pipes}'.format(base=exploded_cmd[0], - pipes=' | '.join(command_safe_parts)) + safe_command = ( + exploded_cmd[0] + if not command_safe_parts + else "{base} | {pipes}".format( + base=exploded_cmd[0], pipes=" | ".join(command_safe_parts) + ) + ) raw_txt = self.device.cli(safe_command, warning=False) cli_output[py23_compat.text_type(command)] = py23_compat.text_type( - _process_pipe(command, raw_txt)) + _process_pipe(command, raw_txt) + ) return cli_output - def get_bgp_config(self, group='', neighbor=''): + def get_bgp_config(self, group="", neighbor=""): """Return BGP configuration.""" def _check_nhs(policies, nhs_policies): @@ -939,11 +1001,9 @@ def build_prefix_limit(**args): prefix_limit = {} for key, value in args.items(): - key_levels = key.split('_') + key_levels = key.split("_") length = 
len(key_levels) - 1 - temp_dict = { - key_levels[length]: value - } + temp_dict = {key_levels[length]: value} for index in reversed(range(length)): level = key_levels[index] temp_dict = {level: temp_dict} @@ -952,56 +1012,47 @@ def build_prefix_limit(**args): return prefix_limit _COMMON_FIELDS_DATATYPE_ = { - 'description': py23_compat.text_type, - 'local_address': py23_compat.text_type, - 'local_as': int, - 'remote_as': int, - 'import_policy': py23_compat.text_type, - 'export_policy': py23_compat.text_type, - 'inet_unicast_limit_prefix_limit': int, - 'inet_unicast_teardown_threshold_prefix_limit': int, - 'inet_unicast_teardown_timeout_prefix_limit': int, - 'inet_unicast_novalidate_prefix_limit': int, - 'inet_flow_limit_prefix_limit': int, - 'inet_flow_teardown_threshold_prefix_limit': int, - 'inet_flow_teardown_timeout_prefix_limit': int, - 'inet_flow_novalidate_prefix_limit': py23_compat.text_type, - 'inet6_unicast_limit_prefix_limit': int, - 'inet6_unicast_teardown_threshold_prefix_limit': int, - 'inet6_unicast_teardown_timeout_prefix_limit': int, - 'inet6_unicast_novalidate_prefix_limit': int, - 'inet6_flow_limit_prefix_limit': int, - 'inet6_flow_teardown_threshold_prefix_limit': int, - 'inet6_flow_teardown_timeout_prefix_limit': int, - 'inet6_flow_novalidate_prefix_limit': py23_compat.text_type, + "description": py23_compat.text_type, + "local_address": py23_compat.text_type, + "local_as": int, + "remote_as": int, + "import_policy": py23_compat.text_type, + "export_policy": py23_compat.text_type, + "inet_unicast_limit_prefix_limit": int, + "inet_unicast_teardown_threshold_prefix_limit": int, + "inet_unicast_teardown_timeout_prefix_limit": int, + "inet_unicast_novalidate_prefix_limit": int, + "inet_flow_limit_prefix_limit": int, + "inet_flow_teardown_threshold_prefix_limit": int, + "inet_flow_teardown_timeout_prefix_limit": int, + "inet_flow_novalidate_prefix_limit": py23_compat.text_type, + "inet6_unicast_limit_prefix_limit": int, + "inet6_unicast_teardown_threshold_prefix_limit": int, + "inet6_unicast_teardown_timeout_prefix_limit": int, + "inet6_unicast_novalidate_prefix_limit": int, + "inet6_flow_limit_prefix_limit": int, + "inet6_flow_teardown_threshold_prefix_limit": int, + "inet6_flow_teardown_timeout_prefix_limit": int, + "inet6_flow_novalidate_prefix_limit": py23_compat.text_type, } _PEER_FIELDS_DATATYPE_MAP_ = { - 'authentication_key': py23_compat.text_type, - 'route_reflector_client': bool, - 'nhs': bool + "authentication_key": py23_compat.text_type, + "route_reflector_client": bool, + "nhs": bool, } - _PEER_FIELDS_DATATYPE_MAP_.update( - _COMMON_FIELDS_DATATYPE_ - ) + _PEER_FIELDS_DATATYPE_MAP_.update(_COMMON_FIELDS_DATATYPE_) _GROUP_FIELDS_DATATYPE_MAP_ = { - 'type': py23_compat.text_type, - 'apply_groups': list, - 'remove_private_as': bool, - 'multipath': bool, - 'multihop_ttl': int + "type": py23_compat.text_type, + "apply_groups": list, + "remove_private_as": bool, + "multipath": bool, + "multihop_ttl": int, } - _GROUP_FIELDS_DATATYPE_MAP_.update( - _COMMON_FIELDS_DATATYPE_ - ) + _GROUP_FIELDS_DATATYPE_MAP_.update(_COMMON_FIELDS_DATATYPE_) - _DATATYPE_DEFAULT_ = { - py23_compat.text_type: '', - int: 0, - bool: False, - list: [] - } + _DATATYPE_DEFAULT_ = {py23_compat.text_type: "", int: 0, bool: False, list: []} bgp_config = {} @@ -1011,7 +1062,7 @@ def build_prefix_limit(**args): else: bgp = junos_views.junos_bgp_config_table(self.device) bgp.get() - neighbor = '' # if no group is set, no neighbor should be set either + neighbor = "" # if no group is set, no neighbor 
should be set either bgp_items = bgp.items() if neighbor: @@ -1036,45 +1087,51 @@ def build_prefix_limit(**args): bgp_config[bgp_group_name] = { field: _DATATYPE_DEFAULT_.get(datatype) for field, datatype in _GROUP_FIELDS_DATATYPE_MAP_.items() - if '_prefix_limit' not in field + if "_prefix_limit" not in field } for elem in bgp_group_details: - if not ('_prefix_limit' not in elem[0] and elem[1] is not None): + if not ("_prefix_limit" not in elem[0] and elem[1] is not None): continue datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0]) default = _DATATYPE_DEFAULT_.get(datatype) key = elem[0] value = elem[1] - if key in ['export_policy', 'import_policy']: + if key in ["export_policy", "import_policy"]: if isinstance(value, list): - value = ' '.join(value) - if key == 'local_address': + value = " ".join(value) + if key == "local_address": value = napalm.base.helpers.convert( - napalm.base.helpers.ip, value, value) - if key == 'neighbors': + napalm.base.helpers.ip, value, value + ) + if key == "neighbors": bgp_group_peers = value continue - bgp_config[bgp_group_name].update({ - key: napalm.base.helpers.convert(datatype, value, default) - }) + bgp_config[bgp_group_name].update( + {key: napalm.base.helpers.convert(datatype, value, default)} + ) prefix_limit_fields = {} for elem in bgp_group_details: - if '_prefix_limit' in elem[0] and elem[1] is not None: + if "_prefix_limit" in elem[0] and elem[1] is not None: datatype = _GROUP_FIELDS_DATATYPE_MAP_.get(elem[0]) default = _DATATYPE_DEFAULT_.get(datatype) - prefix_limit_fields.update({ - elem[0].replace('_prefix_limit', ''): - napalm.base.helpers.convert(datatype, elem[1], default) - }) - bgp_config[bgp_group_name]['prefix_limit'] = build_prefix_limit(**prefix_limit_fields) - if 'multihop' in bgp_config[bgp_group_name].keys(): + prefix_limit_fields.update( + { + elem[0].replace( + "_prefix_limit", "" + ): napalm.base.helpers.convert(datatype, elem[1], default) + } + ) + bgp_config[bgp_group_name]["prefix_limit"] = build_prefix_limit( + **prefix_limit_fields + ) + if "multihop" in bgp_config[bgp_group_name].keys(): # Delete 'multihop' key from the output - del bgp_config[bgp_group_name]['multihop'] - if bgp_config[bgp_group_name]['multihop_ttl'] == 0: + del bgp_config[bgp_group_name]["multihop"] + if bgp_config[bgp_group_name]["multihop_ttl"] == 0: # Set ttl to default value 64 - bgp_config[bgp_group_name]['multihop_ttl'] = 64 + bgp_config[bgp_group_name]["multihop_ttl"] = 64 - bgp_config[bgp_group_name]['neighbors'] = {} + bgp_config[bgp_group_name]["neighbors"] = {} for bgp_group_neighbor in bgp_group_peers.items(): bgp_peer_address = napalm.base.helpers.ip(bgp_group_neighbor[0]) if neighbor and bgp_peer_address != neighbor: @@ -1083,103 +1140,115 @@ def build_prefix_limit(**args): bgp_peer_details = { field: _DATATYPE_DEFAULT_.get(datatype) for field, datatype in _PEER_FIELDS_DATATYPE_MAP_.items() - if '_prefix_limit' not in field + if "_prefix_limit" not in field } for elem in bgp_group_details: - if not ('_prefix_limit' not in elem[0] and elem[1] is not None): + if not ("_prefix_limit" not in elem[0] and elem[1] is not None): continue datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0]) default = _DATATYPE_DEFAULT_.get(datatype) key = elem[0] value = elem[1] - if key in ['export_policy']: + if key in ["export_policy"]: # next-hop self is applied on export IBGP sessions - bgp_peer_details['nhs'] = _check_nhs(value, nhs_policies) - if key in ['export_policy', 'import_policy']: + bgp_peer_details["nhs"] = _check_nhs(value, nhs_policies) + if key in 
["export_policy", "import_policy"]: if isinstance(value, list): - value = ' '.join(value) - if key == 'local_address': + value = " ".join(value) + if key == "local_address": value = napalm.base.helpers.convert( - napalm.base.helpers.ip, value, value) - bgp_peer_details.update({ - key: napalm.base.helpers.convert(datatype, value, default) - }) - bgp_peer_details['local_as'] = napalm.base.helpers.as_number( - bgp_peer_details['local_as']) - bgp_peer_details['remote_as'] = napalm.base.helpers.as_number( - bgp_peer_details['remote_as']) - if key == 'cluster': - bgp_peer_details['route_reflector_client'] = True + napalm.base.helpers.ip, value, value + ) + bgp_peer_details.update( + {key: napalm.base.helpers.convert(datatype, value, default)} + ) + bgp_peer_details["local_as"] = napalm.base.helpers.as_number( + bgp_peer_details["local_as"] + ) + bgp_peer_details["remote_as"] = napalm.base.helpers.as_number( + bgp_peer_details["remote_as"] + ) + if key == "cluster": + bgp_peer_details["route_reflector_client"] = True # we do not want cluster in the output - del bgp_peer_details['cluster'] + del bgp_peer_details["cluster"] - if 'cluster' in bgp_config[bgp_group_name].keys(): - bgp_peer_details['route_reflector_client'] = True + if "cluster" in bgp_config[bgp_group_name].keys(): + bgp_peer_details["route_reflector_client"] = True prefix_limit_fields = {} for elem in bgp_group_details: - if '_prefix_limit' in elem[0] and elem[1] is not None: + if "_prefix_limit" in elem[0] and elem[1] is not None: datatype = _PEER_FIELDS_DATATYPE_MAP_.get(elem[0]) default = _DATATYPE_DEFAULT_.get(datatype) - prefix_limit_fields.update({ - elem[0].replace('_prefix_limit', ''): - napalm.base.helpers.convert(datatype, elem[1], default) - }) - bgp_peer_details['prefix_limit'] = build_prefix_limit(**prefix_limit_fields) - bgp_config[bgp_group_name]['neighbors'][bgp_peer_address] = bgp_peer_details + prefix_limit_fields.update( + { + elem[0].replace( + "_prefix_limit", "" + ): napalm.base.helpers.convert( + datatype, elem[1], default + ) + } + ) + bgp_peer_details["prefix_limit"] = build_prefix_limit( + **prefix_limit_fields + ) + bgp_config[bgp_group_name]["neighbors"][ + bgp_peer_address + ] = bgp_peer_details if neighbor and bgp_peer_address == neighbor_ip: break # found the desired neighbor - if 'cluster' in bgp_config[bgp_group_name].keys(): + if "cluster" in bgp_config[bgp_group_name].keys(): # we do not want cluster in the output - del bgp_config[bgp_group_name]['cluster'] + del bgp_config[bgp_group_name]["cluster"] return bgp_config - def get_bgp_neighbors_detail(self, neighbor_address=''): + def get_bgp_neighbors_detail(self, neighbor_address=""): """Detailed view of the BGP neighbors operational data.""" bgp_neighbors = {} default_neighbor_details = { - 'up': False, - 'local_as': 0, - 'remote_as': 0, - 'router_id': u'', - 'local_address': u'', - 'routing_table': u'', - 'local_address_configured': False, - 'local_port': 0, - 'remote_address': u'', - 'remote_port': 0, - 'multihop': False, - 'multipath': False, - 'remove_private_as': False, - 'import_policy': u'', - 'export_policy': u'', - 'input_messages': -1, - 'output_messages': -1, - 'input_updates': -1, - 'output_updates': -1, - 'messages_queued_out': -1, - 'connection_state': u'', - 'previous_connection_state': u'', - 'last_event': u'', - 'suppress_4byte_as': False, - 'local_as_prepend': False, - 'holdtime': 0, - 'configured_holdtime': 0, - 'keepalive': 0, - 'configured_keepalive': 0, - 'active_prefix_count': -1, - 'received_prefix_count': -1, - 
'accepted_prefix_count': -1, - 'suppressed_prefix_count': -1, - 'advertised_prefix_count': -1, - 'flap_count': 0 + "up": False, + "local_as": 0, + "remote_as": 0, + "router_id": "", + "local_address": "", + "routing_table": "", + "local_address_configured": False, + "local_port": 0, + "remote_address": "", + "remote_port": 0, + "multihop": False, + "multipath": False, + "remove_private_as": False, + "import_policy": "", + "export_policy": "", + "input_messages": -1, + "output_messages": -1, + "input_updates": -1, + "output_updates": -1, + "messages_queued_out": -1, + "connection_state": "", + "previous_connection_state": "", + "last_event": "", + "suppress_4byte_as": False, + "local_as_prepend": False, + "holdtime": 0, + "configured_holdtime": 0, + "keepalive": 0, + "configured_keepalive": 0, + "active_prefix_count": -1, + "received_prefix_count": -1, + "accepted_prefix_count": -1, + "suppressed_prefix_count": -1, + "advertised_prefix_count": -1, + "flap_count": 0, } OPTION_KEY_MAP = { - 'RemovePrivateAS': 'remove_private_as', - 'Multipath': 'multipath', - 'Multihop': 'multihop', - 'AddressFamily': 'local_address_configured' + "RemovePrivateAS": "remove_private_as", + "Multipath": "multipath", + "Multihop": "multihop", + "AddressFamily": "local_address_configured" # 'AuthKey' : 'authentication_key_set' # but other vendors do not specify if auth key is set # other options: @@ -1187,7 +1256,7 @@ def get_bgp_neighbors_detail(self, neighbor_address=''): } def _bgp_iter_core(neighbor_data, instance=None): - ''' + """ Iterate over a list of neighbors. For older junos, the routing instance is not specified inside the BGP neighbors XML, therefore we need to use a super sub-optimal structure @@ -1195,55 +1264,63 @@ def _bgp_iter_core(neighbor_data, instance=None): then execute one request for each and every routing instance. For newer junos, this is not necessary as the routing instance is available and we can get everything solve in a single request. 
- ''' + """ for bgp_neighbor in neighbor_data: remote_as = int(bgp_neighbor[0]) neighbor_details = deepcopy(default_neighbor_details) neighbor_details.update( - {elem[0]: elem[1] for elem in bgp_neighbor[1] if elem[1] is not None} + { + elem[0]: elem[1] + for elem in bgp_neighbor[1] + if elem[1] is not None + } ) if not instance: - peer_fwd_rti = neighbor_details.pop('peer_fwd_rti') + peer_fwd_rti = neighbor_details.pop("peer_fwd_rti") instance = peer_fwd_rti else: - peer_fwd_rti = neighbor_details.pop('peer_fwd_rti', '') - instance_name = 'global' if instance == 'master' else instance - options = neighbor_details.pop('options', '') + peer_fwd_rti = neighbor_details.pop("peer_fwd_rti", "") + instance_name = "global" if instance == "master" else instance + options = neighbor_details.pop("options", "") if isinstance(options, str): options_list = options.split() for option in options_list: key = OPTION_KEY_MAP.get(option) if key is not None: neighbor_details[key] = True - four_byte_as = neighbor_details.pop('4byte_as', 0) - local_address = neighbor_details.pop('local_address', '') - local_details = local_address.split('+') - neighbor_details['local_address'] = napalm.base.helpers.convert( - napalm.base.helpers.ip, local_details[0], local_details[0]) + four_byte_as = neighbor_details.pop("4byte_as", 0) + local_address = neighbor_details.pop("local_address", "") + local_details = local_address.split("+") + neighbor_details["local_address"] = napalm.base.helpers.convert( + napalm.base.helpers.ip, local_details[0], local_details[0] + ) if len(local_details) == 2: - neighbor_details['local_port'] = int(local_details[1]) + neighbor_details["local_port"] = int(local_details[1]) else: - neighbor_details['local_port'] = 179 - neighbor_details['suppress_4byte_as'] = (remote_as != four_byte_as) - peer_address = neighbor_details.pop('peer_address', '') - remote_details = peer_address.split('+') - neighbor_details['remote_address'] = napalm.base.helpers.convert( - napalm.base.helpers.ip, remote_details[0], remote_details[0]) + neighbor_details["local_port"] = 179 + neighbor_details["suppress_4byte_as"] = remote_as != four_byte_as + peer_address = neighbor_details.pop("peer_address", "") + remote_details = peer_address.split("+") + neighbor_details["remote_address"] = napalm.base.helpers.convert( + napalm.base.helpers.ip, remote_details[0], remote_details[0] + ) if len(remote_details) == 2: - neighbor_details['remote_port'] = int(remote_details[1]) + neighbor_details["remote_port"] = int(remote_details[1]) else: - neighbor_details['remote_port'] = 179 - neighbor_details['routing_table'] = instance_name - neighbor_details['local_as'] = napalm.base.helpers.as_number( - neighbor_details['local_as']) - neighbor_details['remote_as'] = napalm.base.helpers.as_number( - neighbor_details['remote_as']) - neighbors_rib = neighbor_details.pop('rib') - neighbors_queue = neighbor_details.pop('queue') + neighbor_details["remote_port"] = 179 + neighbor_details["routing_table"] = instance_name + neighbor_details["local_as"] = napalm.base.helpers.as_number( + neighbor_details["local_as"] + ) + neighbor_details["remote_as"] = napalm.base.helpers.as_number( + neighbor_details["remote_as"] + ) + neighbors_rib = neighbor_details.pop("rib") + neighbors_queue = neighbor_details.pop("queue") messages_queued_out = 0 for queue_entry in neighbors_queue.items(): messages_queued_out += queue_entry[1][0][1] - neighbor_details['messages_queued_out'] = messages_queued_out + neighbor_details["messages_queued_out"] = messages_queued_out if 
instance_name not in bgp_neighbors.keys(): bgp_neighbors[instance_name] = {} if remote_as not in bgp_neighbors[instance_name].keys(): @@ -1253,11 +1330,11 @@ def _bgp_iter_core(neighbor_data, instance=None): bgp_neighbors[instance_name][remote_as].append(neighbor_details) continue # no RIBs available, pass default details neighbor_rib_details = { - 'active_prefix_count': 0, - 'received_prefix_count': 0, - 'accepted_prefix_count': 0, - 'suppressed_prefix_count': 0, - 'advertised_prefix_count': 0 + "active_prefix_count": 0, + "received_prefix_count": 0, + "accepted_prefix_count": 0, + "suppressed_prefix_count": 0, + "advertised_prefix_count": 0, } for rib_entry in neighbor_rib_stats: for elem in rib_entry[1]: @@ -1275,11 +1352,12 @@ def _bgp_iter_core(neighbor_data, instance=None): # if old_junos: instances = junos_views.junos_route_instance_table(self.device) for instance, instance_data in instances.get().items(): - if instance.startswith('__'): + if instance.startswith("__"): # junos internal instances continue - neighbor_data = bgp_neighbors_table.get(instance=instance, - neighbor_address=str(neighbor_address)).items() + neighbor_data = bgp_neighbors_table.get( + instance=instance, neighbor_address=str(neighbor_address) + ).items() _bgp_iter_core(neighbor_data, instance=instance) # else: # bgp_neighbors_table = junos_views.junos_bgp_neighbors_table(self.device) @@ -1305,11 +1383,9 @@ def get_arp_table(self): arp_table_items = arp_table_raw.items() for arp_table_entry in arp_table_items: - arp_entry = { - elem[0]: elem[1] for elem in arp_table_entry[1] - } - arp_entry['mac'] = napalm.base.helpers.mac(arp_entry.get('mac')) - arp_entry['ip'] = napalm.base.helpers.ip(arp_entry.get('ip')) + arp_entry = {elem[0]: elem[1] for elem in arp_table_entry[1]} + arp_entry["mac"] = napalm.base.helpers.mac(arp_entry.get("mac")) + arp_entry["ip"] = napalm.base.helpers.ip(arp_entry.get("ip")) arp_table.append(arp_entry) return arp_table @@ -1323,11 +1399,9 @@ def get_ipv6_neighbors_table(self): ipv6_neighbors_table_items = ipv6_neighbors_table_raw.items() for ipv6_table_entry in ipv6_neighbors_table_items: - ipv6_entry = { - elem[0]: elem[1] for elem in ipv6_table_entry[1] - } - ipv6_entry['mac'] = napalm.base.helpers.mac(ipv6_entry.get('mac')) - ipv6_entry['ip'] = napalm.base.helpers.ip(ipv6_entry.get('ip')) + ipv6_entry = {elem[0]: elem[1] for elem in ipv6_table_entry[1]} + ipv6_entry["mac"] = napalm.base.helpers.mac(ipv6_entry.get("mac")) + ipv6_entry["ip"] = napalm.base.helpers.ip(ipv6_entry.get("ip")) ipv6_neighbors_table.append(ipv6_entry) return ipv6_neighbors_table @@ -1365,14 +1439,14 @@ def get_ntp_stats(self): ntp_stats = [] REGEX = ( - r'^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)' - r'\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})' - r'\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)' - r'\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)' - r'\s+([0-9\.]+)\s?$' + r"^\s?(\+|\*|x|-)?([a-zA-Z0-9\.+-:]+)" + r"\s+([a-zA-Z0-9\.]+)\s+([0-9]{1,2})" + r"\s+(-|u)\s+([0-9h-]+)\s+([0-9]+)" + r"\s+([0-9]+)\s+([0-9\.]+)\s+([0-9\.-]+)" + r"\s+([0-9\.]+)\s?$" ) - ntp_assoc_output = self.device.cli('show ntp associations no-resolve') + ntp_assoc_output = self.device.cli("show ntp associations no-resolve") ntp_assoc_output_lines = ntp_assoc_output.splitlines() for ntp_assoc_output_line in ntp_assoc_output_lines[3:]: # except last line @@ -1381,19 +1455,21 @@ def get_ntp_stats(self): continue # pattern not found line_groups = line_search.groups() try: - ntp_stats.append({ - 'remote': napalm.base.helpers.ip(line_groups[1]), - 'synchronized': (line_groups[0] 
== '*'), - 'referenceid': py23_compat.text_type(line_groups[2]), - 'stratum': int(line_groups[3]), - 'type': py23_compat.text_type(line_groups[4]), - 'when': py23_compat.text_type(line_groups[5]), - 'hostpoll': int(line_groups[6]), - 'reachability': int(line_groups[7]), - 'delay': float(line_groups[8]), - 'offset': float(line_groups[9]), - 'jitter': float(line_groups[10]) - }) + ntp_stats.append( + { + "remote": napalm.base.helpers.ip(line_groups[1]), + "synchronized": (line_groups[0] == "*"), + "referenceid": py23_compat.text_type(line_groups[2]), + "stratum": int(line_groups[3]), + "type": py23_compat.text_type(line_groups[4]), + "when": py23_compat.text_type(line_groups[5]), + "hostpoll": int(line_groups[6]), + "reachability": int(line_groups[7]), + "delay": float(line_groups[8]), + "offset": float(line_groups[9]), + "jitter": float(line_groups[10]), + } + ) except Exception: continue # jump to next line @@ -1408,29 +1484,29 @@ def get_interfaces_ip(self): interface_table_items = interface_table.items() _FAMILY_VMAP_ = { - 'inet': u'ipv4', - 'inet6': u'ipv6' + "inet": "ipv4", + "inet6": "ipv6" # can add more mappings } - _FAMILY_MAX_PREFIXLEN = { - 'inet': 32, - 'inet6': 128 - } + _FAMILY_MAX_PREFIXLEN = {"inet": 32, "inet6": 128} for interface_details in interface_table_items: ip_network = interface_details[0] - ip_address = ip_network.split('/')[0] + ip_address = ip_network.split("/")[0] address = napalm.base.helpers.convert( - napalm.base.helpers.ip, ip_address, ip_address) + napalm.base.helpers.ip, ip_address, ip_address + ) try: interface_details_dict = dict(interface_details[1]) - family_raw = interface_details_dict.get('family') - interface = py23_compat.text_type(interface_details_dict.get('interface')) + family_raw = interface_details_dict.get("family") + interface = py23_compat.text_type( + interface_details_dict.get("interface") + ) except ValueError: continue - prefix = napalm.base.helpers.convert(int, - ip_network.split('/')[-1], - _FAMILY_MAX_PREFIXLEN.get(family_raw)) + prefix = napalm.base.helpers.convert( + int, ip_network.split("/")[-1], _FAMILY_MAX_PREFIXLEN.get(family_raw) + ) family = _FAMILY_VMAP_.get(family_raw) if not family or not interface: continue @@ -1440,7 +1516,7 @@ def get_interfaces_ip(self): interfaces_ip[interface][family] = {} if address not in interfaces_ip[interface][family].keys(): interfaces_ip[interface][family][address] = {} - interfaces_ip[interface][family][address][u'prefix_length'] = prefix + interfaces_ip[interface][family][address]["prefix_length"] = prefix return interfaces_ip @@ -1448,8 +1524,12 @@ def get_mac_address_table(self): """Return the MAC address table.""" mac_address_table = [] - if self.device.facts.get('personality', '') in ['SWITCH']: # for EX & QFX devices - if self.device.facts.get('switch_style', '') in ['VLAN_L2NG']: # for L2NG devices + if self.device.facts.get("personality", "") in [ + "SWITCH" + ]: # for EX & QFX devices + if self.device.facts.get("switch_style", "") in [ + "VLAN_L2NG" + ]: # for L2NG devices mac_table = junos_views.junos_mac_address_table_switch_l2ng(self.device) else: mac_table = junos_views.junos_mac_address_table_switch(self.device) @@ -1460,91 +1540,83 @@ def get_mac_address_table(self): mac_table_items = mac_table.items() default_values = { - 'mac': u'', - 'interface': u'', - 'vlan': 0, - 'static': False, - 'active': True, - 'moves': 0, - 'last_move': 0.0 + "mac": "", + "interface": "", + "vlan": 0, + "static": False, + "active": True, + "moves": 0, + "last_move": 0.0, } for mac_table_entry in 
mac_table_items: mac_entry = default_values.copy() - mac_entry.update( - {elem[0]: elem[1] for elem in mac_table_entry[1]} - ) - mac = mac_entry.get('mac') + mac_entry.update({elem[0]: elem[1] for elem in mac_table_entry[1]}) + mac = mac_entry.get("mac") # JUNOS returns '*' for Type = Flood - if mac == '*': + if mac == "*": continue - mac_entry['mac'] = napalm.base.helpers.mac(mac) + mac_entry["mac"] = napalm.base.helpers.mac(mac) mac_address_table.append(mac_entry) return mac_address_table - def get_route_to(self, destination='', protocol=''): + def get_route_to(self, destination="", protocol=""): """Return route details to a specific destination, learned from a certain protocol.""" routes = {} if not isinstance(destination, py23_compat.string_types): - raise TypeError('Please specify a valid destination!') + raise TypeError("Please specify a valid destination!") if protocol and isinstance(destination, py23_compat.string_types): protocol = protocol.lower() - if protocol == 'connected': - protocol = 'direct' # this is how is called on JunOS + if protocol == "connected": + protocol = "direct" # this is how is called on JunOS _COMMON_PROTOCOL_FIELDS_ = [ - 'destination', - 'prefix_length', - 'protocol', - 'current_active', - 'last_active', - 'age', - 'next_hop', - 'outgoing_interface', - 'selected_next_hop', - 'preference', - 'inactive_reason', - 'routing_table' + "destination", + "prefix_length", + "protocol", + "current_active", + "last_active", + "age", + "next_hop", + "outgoing_interface", + "selected_next_hop", + "preference", + "inactive_reason", + "routing_table", ] # identifies the list of fileds common for all protocols _BOOLEAN_FIELDS_ = [ - 'current_active', - 'selected_next_hop', - 'last_active' + "current_active", + "selected_next_hop", + "last_active", ] # fields expected to have boolean values _PROTOCOL_SPECIFIC_FIELDS_ = { - 'bgp': [ - 'local_as', - 'remote_as', - 'as_path', - 'communities', - 'local_preference', - 'preference2', - 'remote_address', - 'metric', - 'metric2' + "bgp": [ + "local_as", + "remote_as", + "as_path", + "communities", + "local_preference", + "preference2", + "remote_address", + "metric", + "metric2", ], - 'isis': [ - 'level', - 'metric', - 'local_as' - ] + "isis": ["level", "metric", "local_as"], } routes_table = junos_views.junos_protocol_route_table(self.device) - rt_kargs = { - 'destination': destination - } + rt_kargs = {"destination": destination} if protocol and isinstance(destination, py23_compat.string_types): - rt_kargs['protocol'] = protocol + rt_kargs["protocol"] = protocol try: routes_table.get(**rt_kargs) @@ -1554,15 +1626,20 @@ def get_route_to(self, destination='', protocol=''): # will take very very long to determine all routes and # moreover will return a huge list raise CommandTimeoutException( - 'Too many routes returned! Please try with a longer prefix or a specific protocol!' + "Too many routes returned! Please try with a longer prefix or a specific protocol!" ) except RpcError as rpce: - if len(rpce.errs) > 0 and 'bad_element' in rpce.errs[0]: + if len(rpce.errs) > 0 and "bad_element" in rpce.errs[0]: raise CommandErrorException( - 'Unknown protocol: {proto}'.format(proto=rpce.errs[0]['bad_element'])) + "Unknown protocol: {proto}".format( + proto=rpce.errs[0]["bad_element"] + ) + ) raise CommandErrorException(rpce) except Exception as err: - raise CommandErrorException('Cannot retrieve routes! Reason: {err}'.format(err=err)) + raise CommandErrorException( + "Cannot retrieve routes! 
Reason: {err}".format(err=err) + ) routes_items = routes_table.items() @@ -1570,40 +1647,38 @@ def get_route_to(self, destination='', protocol=''): d = {} # next_hop = route[0] d = {elem[0]: elem[1] for elem in route[1]} - destination = d.pop('destination', '') - prefix_length = d.pop('prefix_length', 32) - destination = '{d}/{p}'.format( - d=destination, - p=prefix_length - ) + destination = d.pop("destination", "") + prefix_length = d.pop("prefix_length", 32) + destination = "{d}/{p}".format(d=destination, p=prefix_length) d.update({key: False for key in _BOOLEAN_FIELDS_ if d.get(key) is None}) - as_path = d.get('as_path') + as_path = d.get("as_path") if as_path is not None: - d['as_path'] = as_path.split(' I ')[0] \ - .replace('AS path:', '') \ - .replace('I', '') \ + d["as_path"] = ( + as_path.split(" I ")[0] + .replace("AS path:", "") + .replace("I", "") .strip() + ) # to be sure that contains only AS Numbers - if d.get('inactive_reason') is None: - d['inactive_reason'] = u'' - route_protocol = d.get('protocol').lower() + if d.get("inactive_reason") is None: + d["inactive_reason"] = "" + route_protocol = d.get("protocol").lower() if protocol and protocol != route_protocol: continue - communities = d.get('communities') + communities = d.get("communities") if communities is not None and type(communities) is not list: - d['communities'] = [communities] + d["communities"] = [communities] d_keys = list(d.keys()) # fields that are not in _COMMON_PROTOCOL_FIELDS_ are supposed to be protocol specific all_protocol_attributes = { - key: d.pop(key) - for key in d_keys - if key not in _COMMON_PROTOCOL_FIELDS_ + key: d.pop(key) for key in d_keys if key not in _COMMON_PROTOCOL_FIELDS_ } protocol_attributes = { - key: value for key, value in all_protocol_attributes.items() + key: value + for key, value in all_protocol_attributes.items() if key in _PROTOCOL_SPECIFIC_FIELDS_.get(route_protocol, []) } - d['protocol_attributes'] = protocol_attributes + d["protocol_attributes"] = protocol_attributes if destination not in routes.keys(): routes[destination] = [] routes[destination].append(d) @@ -1622,27 +1697,29 @@ def get_snmp_information(self): return snmp_information snmp_information = { - py23_compat.text_type(ele[0]): ele[1] if ele[1] else '' + py23_compat.text_type(ele[0]): ele[1] if ele[1] else "" for ele in snmp_items[0][1] } - snmp_information['community'] = {} - communities_table = snmp_information.pop('communities_table') + snmp_information["community"] = {} + communities_table = snmp_information.pop("communities_table") if not communities_table: return snmp_information for community in communities_table.items(): community_name = py23_compat.text_type(community[0]) - community_details = { - 'acl': '' - } - community_details.update({ - py23_compat.text_type(ele[0]): py23_compat.text_type( - ele[1] if ele[0] != 'mode' - else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1])) - for ele in community[1] - }) - snmp_information['community'][community_name] = community_details + community_details = {"acl": ""} + community_details.update( + { + py23_compat.text_type(ele[0]): py23_compat.text_type( + ele[1] + if ele[0] != "mode" + else C.SNMP_AUTHORIZATION_MODE_MAP.get(ele[1]) + ) + for ele in community[1] + } + ) + snmp_information["community"][community_name] = community_details return snmp_information @@ -1656,27 +1733,33 @@ def get_probes_config(self): for probe_test in probes_table_items: test_name = py23_compat.text_type(probe_test[0]) - test_details = { - p[0]: p[1] for p in probe_test[1] - } + 
test_details = {p[0]: p[1] for p in probe_test[1]} probe_name = napalm.base.helpers.convert( - py23_compat.text_type, test_details.pop('probe_name')) + py23_compat.text_type, test_details.pop("probe_name") + ) target = napalm.base.helpers.convert( - py23_compat.text_type, test_details.pop('target', '')) - test_interval = napalm.base.helpers.convert(int, test_details.pop('test_interval', '0')) - probe_count = napalm.base.helpers.convert(int, test_details.pop('probe_count', '0')) + py23_compat.text_type, test_details.pop("target", "") + ) + test_interval = napalm.base.helpers.convert( + int, test_details.pop("test_interval", "0") + ) + probe_count = napalm.base.helpers.convert( + int, test_details.pop("probe_count", "0") + ) probe_type = napalm.base.helpers.convert( - py23_compat.text_type, test_details.pop('probe_type', '')) + py23_compat.text_type, test_details.pop("probe_type", "") + ) source = napalm.base.helpers.convert( - py23_compat.text_type, test_details.pop('source_address', '')) + py23_compat.text_type, test_details.pop("source_address", "") + ) if probe_name not in probes.keys(): probes[probe_name] = {} probes[probe_name][test_name] = { - 'probe_type': probe_type, - 'target': target, - 'source': source, - 'probe_count': probe_count, - 'test_interval': test_interval + "probe_type": probe_type, + "target": target, + "source": source, + "probe_count": probe_count, + "test_interval": test_interval, } return probes @@ -1691,216 +1774,280 @@ def get_probes_results(self): for probe_result in probes_results_items: probe_name = py23_compat.text_type(probe_result[0]) - test_results = { - p[0]: p[1] for p in probe_result[1] - } - test_results['last_test_loss'] = napalm.base.helpers.convert( - int, test_results.pop('last_test_loss'), 0) + test_results = {p[0]: p[1] for p in probe_result[1]} + test_results["last_test_loss"] = napalm.base.helpers.convert( + int, test_results.pop("last_test_loss"), 0 + ) for test_param_name, test_param_value in test_results.items(): if isinstance(test_param_value, float): test_results[test_param_name] = test_param_value * 1e-3 # convert from useconds to mseconds - test_name = test_results.pop('test_name', '') - source = test_results.get('source', u'') + test_name = test_results.pop("test_name", "") + source = test_results.get("source", "") if source is None: - test_results['source'] = u'' + test_results["source"] = "" if probe_name not in probes_results.keys(): probes_results[probe_name] = {} probes_results[probe_name][test_name] = test_results return probes_results - def traceroute(self, - destination, - source=C.TRACEROUTE_SOURCE, - ttl=C.TRACEROUTE_TTL, - timeout=C.TRACEROUTE_TIMEOUT, - vrf=C.TRACEROUTE_VRF): + def traceroute( + self, + destination, + source=C.TRACEROUTE_SOURCE, + ttl=C.TRACEROUTE_TTL, + timeout=C.TRACEROUTE_TIMEOUT, + vrf=C.TRACEROUTE_VRF, + ): """Execute traceroute and return results.""" traceroute_result = {} # calling form RPC does not work properly :( # but defined junos_route_instance_table just in case - source_str = '' - maxttl_str = '' - wait_str = '' - vrf_str = '' + source_str = "" + maxttl_str = "" + wait_str = "" + vrf_str = "" if source: - source_str = ' source {source}'.format(source=source) + source_str = " source {source}".format(source=source) if ttl: - maxttl_str = ' ttl {ttl}'.format(ttl=ttl) + maxttl_str = " ttl {ttl}".format(ttl=ttl) if timeout: - wait_str = ' wait {timeout}'.format(timeout=timeout) + wait_str = " wait {timeout}".format(timeout=timeout) if vrf: - vrf_str = ' routing-instance {vrf}'.format(vrf=vrf) + 
vrf_str = " routing-instance {vrf}".format(vrf=vrf) - traceroute_command = 'traceroute {destination}{source}{maxttl}{wait}{vrf}'.format( + traceroute_command = "traceroute {destination}{source}{maxttl}{wait}{vrf}".format( destination=destination, source=source_str, maxttl=maxttl_str, wait=wait_str, - vrf=vrf_str + vrf=vrf_str, ) - traceroute_rpc = E('command', traceroute_command) + traceroute_rpc = E("command", traceroute_command) rpc_reply = self.device._conn.rpc(traceroute_rpc)._NCElement__doc # make direct RPC call via NETCONF - traceroute_results = rpc_reply.find('.//traceroute-results') + traceroute_results = rpc_reply.find(".//traceroute-results") traceroute_failure = napalm.base.helpers.find_txt( - traceroute_results, 'traceroute-failure', '') + traceroute_results, "traceroute-failure", "" + ) error_message = napalm.base.helpers.find_txt( - traceroute_results, 'rpc-error/error-message', '') + traceroute_results, "rpc-error/error-message", "" + ) if traceroute_failure and error_message: - return {'error': '{}: {}'.format(traceroute_failure, error_message)} + return {"error": "{}: {}".format(traceroute_failure, error_message)} - traceroute_result['success'] = {} - for hop in traceroute_results.findall('hop'): + traceroute_result["success"] = {} + for hop in traceroute_results.findall("hop"): ttl_value = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(hop, 'ttl-value'), 1) - if ttl_value not in traceroute_result['success']: - traceroute_result['success'][ttl_value] = {'probes': {}} - for probe in hop.findall('probe-result'): + int, napalm.base.helpers.find_txt(hop, "ttl-value"), 1 + ) + if ttl_value not in traceroute_result["success"]: + traceroute_result["success"][ttl_value] = {"probes": {}} + for probe in hop.findall("probe-result"): probe_index = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(probe, 'probe-index'), 0) + int, napalm.base.helpers.find_txt(probe, "probe-index"), 0 + ) ip_address = napalm.base.helpers.convert( - napalm.base.helpers.ip, napalm.base.helpers.find_txt(probe, 'ip-address'), '*') + napalm.base.helpers.ip, + napalm.base.helpers.find_txt(probe, "ip-address"), + "*", + ) host_name = py23_compat.text_type( - napalm.base.helpers.find_txt(probe, 'host-name', '*')) - rtt = napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt(probe, 'rtt'), 0) * 1e-3 # ms - traceroute_result['success'][ttl_value]['probes'][probe_index] = { - 'ip_address': ip_address, - 'host_name': host_name, - 'rtt': rtt + napalm.base.helpers.find_txt(probe, "host-name", "*") + ) + rtt = ( + napalm.base.helpers.convert( + float, napalm.base.helpers.find_txt(probe, "rtt"), 0 + ) + * 1e-3 + ) # ms + traceroute_result["success"][ttl_value]["probes"][probe_index] = { + "ip_address": ip_address, + "host_name": host_name, + "rtt": rtt, } return traceroute_result - def ping(self, destination, source=C.PING_SOURCE, ttl=C.PING_TTL, - timeout=C.PING_TIMEOUT, size=C.PING_SIZE, count=C.PING_COUNT, vrf=C.PING_VRF): + def ping( + self, + destination, + source=C.PING_SOURCE, + ttl=C.PING_TTL, + timeout=C.PING_TIMEOUT, + size=C.PING_SIZE, + count=C.PING_COUNT, + vrf=C.PING_VRF, + ): ping_dict = {} - source_str = '' - maxttl_str = '' - timeout_str = '' - size_str = '' - count_str = '' - vrf_str = '' + source_str = "" + maxttl_str = "" + timeout_str = "" + size_str = "" + count_str = "" + vrf_str = "" if source: - source_str = ' source {source}'.format(source=source) + source_str = " source {source}".format(source=source) if ttl: - maxttl_str = ' ttl 
{ttl}'.format(ttl=ttl) + maxttl_str = " ttl {ttl}".format(ttl=ttl) if timeout: - timeout_str = ' wait {timeout}'.format(timeout=timeout) + timeout_str = " wait {timeout}".format(timeout=timeout) if size: - size_str = ' size {size}'.format(size=size) + size_str = " size {size}".format(size=size) if count: - count_str = ' count {count}'.format(count=count) + count_str = " count {count}".format(count=count) if vrf: - vrf_str = ' routing-instance {vrf}'.format(vrf=vrf) + vrf_str = " routing-instance {vrf}".format(vrf=vrf) - ping_command = 'ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}'.format( + ping_command = "ping {destination}{source}{ttl}{timeout}{size}{count}{vrf}".format( destination=destination, source=source_str, ttl=maxttl_str, timeout=timeout_str, size=size_str, count=count_str, - vrf=vrf_str + vrf=vrf_str, ) - ping_rpc = E('command', ping_command) + ping_rpc = E("command", ping_command) rpc_reply = self.device._conn.rpc(ping_rpc)._NCElement__doc # make direct RPC call via NETCONF - probe_summary = rpc_reply.find('.//probe-results-summary') + probe_summary = rpc_reply.find(".//probe-results-summary") if probe_summary is None: - rpc_error = rpc_reply.find('.//rpc-error') - return {'error': '{}'.format( - napalm.base.helpers.find_txt(rpc_error, 'error-message'))} + rpc_error = rpc_reply.find(".//rpc-error") + return { + "error": "{}".format( + napalm.base.helpers.find_txt(rpc_error, "error-message") + ) + } packet_loss = napalm.base.helpers.convert( - int, napalm.base.helpers.find_txt(probe_summary, 'packet-loss'), 100) + int, napalm.base.helpers.find_txt(probe_summary, "packet-loss"), 100 + ) # rtt values are valid only if a we get an ICMP reply if packet_loss is not 100: - ping_dict['success'] = {} - ping_dict['success']['probes_sent'] = int( - probe_summary.findtext("probes-sent")) - ping_dict['success']['packet_loss'] = packet_loss - ping_dict['success'].update({ - - 'rtt_min': round((napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe_summary, 'rtt-minimum'), -1) * 1e-3), 3), - - 'rtt_max': round((napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe_summary, 'rtt-maximum'), -1) * 1e-3), 3), - - 'rtt_avg': round((napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe_summary, 'rtt-average'), -1) * 1e-3), 3), - - 'rtt_stddev': round((napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe_summary, 'rtt-stddev'), -1) * 1e-3), 3) - }) + ping_dict["success"] = {} + ping_dict["success"]["probes_sent"] = int( + probe_summary.findtext("probes-sent") + ) + ping_dict["success"]["packet_loss"] = packet_loss + ping_dict["success"].update( + { + "rtt_min": round( + ( + napalm.base.helpers.convert( + float, + napalm.base.helpers.find_txt( + probe_summary, "rtt-minimum" + ), + -1, + ) + * 1e-3 + ), + 3, + ), + "rtt_max": round( + ( + napalm.base.helpers.convert( + float, + napalm.base.helpers.find_txt( + probe_summary, "rtt-maximum" + ), + -1, + ) + * 1e-3 + ), + 3, + ), + "rtt_avg": round( + ( + napalm.base.helpers.convert( + float, + napalm.base.helpers.find_txt( + probe_summary, "rtt-average" + ), + -1, + ) + * 1e-3 + ), + 3, + ), + "rtt_stddev": round( + ( + napalm.base.helpers.convert( + float, + napalm.base.helpers.find_txt( + probe_summary, "rtt-stddev" + ), + -1, + ) + * 1e-3 + ), + 3, + ), + } + ) - tmp = rpc_reply.find('.//ping-results') + tmp = rpc_reply.find(".//ping-results") results_array = [] - for probe_result in tmp.findall('probe-result'): + for probe_result in 
tmp.findall("probe-result"): ip_address = napalm.base.helpers.convert( napalm.base.helpers.ip, - napalm.base.helpers.find_txt(probe_result, 'ip-address'), '*') + napalm.base.helpers.find_txt(probe_result, "ip-address"), + "*", + ) rtt = round( - (napalm.base.helpers.convert( - float, napalm.base.helpers.find_txt( - probe_result, 'rtt'), -1) * 1e-3), 3) + ( + napalm.base.helpers.convert( + float, napalm.base.helpers.find_txt(probe_result, "rtt"), -1 + ) + * 1e-3 + ), + 3, + ) - results_array.append({'ip_address': ip_address, - 'rtt': rtt}) + results_array.append({"ip_address": ip_address, "rtt": rtt}) - ping_dict['success'].update({'results': results_array}) + ping_dict["success"].update({"results": results_array}) else: - return {'error': 'Packet loss {}'.format(packet_loss)} + return {"error": "Packet loss {}".format(packet_loss)} return ping_dict def _get_root(self): """get root user password.""" - _DEFAULT_USER_DETAILS = { - 'level': 20, - 'password': '', - 'sshkeys': [] - } + _DEFAULT_USER_DETAILS = {"level": 20, "password": "", "sshkeys": []} root = {} root_table = junos_views.junos_root_table(self.device) root_table.get() root_items = root_table.items() for user_entry in root_items: - username = 'root' + username = "root" user_details = _DEFAULT_USER_DETAILS.copy() - user_details.update({ - d[0]: d[1] for d in user_entry[1] if d[1] - }) + user_details.update({d[0]: d[1] for d in user_entry[1] if d[1]}) user_details = { key: py23_compat.text_type(user_details[key]) for key in user_details.keys() } - user_details['level'] = int(user_details['level']) - user_details['sshkeys'] = [ + user_details["level"] = int(user_details["level"]) + user_details["sshkeys"] = [ user_details.pop(key) - for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa'] - if user_details.get(key, '') + for key in ["ssh_rsa", "ssh_dsa", "ssh_ecdsa"] + if user_details.get(key, "") ] root[username] = user_details return root @@ -1910,18 +2057,14 @@ def get_users(self): users = {} _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP = { - 'super-user': 15, - 'superuser': 15, - 'operator': 5, - 'read-only': 1, - 'unauthorized': 0 + "super-user": 15, + "superuser": 15, + "operator": 5, + "read-only": 1, + "unauthorized": 0, } - _DEFAULT_USER_DETAILS = { - 'level': 0, - 'password': '', - 'sshkeys': [] - } + _DEFAULT_USER_DETAILS = {"level": 0, "password": "", "sshkeys": []} users_table = junos_views.junos_users_table(self.device) users_table.get() @@ -1931,22 +2074,18 @@ def get_users(self): for user_entry in users_items: username = user_entry[0] user_details = _DEFAULT_USER_DETAILS.copy() - user_details.update({ - d[0]: d[1] for d in user_entry[1] if d[1] - }) - user_class = user_details.pop('class', '') + user_details.update({d[0]: d[1] for d in user_entry[1] if d[1]}) + user_class = user_details.pop("class", "") user_details = { key: py23_compat.text_type(user_details[key]) for key in user_details.keys() } level = _JUNOS_CLASS_CISCO_PRIVILEGE_LEVEL_MAP.get(user_class, 0) - user_details.update({ - 'level': level - }) - user_details['sshkeys'] = [ + user_details.update({"level": level}) + user_details["sshkeys"] = [ user_details.pop(key) - for key in ['ssh_rsa', 'ssh_dsa', 'ssh_ecdsa'] - if user_details.get(key, '') + for key in ["ssh_rsa", "ssh_dsa", "ssh_ecdsa"] + if user_details.get(key, "") ] users[username] = user_details users.update(root_user) @@ -1965,7 +2104,7 @@ def get_optics(self): optics_items_with_lane = [] for intf_optic_item in optics_items: temp_list = list(intf_optic_item) - temp_list.insert(1, u"0") + temp_list.insert(1, "0") 
new_intf_optic_item = tuple(temp_list) optics_items_with_lane.append(new_intf_optic_item) @@ -2005,71 +2144,69 @@ def get_optics(self): optics = dict(intf_optic_item[2]) if interface_name not in optics_detail: optics_detail[interface_name] = {} - optics_detail[interface_name]['physical_channels'] = {} - optics_detail[interface_name]['physical_channels']['channel'] = [] + optics_detail[interface_name]["physical_channels"] = {} + optics_detail[interface_name]["physical_channels"]["channel"] = [] INVALID_LIGHT_LEVEL = [None, C.OPTICS_NULL_LEVEL, C.OPTICS_NULL_LEVEL_SPC] # Defaulting avg, min, max values to 0.0 since device does not # return these values intf_optics = { - 'index': int(lane), - 'state': { - 'input_power': { - 'instant': ( - float(optics['input_power']) - if optics['input_power'] not in INVALID_LIGHT_LEVEL - else 0.0), - 'avg': 0.0, - 'max': 0.0, - 'min': 0.0 + "index": int(lane), + "state": { + "input_power": { + "instant": ( + float(optics["input_power"]) + if optics["input_power"] not in INVALID_LIGHT_LEVEL + else 0.0 + ), + "avg": 0.0, + "max": 0.0, + "min": 0.0, }, - 'output_power': { - 'instant': ( - float(optics['output_power']) - if optics['output_power'] not in INVALID_LIGHT_LEVEL - else 0.0), - 'avg': 0.0, - 'max': 0.0, - 'min': 0.0 + "output_power": { + "instant": ( + float(optics["output_power"]) + if optics["output_power"] not in INVALID_LIGHT_LEVEL + else 0.0 + ), + "avg": 0.0, + "max": 0.0, + "min": 0.0, }, - 'laser_bias_current': { - 'instant': ( - float(optics['laser_bias_current']) - if optics['laser_bias_current'] not in INVALID_LIGHT_LEVEL - else 0.0), - 'avg': 0.0, - 'max': 0.0, - 'min': 0.0 - } - } + "laser_bias_current": { + "instant": ( + float(optics["laser_bias_current"]) + if optics["laser_bias_current"] not in INVALID_LIGHT_LEVEL + else 0.0 + ), + "avg": 0.0, + "max": 0.0, + "min": 0.0, + }, + }, } - optics_detail[interface_name]['physical_channels']['channel'].append(intf_optics) + optics_detail[interface_name]["physical_channels"]["channel"].append( + intf_optics + ) return optics_detail - def get_config(self, retrieve='all'): - rv = { - 'startup': '', - 'running': '', - 'candidate': '' - } + def get_config(self, retrieve="all"): + rv = {"startup": "", "running": "", "candidate": ""} - options = { - 'format': 'text', - 'database': 'candidate' - } + options = {"format": "text", "database": "candidate"} - if retrieve in ('candidate', 'all'): + if retrieve in ("candidate", "all"): config = self.device.rpc.get_config(filter_xml=None, options=options) - rv['candidate'] = py23_compat.text_type(config.text) - if retrieve in ('running', 'all'): - options['database'] = 'committed' + rv["candidate"] = py23_compat.text_type(config.text) + if retrieve in ("running", "all"): + options["database"] = "committed" config = self.device.rpc.get_config(filter_xml=None, options=options) - rv['running'] = py23_compat.text_type(config.text) + rv["running"] = py23_compat.text_type(config.text) return rv - def get_network_instances(self, name=''): + def get_network_instances(self, name=""): network_instances = {} @@ -2081,45 +2218,43 @@ def get_network_instances(self, name=''): for ri_entry in ri_entries: ri_name = py23_compat.text_type(ri_entry[0]) - ri_details = { - d[0]: d[1] for d in ri_entry[1] - } - ri_type = ri_details['instance_type'] + ri_details = {d[0]: d[1] for d in ri_entry[1]} + ri_type = ri_details["instance_type"] if ri_type is None: - ri_type = 'default' - ri_rd = ri_details['route_distinguisher'] - ri_interfaces = ri_details['interfaces'] + ri_type = 
"default" + ri_rd = ri_details["route_distinguisher"] + ri_interfaces = ri_details["interfaces"] if not isinstance(ri_interfaces, list): ri_interfaces = [ri_interfaces] network_instances[ri_name] = { - 'name': ri_name, - 'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get(ri_type, ri_type), # default: return raw - 'state': { - 'route_distinguisher': ri_rd if ri_rd else '' - }, - 'interfaces': { - 'interface': { + "name": ri_name, + "type": C.OC_NETWORK_INSTANCE_TYPE_MAP.get( + ri_type, ri_type + ), # default: return raw + "state": {"route_distinguisher": ri_rd if ri_rd else ""}, + "interfaces": { + "interface": { intrf_name: {} for intrf_name in ri_interfaces if intrf_name } - } + }, } - vrf_interfaces.extend(network_instances[ri_name]['interfaces']['interface'].keys()) + vrf_interfaces.extend( + network_instances[ri_name]["interfaces"]["interface"].keys() + ) all_interfaces = self.get_interfaces().keys() default_interfaces = list(set(all_interfaces) - set(vrf_interfaces)) - if 'default' not in network_instances: - network_instances['default'] = { - 'name': 'default', - 'type': C.OC_NETWORK_INSTANCE_TYPE_MAP.get('default'), - 'state': { - 'route_distinguisher': '' - }, - 'interfaces': { - 'interface': { + if "default" not in network_instances: + network_instances["default"] = { + "name": "default", + "type": C.OC_NETWORK_INSTANCE_TYPE_MAP.get("default"), + "state": {"route_distinguisher": ""}, + "interfaces": { + "interface": { py23_compat.text_type(intrf_name): {} for intrf_name in default_interfaces } - } + }, } if not name: diff --git a/napalm/junos/utils/junos_views.py b/napalm/junos/utils/junos_views.py index b14292e36..471a978e6 100755 --- a/napalm/junos/utils/junos_views.py +++ b/napalm/junos/utils/junos_views.py @@ -20,7 +20,7 @@ def _loadyaml_bypass(yaml_str): return FactoryLoader().load(yaml.safe_load(yaml_str)) -_YAML_ = splitext(__file__)[0] + '.yml' +_YAML_ = splitext(__file__)[0] + ".yml" if py23_compat.PY2: globals().update(loadyaml(_YAML_)) else: diff --git a/napalm/nxos/__init__.py b/napalm/nxos/__init__.py index 37a4dfb3c..695c79189 100644 --- a/napalm/nxos/__init__.py +++ b/napalm/nxos/__init__.py @@ -23,8 +23,8 @@ from napalm.nxos.nxos import NXOSDriverBase try: - __version__ = pkg_resources.get_distribution('napalm-nxos').version + __version__ = pkg_resources.get_distribution("napalm-nxos").version except pkg_resources.DistributionNotFound: __version__ = "Not installed" -__all__ = ('NXOSDriver', 'NXOSDriverBase') +__all__ = ("NXOSDriver", "NXOSDriverBase") diff --git a/napalm/nxos/nxos.py b/napalm/nxos/nxos.py index 4a7c5cd21..949cc10e8 100644 --- a/napalm/nxos/nxos.py +++ b/napalm/nxos/nxos.py @@ -46,6 +46,7 @@ def ensure_netmiko_conn(func): """Decorator that ensures Netmiko connection exists.""" + def wrap_function(self, filename=None, config=None): try: netmiko_object = self._netmiko_device @@ -54,18 +55,19 @@ def wrap_function(self, filename=None, config=None): except AttributeError: device_type = c.NETMIKO_MAP[self.platform] netmiko_optional_args = self.netmiko_optional_args - if 'port' in netmiko_optional_args: - netmiko_optional_args['port'] = 22 + if "port" in netmiko_optional_args: + netmiko_optional_args["port"] = 22 self._netmiko_open( - device_type=device_type, - netmiko_optional_args=netmiko_optional_args, + device_type=device_type, netmiko_optional_args=netmiko_optional_args ) func(self, filename=filename, config=config) + return wrap_function class NXOSDriverBase(NetworkDriver): """Common code shared between nx-api and nxos_ssh.""" + def __init__(self, hostname, 
username, password, timeout=60, optional_args=None): if optional_args is None: optional_args = {} @@ -76,10 +78,10 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) self.replace = True self.loaded = False self.changed = False - self.merge_candidate = '' - self.candidate_cfg = 'candidate_config.txt' - self.rollback_cfg = 'rollback_config.txt' - self._dest_file_system = optional_args.pop('dest_file_system', "bootflash:") + self.merge_candidate = "" + self.candidate_cfg = "candidate_config.txt" + self.rollback_cfg = "rollback_config.txt" + self._dest_file_system = optional_args.pop("dest_file_system", "bootflash:") self.netmiko_optional_args = netmiko_args(optional_args) self.device = None @@ -87,7 +89,9 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) def load_replace_candidate(self, filename=None, config=None): if not filename and not config: - raise ReplaceConfigException('filename or config parameter must be provided.') + raise ReplaceConfigException( + "filename or config parameter must be provided." + ) if not filename: tmp_file = self._create_tmp_file(config) @@ -105,12 +109,14 @@ def load_replace_candidate(self, filename=None, config=None): direction="put", overwrite_file=True, ) - if not transfer_result['file_exists']: + if not transfer_result["file_exists"]: raise ValueError() except Exception: - msg = ('Could not transfer file. There was an error ' - 'during transfer. Please make sure remote ' - 'permissions are set.') + msg = ( + "Could not transfer file. There was an error " + "during transfer. Please make sure remote " + "permissions are set." + ) raise ReplaceConfigException(msg) self.replace = True @@ -120,9 +126,9 @@ def load_replace_candidate(self, filename=None, config=None): def load_merge_candidate(self, filename=None, config=None): if not filename and not config: - raise MergeConfigException('filename or config param must be provided.') + raise MergeConfigException("filename or config param must be provided.") - self.merge_candidate += '\n' # insert one extra line + self.merge_candidate += "\n" # insert one extra line if filename is not None: with open(filename, "r") as f: self.merge_candidate += f.read() @@ -134,8 +140,8 @@ def load_merge_candidate(self, filename=None, config=None): def _commit_merge(self): try: output = self._send_config(self.merge_candidate) - if output and 'Invalid command' in output: - raise MergeConfigException('Error while applying config!') + if output and "Invalid command" in output: + raise MergeConfigException("Error while applying config!") except Exception as e: self.changed = True self.rollback() @@ -143,7 +149,7 @@ def _commit_merge(self): self.changed = True # clear the merge buffer - self.merge_candidate = '' + self.merge_candidate = "" def _get_merge_diff(self): """ @@ -156,34 +162,39 @@ def _get_merge_diff(self): being sent by the merge_load_config() """ diff = [] - running_config = self.get_config(retrieve='running')['running'] + running_config = self.get_config(retrieve="running")["running"] running_lines = running_config.splitlines() for line in self.merge_candidate.splitlines(): if line not in running_lines and line: - if line[0].strip() != '!': + if line[0].strip() != "!": diff.append(line) - return '\n'.join(diff) + return "\n".join(diff) def _get_diff(self): """Get a diff between running config and a proposed file.""" diff = [] self._create_sot_file() diff_out = self._send_command( - 'show diff rollback-patch file {} file {}'.format( - 'sot_file', 
self.candidate_cfg), raw_text=True) + "show diff rollback-patch file {} file {}".format( + "sot_file", self.candidate_cfg + ), + raw_text=True, + ) try: - diff_out = diff_out.split( - 'Generating Rollback Patch')[1].replace( - 'Rollback Patch is Empty', '').strip() + diff_out = ( + diff_out.split("Generating Rollback Patch")[1] + .replace("Rollback Patch is Empty", "") + .strip() + ) for line in diff_out.splitlines(): if line: - if line[0].strip() != '!' and line[0].strip() != '.': - diff.append(line.rstrip(' ')) + if line[0].strip() != "!" and line[0].strip() != ".": + diff.append(line.rstrip(" ")) except (AttributeError, KeyError): raise ReplaceConfigException( "Could not calculate diff. It's possible the given file doesn't exist." ) - return '\n'.join(diff) + return "\n".join(diff) def compare_config(self): if self.loaded: @@ -191,11 +202,13 @@ def compare_config(self): return self._get_merge_diff() diff = self._get_diff() return diff - return '' + return "" def commit_config(self, message=""): if message: - raise NotImplementedError('Commit message not implemented for this platform') + raise NotImplementedError( + "Commit message not implemented for this platform" + ) if self.loaded: # Create checkpoint from current running-config self._save_to_checkpoint(self.rollback_cfg) @@ -208,12 +221,12 @@ def commit_config(self, message=""): self._copy_run_start() self.loaded = False else: - raise ReplaceConfigException('No config loaded.') + raise ReplaceConfigException("No config loaded.") def discard_config(self): if self.loaded: # clear the buffer - self.merge_candidate = '' + self.merge_candidate = "" if self.loaded and self.replace: self._delete_file(self.candidate_cfg) self.loaded = False @@ -227,17 +240,23 @@ def _create_sot_file(self): self._delete_file(filename="sot_file") except Exception: pass - commands = ['terminal dont-ask', 'checkpoint file sot_file', 'no terminal dont-ask'] + commands = [ + "terminal dont-ask", + "checkpoint file sot_file", + "no terminal dont-ask", + ] self._send_command_list(commands) - def ping(self, - destination, - source=c.PING_SOURCE, - ttl=c.PING_TTL, - timeout=c.PING_TIMEOUT, - size=c.PING_SIZE, - count=c.PING_COUNT, - vrf=c.PING_VRF): + def ping( + self, + destination, + source=c.PING_SOURCE, + ttl=c.PING_TTL, + timeout=c.PING_TIMEOUT, + size=c.PING_SIZE, + count=c.PING_COUNT, + vrf=c.PING_VRF, + ): """ Execute ping on the device and returns a dictionary with the result. 
Output dictionary has one of following keys: @@ -257,145 +276,161 @@ def ping(self, """ ping_dict = {} - version = '' + version = "" try: - version = '6' if IPAddress(destination).version == 6 else '' + version = "6" if IPAddress(destination).version == 6 else "" except AddrFormatError: # Allow use of DNS names pass - command = 'ping{version} {destination}'.format( - version=version, - destination=destination) - command += ' timeout {}'.format(timeout) - command += ' packet-size {}'.format(size) - command += ' count {}'.format(count) - if source != '': - command += ' source {}'.format(source) - - if vrf != '': - command += ' vrf {}'.format(vrf) + command = "ping{version} {destination}".format( + version=version, destination=destination + ) + command += " timeout {}".format(timeout) + command += " packet-size {}".format(size) + command += " count {}".format(count) + if source != "": + command += " source {}".format(source) + + if vrf != "": + command += " vrf {}".format(vrf) output = self._send_command(command, raw_text=True) - if 'connect:' in output: - ping_dict['error'] = output - elif 'PING' in output: - ping_dict['success'] = { - 'probes_sent': 0, - 'packet_loss': 0, - 'rtt_min': 0.0, - 'rtt_max': 0.0, - 'rtt_avg': 0.0, - 'rtt_stddev': 0.0, - 'results': [] + if "connect:" in output: + ping_dict["error"] = output + elif "PING" in output: + ping_dict["success"] = { + "probes_sent": 0, + "packet_loss": 0, + "rtt_min": 0.0, + "rtt_max": 0.0, + "rtt_avg": 0.0, + "rtt_stddev": 0.0, + "results": [], } results_array = [] for line in output.splitlines(): fields = line.split() - if 'icmp' in line: - if 'Unreachable' in line: + if "icmp" in line: + if "Unreachable" in line: if "(" in fields[2]: results_array.append( { - 'ip_address': py23_compat.text_type(fields[2][1:-1]), - 'rtt': 0.0, + "ip_address": py23_compat.text_type( + fields[2][1:-1] + ), + "rtt": 0.0, } ) else: - results_array.append({'ip_address': py23_compat.text_type(fields[1]), - 'rtt': 0.0}) - elif 'truncated' in line: + results_array.append( + { + "ip_address": py23_compat.text_type(fields[1]), + "rtt": 0.0, + } + ) + elif "truncated" in line: if "(" in fields[4]: results_array.append( { - 'ip_address': py23_compat.text_type(fields[4][1:-2]), - 'rtt': 0.0, + "ip_address": py23_compat.text_type( + fields[4][1:-2] + ), + "rtt": 0.0, } ) else: results_array.append( { - 'ip_address': py23_compat.text_type(fields[3][:-1]), - 'rtt': 0.0, + "ip_address": py23_compat.text_type(fields[3][:-1]), + "rtt": 0.0, } ) - elif fields[1] == 'bytes': - if version == '6': + elif fields[1] == "bytes": + if version == "6": m = fields[5][5:] else: m = fields[6][5:] - results_array.append({'ip_address': py23_compat.text_type(fields[3][:-1]), - 'rtt': float(m)}) - elif 'packets transmitted' in line: - ping_dict['success']['probes_sent'] = int(fields[0]) - ping_dict['success']['packet_loss'] = int(fields[0]) - int(fields[3]) - elif 'min/avg/max' in line: - m = fields[3].split('/') - ping_dict['success'].update({ - 'rtt_min': float(m[0]), - 'rtt_avg': float(m[1]), - 'rtt_max': float(m[2]), - }) - ping_dict['success'].update({'results': results_array}) + results_array.append( + { + "ip_address": py23_compat.text_type(fields[3][:-1]), + "rtt": float(m), + } + ) + elif "packets transmitted" in line: + ping_dict["success"]["probes_sent"] = int(fields[0]) + ping_dict["success"]["packet_loss"] = int(fields[0]) - int( + fields[3] + ) + elif "min/avg/max" in line: + m = fields[3].split("/") + ping_dict["success"].update( + { + "rtt_min": float(m[0]), + "rtt_avg": 
float(m[1]), + "rtt_max": float(m[2]), + } + ) + ping_dict["success"].update({"results": results_array}) return ping_dict - def traceroute(self, - destination, - source=c.TRACEROUTE_SOURCE, - ttl=c.TRACEROUTE_TTL, - timeout=c.TRACEROUTE_TIMEOUT, - vrf=c.TRACEROUTE_VRF): + def traceroute( + self, + destination, + source=c.TRACEROUTE_SOURCE, + ttl=c.TRACEROUTE_TTL, + timeout=c.TRACEROUTE_TIMEOUT, + vrf=c.TRACEROUTE_VRF, + ): _HOP_ENTRY_PROBE = [ - r'\s+', - r'(', # beginning of host_name (ip_address) RTT group - r'(', # beginning of host_name (ip_address) group only - r'([a-zA-Z0-9\.:-]*)', # hostname - r'\s+', - r'\(?([a-fA-F0-9\.:][^\)]*)\)?' # IP Address between brackets - r')?', # end of host_name (ip_address) group only + r"\s+", + r"(", # beginning of host_name (ip_address) RTT group + r"(", # beginning of host_name (ip_address) group only + r"([a-zA-Z0-9\.:-]*)", # hostname + r"\s+", + r"\(?([a-fA-F0-9\.:][^\)]*)\)?" # IP Address between brackets + r")?", # end of host_name (ip_address) group only # also hostname/ip are optional -- they can or cannot be specified # if not specified, means the current probe followed the same path as the previous - r'\s+', - r'(\d+\.\d+)\s+ms', # RTT - r'|\*', # OR *, when non responsive hop - r')' # end of host_name (ip_address) RTT group + r"\s+", + r"(\d+\.\d+)\s+ms", # RTT + r"|\*", # OR *, when non responsive hop + r")", # end of host_name (ip_address) RTT group ] - _HOP_ENTRY = [ - r'\s?', # space before hop index? - r'(\d+)', # hop index - ] + _HOP_ENTRY = [r"\s?", r"(\d+)"] # space before hop index? # hop index traceroute_result = {} timeout = 5 # seconds probes = 3 # 3 probes/jop and this cannot be changed on NXOS! - version = '' + version = "" try: - version = '6' if IPAddress(destination).version == 6 else '' + version = "6" if IPAddress(destination).version == 6 else "" except AddrFormatError: # Allow use of DNS names pass if source: - source_opt = 'source {source}'.format(source=source) - command = 'traceroute{version} {destination} {source_opt}'.format( - version=version, - destination=destination, - source_opt=source_opt) + source_opt = "source {source}".format(source=source) + command = "traceroute{version} {destination} {source_opt}".format( + version=version, destination=destination, source_opt=source_opt + ) else: - command = 'traceroute{version} {destination}'.format( - version=version, - destination=destination) + command = "traceroute{version} {destination}".format( + version=version, destination=destination + ) try: traceroute_raw_output = self._send_command(command, raw_text=True) except CommandErrorException: - return {'error': 'Cannot execute traceroute on the device: {}'.format(command)} + return { + "error": "Cannot execute traceroute on the device: {}".format(command) + } - hop_regex = ''.join(_HOP_ENTRY + _HOP_ENTRY_PROBE * probes) - traceroute_result['success'] = {} + hop_regex = "".join(_HOP_ENTRY + _HOP_ENTRY_PROBE * probes) + traceroute_result["success"] = {} if traceroute_raw_output: for line in traceroute_raw_output.splitlines(): hop_search = re.search(hop_regex, line) @@ -403,14 +438,15 @@ def traceroute(self, continue hop_details = hop_search.groups() hop_index = int(hop_details[0]) - previous_probe_host_name = '*' - previous_probe_ip_address = '*' - traceroute_result['success'][hop_index] = {'probes': {}} + previous_probe_host_name = "*" + previous_probe_ip_address = "*" + traceroute_result["success"][hop_index] = {"probes": {}} for probe_index in range(probes): host_name = hop_details[3 + probe_index * 5] 
ip_address_raw = hop_details[4 + probe_index * 5] ip_address = napalm.base.helpers.convert( - napalm.base.helpers.ip, ip_address_raw, ip_address_raw) + napalm.base.helpers.ip, ip_address_raw, ip_address_raw + ) rtt = hop_details[5 + probe_index * 5] if rtt: rtt = float(rtt) @@ -420,31 +456,33 @@ def traceroute(self, host_name = previous_probe_host_name if not ip_address: ip_address = previous_probe_ip_address - if hop_details[1 + probe_index * 5] == '*': - host_name = '*' - ip_address = '*' - traceroute_result['success'][hop_index]['probes'][probe_index + 1] = { - 'host_name': py23_compat.text_type(host_name), - 'ip_address': py23_compat.text_type(ip_address), - 'rtt': rtt + if hop_details[1 + probe_index * 5] == "*": + host_name = "*" + ip_address = "*" + traceroute_result["success"][hop_index]["probes"][ + probe_index + 1 + ] = { + "host_name": py23_compat.text_type(host_name), + "ip_address": py23_compat.text_type(ip_address), + "rtt": rtt, } previous_probe_host_name = host_name previous_probe_ip_address = ip_address return traceroute_result def _get_checkpoint_file(self): - filename = 'temp_cp_file_from_napalm' + filename = "temp_cp_file_from_napalm" self._set_checkpoint(filename) - command = 'show file {}'.format(filename) + command = "show file {}".format(filename) output = self._send_command(command, raw_text=True) self._delete_file(filename) return output def _set_checkpoint(self, filename): commands = [ - 'terminal dont-ask', - 'checkpoint file {}'.format(filename), - 'no terminal dont-ask', + "terminal dont-ask", + "checkpoint file {}".format(filename), + "no terminal dont-ask", ] self._send_command_list(commands) @@ -452,16 +490,16 @@ def _save_to_checkpoint(self, filename): """Save the current running config to the given file.""" commands = [ "terminal dont-ask", - 'checkpoint file {}'.format(filename), - 'no terminal dont-ask', + "checkpoint file {}".format(filename), + "no terminal dont-ask", ] self._send_command_list(commands) def _delete_file(self, filename): commands = [ - 'terminal dont-ask', - 'delete {}'.format(filename), - 'no terminal dont-ask' + "terminal dont-ask", + "delete {}".format(filename), + "no terminal dont-ask", ] self._send_command_list(commands) @@ -470,59 +508,65 @@ def _create_tmp_file(config): tmp_dir = tempfile.gettempdir() rand_fname = py23_compat.text_type(uuid.uuid4()) filename = os.path.join(tmp_dir, rand_fname) - with open(filename, 'wt') as fobj: + with open(filename, "wt") as fobj: fobj.write(config) return filename def _disable_confirmation(self): - self._send_command_list(['terminal dont-ask']) - - def get_config(self, retrieve='all'): - config = { - 'startup': '', - 'running': '', - 'candidate': '' - } # default values - - if retrieve.lower() in ('running', 'all'): - command = 'show running-config' - config['running'] = py23_compat.text_type(self._send_command(command, raw_text=True)) - if retrieve.lower() in ('startup', 'all'): - command = 'show startup-config' - config['startup'] = py23_compat.text_type(self._send_command(command, raw_text=True)) + self._send_command_list(["terminal dont-ask"]) + + def get_config(self, retrieve="all"): + config = {"startup": "", "running": "", "candidate": ""} # default values + + if retrieve.lower() in ("running", "all"): + command = "show running-config" + config["running"] = py23_compat.text_type( + self._send_command(command, raw_text=True) + ) + if retrieve.lower() in ("startup", "all"): + command = "show startup-config" + config["startup"] = py23_compat.text_type( + self._send_command(command, 
raw_text=True) + ) return config class NXOSDriver(NXOSDriverBase): def __init__(self, hostname, username, password, timeout=60, optional_args=None): - super().__init__(hostname, username, password, timeout=timeout, optional_args=optional_args) + super().__init__( + hostname, username, password, timeout=timeout, optional_args=optional_args + ) if optional_args is None: optional_args = {} # nxos_protocol is there for backwards compatibility, transport is the preferred method - self.transport = optional_args.get('transport', optional_args.get('nxos_protocol', 'https')) - if self.transport == 'https': - self.port = optional_args.get('port', 443) - elif self.transport == 'http': - self.port = optional_args.get('port', 80) + self.transport = optional_args.get( + "transport", optional_args.get("nxos_protocol", "https") + ) + if self.transport == "https": + self.port = optional_args.get("port", 443) + elif self.transport == "http": + self.port = optional_args.get("port", 80) - self.ssl_verify = optional_args.get('ssl_verify', False) - self.platform = 'nxos' + self.ssl_verify = optional_args.get("ssl_verify", False) + self.platform = "nxos" def open(self): try: - self.device = NXOSDevice(host=self.hostname, - username=self.username, - password=self.password, - timeout=self.timeout, - port=self.port, - transport=self.transport, - verify=self.ssl_verify, - api_format="jsonrpc") - self._send_command('show hostname') + self.device = NXOSDevice( + host=self.hostname, + username=self.username, + password=self.password, + timeout=self.timeout, + port=self.port, + transport=self.transport, + verify=self.ssl_verify, + api_format="jsonrpc", + ) + self._send_command("show hostname") except (NXAPIConnectionError, NXAPIAuthError): # unable to open connection - raise ConnectionException('Cannot connect to {}'.format(self.hostname)) + raise ConnectionException("Cannot connect to {}".format(self.hostname)) def close(self): self.device = None @@ -551,48 +595,39 @@ def _compute_timestamp(stupid_cisco_output): This method will determine the epoch of the event. 
e.g.: 23week(s) 3day(s) -> 1462248287 """ - if not stupid_cisco_output or stupid_cisco_output == 'never': + if not stupid_cisco_output or stupid_cisco_output == "never": return -1.0 - if '(s)' in stupid_cisco_output: + if "(s)" in stupid_cisco_output: pass - elif ':' in stupid_cisco_output: - stupid_cisco_output = stupid_cisco_output.replace(':', 'hour(s) ', 1) - stupid_cisco_output = stupid_cisco_output.replace(':', 'minute(s) ', 1) - stupid_cisco_output += 'second(s)' + elif ":" in stupid_cisco_output: + stupid_cisco_output = stupid_cisco_output.replace(":", "hour(s) ", 1) + stupid_cisco_output = stupid_cisco_output.replace(":", "minute(s) ", 1) + stupid_cisco_output += "second(s)" else: - stupid_cisco_output = stupid_cisco_output.replace('d', 'day(s) ') - stupid_cisco_output = stupid_cisco_output.replace('h', 'hour(s)') + stupid_cisco_output = stupid_cisco_output.replace("d", "day(s) ") + stupid_cisco_output = stupid_cisco_output.replace("h", "hour(s)") things = { - 'second(s)': { - 'weight': 1 - }, - 'minute(s)': { - 'weight': 60 - }, - 'hour(s)': { - 'weight': 3600 - }, - 'day(s)': { - 'weight': 24 * 3600 - }, - 'week(s)': { - 'weight': 7 * 24 * 3600 - }, - 'year(s)': { - 'weight': 365.25 * 24 * 3600 - } + "second(s)": {"weight": 1}, + "minute(s)": {"weight": 60}, + "hour(s)": {"weight": 3600}, + "day(s)": {"weight": 24 * 3600}, + "week(s)": {"weight": 7 * 24 * 3600}, + "year(s)": {"weight": 365.25 * 24 * 3600}, } things_keys = things.keys() for part in stupid_cisco_output.split(): for key in things_keys: if key in part: - things[key]['count'] = napalm.base.helpers.convert( - int, part.replace(key, ''), 0) + things[key]["count"] = napalm.base.helpers.convert( + int, part.replace(key, ""), 0 + ) - delta = sum([det.get('count', 0) * det.get('weight') for det in things.values()]) + delta = sum( + [det.get("count", 0) * det.get("weight") for det in things.values()] + ) return time.time() - delta @staticmethod @@ -624,20 +659,22 @@ def _get_command_table(self, command, table_name, row_name): def is_alive(self): if self.device: - return {'is_alive': True} + return {"is_alive": True} else: - return {'is_alive': False} + return {"is_alive": False} def _copy_run_start(self): - results = self.device.save(filename='startup-config') + results = self.device.save(filename="startup-config") if not results: - msg = 'Unable to save running-config to startup-config!' + msg = "Unable to save running-config to startup-config!" raise CommandErrorException(msg) def _load_cfg_from_checkpoint(self): - commands = ['terminal dont-ask', - 'rollback running-config file {}'.format(self.candidate_cfg), - 'no terminal dont-ask'] + commands = [ + "terminal dont-ask", + "rollback running-config file {}".format(self.candidate_cfg), + "no terminal dont-ask", + ] try: rollback_result = self._send_command_list(commands) except ConnectionError: @@ -649,10 +686,14 @@ def _load_cfg_from_checkpoint(self): # For nx-api a list is returned so extract the result associated with the # 'rollback' command. rollback_result = rollback_result[1] - msg = rollback_result.get('msg') if rollback_result.get('msg') else rollback_result - error_msg = True if rollback_result.get('error') else False - - if 'Rollback failed.' in msg or error_msg: + msg = ( + rollback_result.get("msg") + if rollback_result.get("msg") + else rollback_result + ) + error_msg = True if rollback_result.get("error") else False + + if "Rollback failed." 
in msg or error_msg: raise ReplaceConfigException(msg) elif rollback_result == []: raise ReplaceConfigException @@ -665,13 +706,13 @@ def rollback(self): def get_facts(self): facts = {} - facts['vendor'] = "Cisco" + facts["vendor"] = "Cisco" show_version = self._send_command("show version") - facts['model'] = show_version.get("chassis_id", "") - facts['hostname'] = show_version.get("host_name", "") - facts['serial_number'] = show_version.get("proc_board_id", "") - facts['os_version'] = show_version.get("sys_ver_str", "") + facts["model"] = show_version.get("chassis_id", "") + facts["hostname"] = show_version.get("host_name", "") + facts["serial_number"] = show_version.get("proc_board_id", "") + facts["os_version"] = show_version.get("sys_ver_str", "") uptime_days = show_version.get("kern_uptm_days", 0) uptime_hours = show_version.get("kern_uptm_hrs", 0) @@ -684,68 +725,75 @@ def get_facts(self): uptime += uptime_mins * 60 uptime += uptime_secs - facts['uptime'] = uptime + facts["uptime"] = uptime - iface_cmd = 'show interface' + iface_cmd = "show interface" interfaces_out = self._send_command(iface_cmd) - interfaces_body = interfaces_out['TABLE_interface']['ROW_interface'] - interface_list = [intf_data['interface'] for intf_data in interfaces_body] - facts['interface_list'] = interface_list + interfaces_body = interfaces_out["TABLE_interface"]["ROW_interface"] + interface_list = [intf_data["interface"] for intf_data in interfaces_body] + facts["interface_list"] = interface_list - hostname_cmd = 'show hostname' - hostname = self._send_command(hostname_cmd).get('hostname') + hostname_cmd = "show hostname" + hostname = self._send_command(hostname_cmd).get("hostname") if hostname: - facts['fqdn'] = hostname + facts["fqdn"] = hostname return facts def get_interfaces(self): interfaces = {} - iface_cmd = 'show interface' + iface_cmd = "show interface" interfaces_out = self._send_command(iface_cmd) - interfaces_body = interfaces_out['TABLE_interface']['ROW_interface'] + interfaces_body = interfaces_out["TABLE_interface"]["ROW_interface"] for interface_details in interfaces_body: - interface_name = interface_details.get('interface') + interface_name = interface_details.get("interface") # Earlier version of Nexus returned a list for 'eth_bw' (observed on 7.1(0)N1(1a)) - interface_speed = interface_details.get('eth_bw', 0) + interface_speed = interface_details.get("eth_bw", 0) if isinstance(interface_speed, list): interface_speed = interface_speed[0] interface_speed = int(interface_speed / 1000) - if 'admin_state' in interface_details: - is_up = interface_details.get('admin_state', '') == 'up' + if "admin_state" in interface_details: + is_up = interface_details.get("admin_state", "") == "up" else: - is_up = interface_details.get('state', '') == 'up' + is_up = interface_details.get("state", "") == "up" interfaces[interface_name] = { - 'is_up': is_up, - 'is_enabled': (interface_details.get('state') == 'up'), - 'description': py23_compat.text_type(interface_details.get('desc', '').strip('"')), - 'last_flapped': self._compute_timestamp( - interface_details.get('eth_link_flapped', '')), - 'speed': interface_speed, - 'mac_address': napalm.base.helpers.convert( - napalm.base.helpers.mac, interface_details.get('eth_hw_addr')), + "is_up": is_up, + "is_enabled": (interface_details.get("state") == "up"), + "description": py23_compat.text_type( + interface_details.get("desc", "").strip('"') + ), + "last_flapped": self._compute_timestamp( + interface_details.get("eth_link_flapped", "") + ), + "speed": 
interface_speed, + "mac_address": napalm.base.helpers.convert( + napalm.base.helpers.mac, interface_details.get("eth_hw_addr") + ), } return interfaces def get_lldp_neighbors(self): results = {} try: - command = 'show lldp neighbors' - lldp_raw_output = self.cli([command]).get(command, '') + command = "show lldp neighbors" + lldp_raw_output = self.cli([command]).get(command, "") lldp_neighbors = napalm.base.helpers.textfsm_extractor( - self, 'lldp_neighbors', lldp_raw_output) + self, "lldp_neighbors", lldp_raw_output + ) except NXAPICommandError: lldp_neighbors = [] for neighbor in lldp_neighbors: - local_iface = neighbor.get('local_interface') + local_iface = neighbor.get("local_interface") if neighbor.get(local_iface) is None: if local_iface not in results: results[local_iface] = [] - neighbor_dict = {'hostname': py23_compat.text_type(neighbor.get('neighbor')), - 'port': py23_compat.text_type(neighbor.get('neighbor_interface'))} + neighbor_dict = { + "hostname": py23_compat.text_type(neighbor.get("neighbor")), + "port": py23_compat.text_type(neighbor.get("neighbor_interface")), + } results[local_iface].append(neighbor_dict) return results @@ -753,12 +801,12 @@ def get_lldp_neighbors(self): def get_bgp_neighbors(self): results = {} bgp_state_dict = { - 'Idle': {'is_up': False, 'is_enabled': True}, - 'Active': {'is_up': False, 'is_enabled': True}, - 'Open': {'is_up': False, 'is_enabled': True}, - 'Established': {'is_up': True, 'is_enabled': True}, - 'Closing': {'is_up': True, 'is_enabled': True}, - 'Shutdown': {'is_up': False, 'is_enabled': False}, + "Idle": {"is_up": False, "is_enabled": True}, + "Active": {"is_up": False, "is_enabled": True}, + "Open": {"is_up": False, "is_enabled": True}, + "Established": {"is_up": True, "is_enabled": True}, + "Closing": {"is_up": True, "is_enabled": True}, + "Shutdown": {"is_up": False, "is_enabled": False}, } """ af_name_dict = { @@ -768,74 +816,80 @@ def get_bgp_neighbors(self): } """ af_name_dict = { - 1: {1: 'ipv4', 128: 'vpnv4'}, - 2: {1: 'ipv6', 128: 'vpnv6'}, - 25: {70: 'l2vpn'} + 1: {1: "ipv4", 128: "vpnv4"}, + 2: {1: "ipv6", 128: "vpnv6"}, + 25: {70: "l2vpn"}, } try: - cmd = 'show bgp all summary vrf all' - vrf_list = self._get_command_table(cmd, 'TABLE_vrf', 'ROW_vrf') + cmd = "show bgp all summary vrf all" + vrf_list = self._get_command_table(cmd, "TABLE_vrf", "ROW_vrf") except NXAPICommandError: vrf_list = [] for vrf_dict in vrf_list: result_vrf_dict = { - 'router_id': py23_compat.text_type(vrf_dict['vrf-router-id']), - 'peers': {} + "router_id": py23_compat.text_type(vrf_dict["vrf-router-id"]), + "peers": {}, } - af_list = vrf_dict.get('TABLE_af', {}).get('ROW_af', []) + af_list = vrf_dict.get("TABLE_af", {}).get("ROW_af", []) if isinstance(af_list, dict): af_list = [af_list] for af_dict in af_list: - saf_dict = af_dict.get('TABLE_saf', {}).get('ROW_saf', {}) - neighbors_list = saf_dict.get('TABLE_neighbor', {}).get('ROW_neighbor', []) + saf_dict = af_dict.get("TABLE_saf", {}).get("ROW_saf", {}) + neighbors_list = saf_dict.get("TABLE_neighbor", {}).get( + "ROW_neighbor", [] + ) if isinstance(neighbors_list, dict): neighbors_list = [neighbors_list] for neighbor_dict in neighbors_list: - neighborid = napalm.base.helpers.ip(neighbor_dict['neighborid']) - remoteas = napalm.base.helpers.as_number(neighbor_dict['neighboras']) - state = py23_compat.text_type(neighbor_dict['state']) + neighborid = napalm.base.helpers.ip(neighbor_dict["neighborid"]) + remoteas = napalm.base.helpers.as_number( + neighbor_dict["neighboras"] + ) + state = 
py23_compat.text_type(neighbor_dict["state"]) bgp_state = bgp_state_dict[state] - afid_dict = af_name_dict[int(af_dict['af-id'])] - safi_name = afid_dict[int(saf_dict['safi'])] + afid_dict = af_name_dict[int(af_dict["af-id"])] + safi_name = afid_dict[int(saf_dict["safi"])] result_peer_dict = { - 'local_as': int(vrf_dict['vrf-local-as']), - 'remote_as': remoteas, - 'remote_id': neighborid, - 'is_enabled': bgp_state['is_enabled'], - 'uptime': -1, - 'description': '', - 'is_up': bgp_state['is_up'], - 'address_family': { - safi_name: { - 'sent_prefixes': -1, - 'accepted_prefixes': -1, - 'received_prefixes': int(neighbor_dict['prefixreceived']) + "local_as": int(vrf_dict["vrf-local-as"]), + "remote_as": remoteas, + "remote_id": neighborid, + "is_enabled": bgp_state["is_enabled"], + "uptime": -1, + "description": "", + "is_up": bgp_state["is_up"], + "address_family": { + safi_name: { + "sent_prefixes": -1, + "accepted_prefixes": -1, + "received_prefixes": int( + neighbor_dict["prefixreceived"] + ), } - } + }, } - result_vrf_dict['peers'][neighborid] = result_peer_dict + result_vrf_dict["peers"][neighborid] = result_peer_dict - vrf_name = vrf_dict['vrf-name-out'] - if vrf_name == 'default': - vrf_name = 'global' + vrf_name = vrf_dict["vrf-name-out"] + if vrf_name == "default": + vrf_name = "global" results[vrf_name] = result_vrf_dict return results - def get_lldp_neighbors_detail(self, interface=''): + def get_lldp_neighbors_detail(self, interface=""): lldp_neighbors = {} - filter = '' + filter = "" if interface: - filter = 'interface {name} '.format(name=interface) + filter = "interface {name} ".format(name=interface) - command = 'show lldp neighbors {filter}detail'.format(filter=filter) + command = "show lldp neighbors {filter}detail".format(filter=filter) # seems that some old devices may not return JSON output... 
try: @@ -848,15 +902,15 @@ def get_lldp_neighbors_detail(self, interface=''): if not lldp_neighbors_list: return lldp_neighbors # empty dict - CHASSIS_REGEX = r'^(Chassis id:)\s+([a-z0-9\.]+)$' - PORT_REGEX = r'^(Port id:)\s+([0-9]+)$' - LOCAL_PORT_ID_REGEX = r'^(Local Port id:)\s+(.*)$' - PORT_DESCR_REGEX = r'^(Port Description:)\s+(.*)$' - SYSTEM_NAME_REGEX = r'^(System Name:)\s+(.*)$' - SYSTEM_DESCR_REGEX = r'^(System Description:)\s+(.*)$' - SYST_CAPAB_REEGX = r'^(System Capabilities:)\s+(.*)$' - ENABL_CAPAB_REGEX = r'^(Enabled Capabilities:)\s+(.*)$' - VLAN_ID_REGEX = r'^(Vlan ID:)\s+(.*)$' + CHASSIS_REGEX = r"^(Chassis id:)\s+([a-z0-9\.]+)$" + PORT_REGEX = r"^(Port id:)\s+([0-9]+)$" + LOCAL_PORT_ID_REGEX = r"^(Local Port id:)\s+(.*)$" + PORT_DESCR_REGEX = r"^(Port Description:)\s+(.*)$" + SYSTEM_NAME_REGEX = r"^(System Name:)\s+(.*)$" + SYSTEM_DESCR_REGEX = r"^(System Description:)\s+(.*)$" + SYST_CAPAB_REEGX = r"^(System Capabilities:)\s+(.*)$" + ENABL_CAPAB_REGEX = r"^(Enabled Capabilities:)\s+(.*)$" + VLAN_ID_REGEX = r"^(Vlan ID:)\s+(.*)$" lldp_neighbor = {} interface_name = None @@ -865,13 +919,17 @@ def get_lldp_neighbors_detail(self, interface=''): chassis_rgx = re.search(CHASSIS_REGEX, line, re.I) if chassis_rgx: lldp_neighbor = { - 'remote_chassis_id': napalm.base.helpers.mac(chassis_rgx.groups()[1]) + "remote_chassis_id": napalm.base.helpers.mac( + chassis_rgx.groups()[1] + ) } continue - lldp_neighbor['parent_interface'] = '' + lldp_neighbor["parent_interface"] = "" port_rgx = re.search(PORT_REGEX, line, re.I) if port_rgx: - lldp_neighbor['parent_interface'] = py23_compat.text_type(port_rgx.groups()[1]) + lldp_neighbor["parent_interface"] = py23_compat.text_type( + port_rgx.groups()[1] + ) continue local_port_rgx = re.search(LOCAL_PORT_ID_REGEX, line, re.I) if local_port_rgx: @@ -879,29 +937,36 @@ def get_lldp_neighbors_detail(self, interface=''): continue port_descr_rgx = re.search(PORT_DESCR_REGEX, line, re.I) if port_descr_rgx: - lldp_neighbor['remote_port'] = py23_compat.text_type(port_descr_rgx.groups()[1]) - lldp_neighbor['remote_port_description'] = py23_compat.text_type( - port_descr_rgx.groups()[1]) + lldp_neighbor["remote_port"] = py23_compat.text_type( + port_descr_rgx.groups()[1] + ) + lldp_neighbor["remote_port_description"] = py23_compat.text_type( + port_descr_rgx.groups()[1] + ) continue syst_name_rgx = re.search(SYSTEM_NAME_REGEX, line, re.I) if syst_name_rgx: - lldp_neighbor['remote_system_name'] = py23_compat.text_type( - syst_name_rgx.groups()[1]) + lldp_neighbor["remote_system_name"] = py23_compat.text_type( + syst_name_rgx.groups()[1] + ) continue syst_descr_rgx = re.search(SYSTEM_DESCR_REGEX, line, re.I) if syst_descr_rgx: - lldp_neighbor['remote_system_description'] = py23_compat.text_type( - syst_descr_rgx.groups()[1]) + lldp_neighbor["remote_system_description"] = py23_compat.text_type( + syst_descr_rgx.groups()[1] + ) continue syst_capab_rgx = re.search(SYST_CAPAB_REEGX, line, re.I) if syst_capab_rgx: - lldp_neighbor['remote_system_capab'] = py23_compat.text_type( - syst_capab_rgx.groups()[1]) + lldp_neighbor["remote_system_capab"] = py23_compat.text_type( + syst_capab_rgx.groups()[1] + ) continue syst_enabled_rgx = re.search(ENABL_CAPAB_REGEX, line, re.I) if syst_enabled_rgx: - lldp_neighbor['remote_system_enable_capab'] = py23_compat.text_type( - syst_enabled_rgx.groups()[1]) + lldp_neighbor["remote_system_enable_capab"] = py23_compat.text_type( + syst_enabled_rgx.groups()[1] + ) continue vlan_rgx = re.search(VLAN_ID_REGEX, line, re.I) if 
vlan_rgx: @@ -914,7 +979,7 @@ def get_lldp_neighbors_detail(self, interface=''): def cli(self, commands): cli_output = {} if type(commands) is not list: - raise TypeError('Please enter a valid list of commands!') + raise TypeError("Please enter a valid list of commands!") for command in commands: command_output = self._send_command(command, raw_text=True) @@ -923,24 +988,24 @@ def cli(self, commands): def get_arp_table(self): arp_table = [] - command = 'show ip arp' - arp_table_vrf = self._get_command_table(command, 'TABLE_vrf', 'ROW_vrf') - arp_table_raw = self._get_table_rows(arp_table_vrf[0], 'TABLE_adj', 'ROW_adj') + command = "show ip arp" + arp_table_vrf = self._get_command_table(command, "TABLE_vrf", "ROW_vrf") + arp_table_raw = self._get_table_rows(arp_table_vrf[0], "TABLE_adj", "ROW_adj") for arp_table_entry in arp_table_raw: - raw_ip = arp_table_entry.get('ip-addr-out') - raw_mac = arp_table_entry.get('mac') - age = arp_table_entry.get('time-stamp') - if age == '-': + raw_ip = arp_table_entry.get("ip-addr-out") + raw_mac = arp_table_entry.get("mac") + age = arp_table_entry.get("time-stamp") + if age == "-": age_sec = -1.0 - elif ':' not in age: + elif ":" not in age: # Cisco sometimes returns a sub second arp time 0.411797 try: age_sec = float(age) except ValueError: age_sec = -1.0 else: - fields = age.split(':') + fields = age.split(":") if len(fields) == 3: try: fields = [float(x) for x in fields] @@ -950,300 +1015,321 @@ def get_arp_table(self): age_sec = -1.0 age_sec = round(age_sec, 1) - interface = py23_compat.text_type(arp_table_entry.get('intf-out')) - arp_table.append({ - 'interface': interface, - 'mac': napalm.base.helpers.convert( - napalm.base.helpers.mac, raw_mac, raw_mac), - 'ip': napalm.base.helpers.ip(raw_ip), - 'age': age_sec - }) + interface = py23_compat.text_type(arp_table_entry.get("intf-out")) + arp_table.append( + { + "interface": interface, + "mac": napalm.base.helpers.convert( + napalm.base.helpers.mac, raw_mac, raw_mac + ), + "ip": napalm.base.helpers.ip(raw_ip), + "age": age_sec, + } + ) return arp_table def _get_ntp_entity(self, peer_type): ntp_entities = {} - command = 'show ntp peers' - ntp_peers_table = self._get_command_table(command, 'TABLE_peers', 'ROW_peers') + command = "show ntp peers" + ntp_peers_table = self._get_command_table(command, "TABLE_peers", "ROW_peers") for ntp_peer in ntp_peers_table: - if ntp_peer.get('serv_peer', '').strip() != peer_type: + if ntp_peer.get("serv_peer", "").strip() != peer_type: continue - peer_addr = napalm.base.helpers.ip(ntp_peer.get('PeerIPAddress').strip()) + peer_addr = napalm.base.helpers.ip(ntp_peer.get("PeerIPAddress").strip()) ntp_entities[peer_addr] = {} return ntp_entities def get_ntp_peers(self): - return self._get_ntp_entity('Peer') + return self._get_ntp_entity("Peer") def get_ntp_servers(self): - return self._get_ntp_entity('Server') + return self._get_ntp_entity("Server") def get_ntp_stats(self): ntp_stats = [] - command = 'show ntp peer-status' - ntp_stats_table = self._get_command_table(command, 'TABLE_peersstatus', 'ROW_peersstatus') + command = "show ntp peer-status" + ntp_stats_table = self._get_command_table( + command, "TABLE_peersstatus", "ROW_peersstatus" + ) for ntp_peer in ntp_stats_table: - peer_address = napalm.base.helpers.ip(ntp_peer.get('remote').strip()) - syncmode = ntp_peer.get('syncmode') - stratum = int(ntp_peer.get('st')) - hostpoll = int(ntp_peer.get('poll')) - reachability = int(ntp_peer.get('reach')) - delay = float(ntp_peer.get('delay')) - ntp_stats.append({ - 'remote': 
peer_address, - 'synchronized': (syncmode == '*'), - 'referenceid': peer_address, - 'stratum': stratum, - 'type': '', - 'when': '', - 'hostpoll': hostpoll, - 'reachability': reachability, - 'delay': delay, - 'offset': 0.0, - 'jitter': 0.0 - }) + peer_address = napalm.base.helpers.ip(ntp_peer.get("remote").strip()) + syncmode = ntp_peer.get("syncmode") + stratum = int(ntp_peer.get("st")) + hostpoll = int(ntp_peer.get("poll")) + reachability = int(ntp_peer.get("reach")) + delay = float(ntp_peer.get("delay")) + ntp_stats.append( + { + "remote": peer_address, + "synchronized": (syncmode == "*"), + "referenceid": peer_address, + "stratum": stratum, + "type": "", + "when": "", + "hostpoll": hostpoll, + "reachability": reachability, + "delay": delay, + "offset": 0.0, + "jitter": 0.0, + } + ) return ntp_stats def get_interfaces_ip(self): interfaces_ip = {} - ipv4_command = 'show ip interface' - ipv4_interf_table_vrf = self._get_command_table(ipv4_command, 'TABLE_intf', 'ROW_intf') + ipv4_command = "show ip interface" + ipv4_interf_table_vrf = self._get_command_table( + ipv4_command, "TABLE_intf", "ROW_intf" + ) for interface in ipv4_interf_table_vrf: - interface_name = py23_compat.text_type(interface.get('intf-name', '')) - addr_str = interface.get('prefix') - unnumbered = py23_compat.text_type(interface.get('unnum-intf', '')) + interface_name = py23_compat.text_type(interface.get("intf-name", "")) + addr_str = interface.get("prefix") + unnumbered = py23_compat.text_type(interface.get("unnum-intf", "")) if addr_str: address = napalm.base.helpers.ip(addr_str) - prefix = int(interface.get('masklen', '')) + prefix = int(interface.get("masklen", "")) if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if 'ipv4' not in interfaces_ip[interface_name].keys(): - interfaces_ip[interface_name]['ipv4'] = {} - if address not in interfaces_ip[interface_name].get('ipv4'): - interfaces_ip[interface_name]['ipv4'][address] = {} - interfaces_ip[interface_name]['ipv4'][address].update({ - 'prefix_length': prefix - }) + if "ipv4" not in interfaces_ip[interface_name].keys(): + interfaces_ip[interface_name]["ipv4"] = {} + if address not in interfaces_ip[interface_name].get("ipv4"): + interfaces_ip[interface_name]["ipv4"][address] = {} + interfaces_ip[interface_name]["ipv4"][address].update( + {"prefix_length": prefix} + ) elif unnumbered: for interf in ipv4_interf_table_vrf: - interf_name = py23_compat.text_type(interf.get('intf-name', '')) + interf_name = py23_compat.text_type(interf.get("intf-name", "")) if interf_name == unnumbered: - address = napalm.base.helpers.ip(interf.get('prefix')) - prefix = int(interf.get('masklen', '')) + address = napalm.base.helpers.ip(interf.get("prefix")) + prefix = int(interf.get("masklen", "")) if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if 'ipv4' not in interfaces_ip[interface_name].keys(): - interfaces_ip[interface_name]['ipv4'] = {} - if address not in interfaces_ip[interface_name].get('ipv4'): - interfaces_ip[interface_name]['ipv4'][address] = {} - interfaces_ip[interface_name]['ipv4'][address].update({ - 'prefix_length': prefix - }) - - secondary_addresses = interface.get('TABLE_secondary_address', {}) \ - .get('ROW_secondary_address', []) + if "ipv4" not in interfaces_ip[interface_name].keys(): + interfaces_ip[interface_name]["ipv4"] = {} + if address not in interfaces_ip[interface_name].get("ipv4"): + interfaces_ip[interface_name]["ipv4"][address] = {} + 
interfaces_ip[interface_name]["ipv4"][address].update( + {"prefix_length": prefix} + ) + + secondary_addresses = interface.get("TABLE_secondary_address", {}).get( + "ROW_secondary_address", [] + ) if type(secondary_addresses) is dict: secondary_addresses = [secondary_addresses] for secondary_address in secondary_addresses: - secondary_address_ip = napalm.base.helpers.ip(secondary_address.get('prefix1')) - secondary_address_prefix = int(secondary_address.get('masklen1', '')) - if 'ipv4' not in interfaces_ip[interface_name].keys(): - interfaces_ip[interface_name]['ipv4'] = {} - if secondary_address_ip not in interfaces_ip[interface_name].get('ipv4'): - interfaces_ip[interface_name]['ipv4'][secondary_address_ip] = {} - interfaces_ip[interface_name]['ipv4'][secondary_address_ip].update({ - 'prefix_length': secondary_address_prefix - }) - - ipv6_command = 'show ipv6 interface' - ipv6_interf_table_vrf = self._get_command_table(ipv6_command, 'TABLE_intf', 'ROW_intf') + secondary_address_ip = napalm.base.helpers.ip( + secondary_address.get("prefix1") + ) + secondary_address_prefix = int(secondary_address.get("masklen1", "")) + if "ipv4" not in interfaces_ip[interface_name].keys(): + interfaces_ip[interface_name]["ipv4"] = {} + if secondary_address_ip not in interfaces_ip[interface_name].get( + "ipv4" + ): + interfaces_ip[interface_name]["ipv4"][secondary_address_ip] = {} + interfaces_ip[interface_name]["ipv4"][secondary_address_ip].update( + {"prefix_length": secondary_address_prefix} + ) + + ipv6_command = "show ipv6 interface" + ipv6_interf_table_vrf = self._get_command_table( + ipv6_command, "TABLE_intf", "ROW_intf" + ) for interface in ipv6_interf_table_vrf: - interface_name = py23_compat.text_type(interface.get('intf-name', '')) + interface_name = py23_compat.text_type(interface.get("intf-name", "")) if interface_name not in interfaces_ip.keys(): interfaces_ip[interface_name] = {} - if 'ipv6' not in interfaces_ip[interface_name].keys(): - interfaces_ip[interface_name]['ipv6'] = {} - - if type(interface.get('addr', '')) is list: - for ipv6_address in interface.get('addr', ''): - address = napalm.base.helpers.ip(ipv6_address.split('/')[0]) - prefix = int(ipv6_address.split('/')[-1]) - if address not in interfaces_ip[interface_name].get('ipv6'): - interfaces_ip[interface_name]['ipv6'][address] = {} - interfaces_ip[interface_name]['ipv6'][address].update({ - 'prefix_length': prefix - }) + if "ipv6" not in interfaces_ip[interface_name].keys(): + interfaces_ip[interface_name]["ipv6"] = {} + + if type(interface.get("addr", "")) is list: + for ipv6_address in interface.get("addr", ""): + address = napalm.base.helpers.ip(ipv6_address.split("/")[0]) + prefix = int(ipv6_address.split("/")[-1]) + if address not in interfaces_ip[interface_name].get("ipv6"): + interfaces_ip[interface_name]["ipv6"][address] = {} + interfaces_ip[interface_name]["ipv6"][address].update( + {"prefix_length": prefix} + ) else: - address = napalm.base.helpers.ip(interface.get('addr', '').split('/')[0]) - prefix = interface.get('prefix', '').split('/')[-1] + address = napalm.base.helpers.ip( + interface.get("addr", "").split("/")[0] + ) + prefix = interface.get("prefix", "").split("/")[-1] if prefix: - prefix = int(interface.get('prefix', '').split('/')[-1]) + prefix = int(interface.get("prefix", "").split("/")[-1]) else: prefix = 128 - if address not in interfaces_ip[interface_name].get('ipv6'): - interfaces_ip[interface_name]['ipv6'][address] = {} - interfaces_ip[interface_name]['ipv6'][address].update({ - 'prefix_length': 
prefix - }) + if address not in interfaces_ip[interface_name].get("ipv6"): + interfaces_ip[interface_name]["ipv6"][address] = {} + interfaces_ip[interface_name]["ipv6"][address].update( + {"prefix_length": prefix} + ) return interfaces_ip def get_mac_address_table(self): mac_table = [] - command = 'show mac address-table' - mac_table_raw = self._get_command_table(command, 'TABLE_mac_address', 'ROW_mac_address') + command = "show mac address-table" + mac_table_raw = self._get_command_table( + command, "TABLE_mac_address", "ROW_mac_address" + ) for mac_entry in mac_table_raw: - raw_mac = mac_entry.get('disp_mac_addr') - interface = py23_compat.text_type(mac_entry.get('disp_port')) - vlan = int(mac_entry.get('disp_vlan')) + raw_mac = mac_entry.get("disp_mac_addr") + interface = py23_compat.text_type(mac_entry.get("disp_port")) + vlan = int(mac_entry.get("disp_vlan")) active = True - static = (mac_entry.get('disp_is_static') != '0') + static = mac_entry.get("disp_is_static") != "0" moves = 0 last_move = 0.0 - mac_table.append({ - 'mac': napalm.base.helpers.mac(raw_mac), - 'interface': interface, - 'vlan': vlan, - 'active': active, - 'static': static, - 'moves': moves, - 'last_move': last_move - }) + mac_table.append( + { + "mac": napalm.base.helpers.mac(raw_mac), + "interface": interface, + "vlan": vlan, + "active": active, + "static": static, + "moves": moves, + "last_move": last_move, + } + ) return mac_table def get_snmp_information(self): snmp_information = {} - snmp_command = 'show running-config' - snmp_raw_output = self.cli([snmp_command]).get(snmp_command, '') - snmp_config = napalm.base.helpers.textfsm_extractor(self, 'snmp_config', snmp_raw_output) + snmp_command = "show running-config" + snmp_raw_output = self.cli([snmp_command]).get(snmp_command, "") + snmp_config = napalm.base.helpers.textfsm_extractor( + self, "snmp_config", snmp_raw_output + ) if not snmp_config: return snmp_information snmp_information = { - 'contact': py23_compat.text_type(''), - 'location': py23_compat.text_type(''), - 'community': {}, - 'chassis_id': py23_compat.text_type('') + "contact": py23_compat.text_type(""), + "location": py23_compat.text_type(""), + "community": {}, + "chassis_id": py23_compat.text_type(""), } for snmp_entry in snmp_config: - contact = py23_compat.text_type(snmp_entry.get('contact', '')) + contact = py23_compat.text_type(snmp_entry.get("contact", "")) if contact: - snmp_information['contact'] = contact - location = py23_compat.text_type(snmp_entry.get('location', '')) + snmp_information["contact"] = contact + location = py23_compat.text_type(snmp_entry.get("location", "")) if location: - snmp_information['location'] = location + snmp_information["location"] = location - community_name = py23_compat.text_type(snmp_entry.get('community', '')) + community_name = py23_compat.text_type(snmp_entry.get("community", "")) if not community_name: continue - if community_name not in snmp_information['community'].keys(): - snmp_information['community'][community_name] = { - 'acl': py23_compat.text_type(snmp_entry.get('acl', '')), - 'mode': py23_compat.text_type(snmp_entry.get('mode', '').lower()) + if community_name not in snmp_information["community"].keys(): + snmp_information["community"][community_name] = { + "acl": py23_compat.text_type(snmp_entry.get("acl", "")), + "mode": py23_compat.text_type(snmp_entry.get("mode", "").lower()), } else: - acl = py23_compat.text_type(snmp_entry.get('acl', '')) + acl = py23_compat.text_type(snmp_entry.get("acl", "")) if acl: - 
snmp_information['community'][community_name]['acl'] = acl - mode = py23_compat.text_type(snmp_entry.get('mode', '').lower()) + snmp_information["community"][community_name]["acl"] = acl + mode = py23_compat.text_type(snmp_entry.get("mode", "").lower()) if mode: - snmp_information['community'][community_name]['mode'] = mode + snmp_information["community"][community_name]["mode"] = mode return snmp_information def get_users(self): - _CISCO_TO_CISCO_MAP = { - 'network-admin': 15, - 'network-operator': 5 - } + _CISCO_TO_CISCO_MAP = {"network-admin": 15, "network-operator": 5} - _DEFAULT_USER_DICT = { - 'password': '', - 'level': 0, - 'sshkeys': [] - } + _DEFAULT_USER_DICT = {"password": "", "level": 0, "sshkeys": []} users = {} - command = 'show running-config' - section_username_raw_output = self.cli([command]).get(command, '') + command = "show running-config" + section_username_raw_output = self.cli([command]).get(command, "") section_username_tabled_output = napalm.base.helpers.textfsm_extractor( - self, 'users', section_username_raw_output) + self, "users", section_username_raw_output + ) for user in section_username_tabled_output: - username = user.get('username', '') + username = user.get("username", "") if not username: continue if username not in users: users[username] = _DEFAULT_USER_DICT.copy() - password = user.get('password', '') + password = user.get("password", "") if password: - users[username]['password'] = py23_compat.text_type(password.strip()) + users[username]["password"] = py23_compat.text_type(password.strip()) level = 0 - role = user.get('role', '') - if role.startswith('priv'): - level = int(role.split('-')[-1]) + role = user.get("role", "") + if role.startswith("priv"): + level = int(role.split("-")[-1]) else: level = _CISCO_TO_CISCO_MAP.get(role, 0) - if level > users.get(username).get('level'): + if level > users.get(username).get("level"): # unfortunately on Cisco you can set different priv levels for the same user # Good news though: the device will consider the highest level - users[username]['level'] = level + users[username]["level"] = level - sshkeytype = user.get('sshkeytype', '') - sshkeyvalue = user.get('sshkeyvalue', '') + sshkeytype = user.get("sshkeytype", "") + sshkeyvalue = user.get("sshkeyvalue", "") if sshkeytype and sshkeyvalue: - if sshkeytype not in ['ssh-rsa', 'ssh-dsa']: + if sshkeytype not in ["ssh-rsa", "ssh-dsa"]: continue - users[username]['sshkeys'].append(py23_compat.text_type(sshkeyvalue)) + users[username]["sshkeys"].append(py23_compat.text_type(sshkeyvalue)) return users - def get_network_instances(self, name=''): + def get_network_instances(self, name=""): """ get_network_instances implementation for NX-OS """ # command 'show vrf detail' returns all VRFs with detailed information # format: list of dictionaries with keys such as 'vrf_name' and 'rd' - command = u'show vrf detail' - vrf_table_raw = self._get_command_table(command, u'TABLE_vrf', u'ROW_vrf') + command = "show vrf detail" + vrf_table_raw = self._get_command_table(command, "TABLE_vrf", "ROW_vrf") # command 'show vrf interface' returns all interfaces including their assigned VRF # format: list of dictionaries with keys 'if_name', 'vrf_name', 'vrf_id' and 'soo' - command = u'show vrf interface' - intf_table_raw = self._get_command_table(command, u'TABLE_if', u'ROW_if') + command = "show vrf interface" + intf_table_raw = self._get_command_table(command, "TABLE_if", "ROW_if") # create a dictionary with key = 'vrf_name' and value = list of interfaces vrf_intfs = defaultdict(list) 
for intf in intf_table_raw: - vrf_intfs[intf[u'vrf_name']].append(py23_compat.text_type(intf['if_name'])) + vrf_intfs[intf["vrf_name"]].append(py23_compat.text_type(intf["if_name"])) vrfs = {} for vrf in vrf_table_raw: - vrf_name = py23_compat.text_type(vrf.get('vrf_name')) + vrf_name = py23_compat.text_type(vrf.get("vrf_name")) vrfs[vrf_name] = {} - vrfs[vrf_name][u'name'] = vrf_name + vrfs[vrf_name]["name"] = vrf_name # differentiate between VRF type 'DEFAULT_INSTANCE' and 'L3VRF' - if vrf_name == u'default': - vrfs[vrf_name][u'type'] = u'DEFAULT_INSTANCE' + if vrf_name == "default": + vrfs[vrf_name]["type"] = "DEFAULT_INSTANCE" else: - vrfs[vrf_name][u'type'] = u'L3VRF' + vrfs[vrf_name]["type"] = "L3VRF" - vrfs[vrf_name][u'state'] = {u'route_distinguisher': - py23_compat.text_type(vrf.get('rd'))} + vrfs[vrf_name]["state"] = { + "route_distinguisher": py23_compat.text_type(vrf.get("rd")) + } # convert list of interfaces (vrf_intfs[vrf_name]) to expected format # format = dict with key = interface name and empty values - vrfs[vrf_name][u'interfaces'] = {} - vrfs[vrf_name][u'interfaces'][u'interface'] = dict.fromkeys(vrf_intfs[vrf_name], {}) + vrfs[vrf_name]["interfaces"] = {} + vrfs[vrf_name]["interfaces"]["interface"] = dict.fromkeys( + vrf_intfs[vrf_name], {} + ) # if name of a specific VRF was passed as an argument # only return results for this particular VRF diff --git a/napalm/nxos_ssh/__init__.py b/napalm/nxos_ssh/__init__.py index 67cb8a654..18fdb6746 100644 --- a/napalm/nxos_ssh/__init__.py +++ b/napalm/nxos_ssh/__init__.py @@ -23,8 +23,8 @@ try: - __version__ = pkg_resources.get_distribution('napalm-nxos-ssh').version + __version__ = pkg_resources.get_distribution("napalm-nxos-ssh").version except pkg_resources.DistributionNotFound: __version__ = "Not installed" -__all__ = ('NXOSSSHDriver',) +__all__ = ("NXOSSSHDriver",) diff --git a/napalm/nxos_ssh/nxos_ssh.py b/napalm/nxos_ssh/nxos_ssh.py index 18e709890..b2ac66917 100644 --- a/napalm/nxos_ssh/nxos_ssh.py +++ b/napalm/nxos_ssh/nxos_ssh.py @@ -42,10 +42,14 @@ IPV4_ADDR_REGEX = IP_ADDR_REGEX IPV6_ADDR_REGEX_1 = r"::" IPV6_ADDR_REGEX_2 = r"[0-9a-fA-F:]{1,39}::[0-9a-fA-F:]{1,39}" -IPV6_ADDR_REGEX_3 = r"[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:" \ - r"[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}" +IPV6_ADDR_REGEX_3 = ( + r"[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:" + r"[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}:[0-9a-fA-F]{1,3}" +) # Should validate IPv6 address using an IP address library after matching with this regex -IPV6_ADDR_REGEX = r"(?:{}|{}|{})".format(IPV6_ADDR_REGEX_1, IPV6_ADDR_REGEX_2, IPV6_ADDR_REGEX_3) +IPV6_ADDR_REGEX = r"(?:{}|{}|{})".format( + IPV6_ADDR_REGEX_1, IPV6_ADDR_REGEX_2, IPV6_ADDR_REGEX_3 +) IPV4_OR_IPV6_REGEX = r"(?:{}|{})".format(IPV4_ADDR_REGEX, IPV6_ADDR_REGEX) MAC_REGEX = r"[a-fA-F0-9]{4}\.[a-fA-F0-9]{4}\.[a-fA-F0-9]{4}" @@ -73,8 +77,10 @@ def parse_intf_section(interface): Ethernet154/1/48 is up (with no 'admin state') """ interface = interface.strip() - re_protocol = r"^(?P\S+?)\s+is\s+(?P.+?)" \ - r",\s+line\s+protocol\s+is\s+(?P\S+).*$" + re_protocol = ( + r"^(?P\S+?)\s+is\s+(?P.+?)" + r",\s+line\s+protocol\s+is\s+(?P\S+).*$" + ) re_intf_name_state = r"^(?P\S+) is (?P\S+).*" re_is_enabled_1 = r"^admin state is (?P\S+)$" re_is_enabled_2 = r"^admin state is (?P\S+), " @@ -88,22 +94,22 @@ def parse_intf_section(interface): # Check for 'protocol is ' lines match = re.search(re_protocol, interface, flags=re.M) if 
match: - intf_name = match.group('intf_name') - status = match.group('status') - protocol = match.group('protocol') + intf_name = match.group("intf_name") + status = match.group("status") + protocol = match.group("protocol") - if 'admin' in status.lower(): + if "admin" in status.lower(): is_enabled = False else: is_enabled = True - is_up = bool('up' in protocol) + is_up = bool("up" in protocol) else: # More standard is up, next line admin state is lines match = re.search(re_intf_name_state, interface) - intf_name = match.group('intf_name') - intf_state = match.group('intf_state').strip() - is_up = True if intf_state == 'up' else False + intf_name = match.group("intf_name") + intf_state = match.group("intf_state").strip() + is_up = True if intf_state == "up" else False admin_state_present = re.search("admin state is", interface) if admin_state_present: @@ -111,11 +117,13 @@ def parse_intf_section(interface): for x_pattern in [re_is_enabled_1, re_is_enabled_2]: match = re.search(x_pattern, interface, flags=re.M) if match: - is_enabled = match.group('is_enabled').strip() + is_enabled = match.group("is_enabled").strip() is_enabled = True if re.search("up", is_enabled) else False break else: - msg = "Error parsing intf, 'admin state' never detected:\n\n{}".format(interface) + msg = "Error parsing intf, 'admin state' never detected:\n\n{}".format( + interface + ) raise ValueError(msg) else: # No 'admin state' should be 'is up' or 'is down' strings @@ -128,7 +136,7 @@ def parse_intf_section(interface): match = re.search(re_mac, interface, flags=re.M) if match: - mac_address = match.group('mac_address') + mac_address = match.group("mac_address") mac_address = napalm.base.helpers.mac(mac_address) else: mac_address = "" @@ -136,38 +144,41 @@ def parse_intf_section(interface): match = re.search(re_hardware, interface, flags=re.M) speed_exist = True if match: - if match.group('hardware') == "NVE": + if match.group("hardware") == "NVE": speed_exist = False if speed_exist: match = re.search(re_speed, interface, flags=re.M) - speed = int(match.group('speed')) - speed_unit = match.group('speed_unit') + speed = int(match.group("speed")) + speed_unit = match.group("speed_unit") speed_unit = speed_unit.rstrip(",") # This was alway in Kbit (in the data I saw) if speed_unit != "Kbit": - msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format(interface) + msg = "Unexpected speed unit in show interfaces parsing:\n\n{}".format( + interface + ) raise ValueError(msg) speed = int(round(speed / 1000.0)) else: speed = -1 - description = '' + description = "" for x_pattern in [re_description_1, re_description_2]: match = re.search(x_pattern, interface, flags=re.M) if match: - description = match.group('description') + description = match.group("description") break return { - intf_name: { - 'description': description, - 'is_enabled': is_enabled, - 'is_up': is_up, - 'last_flapped': -1.0, - 'mac_address': mac_address, - 'speed': speed} - } + intf_name: { + "description": description, + "is_enabled": is_enabled, + "is_up": is_up, + "last_flapped": -1.0, + "mac_address": mac_address, + "speed": speed, + } + } def convert_hhmmss(hhmmss): @@ -197,20 +208,20 @@ def bgp_time_conversion(bgp_uptime): never """ bgp_uptime = bgp_uptime.strip() - uptime_letters = set(['w', 'h', 'd']) + uptime_letters = set(["w", "h", "d"]) - if 'never' in bgp_uptime: + if "never" in bgp_uptime: return -1 - elif ':' in bgp_uptime: + elif ":" in bgp_uptime: times = bgp_uptime.split(":") times = [int(x) for x in times] hours, minutes, 
seconds = times return (hours * 3600) + (minutes * 60) + seconds # Check if any letters 'w', 'h', 'd' are in the time string elif uptime_letters & set(bgp_uptime): - form1 = r'(\d+)d(\d+)h' # 1d17h - form2 = r'(\d+)w(\d+)d' # 8w5d - form3 = r'(\d+)y(\d+)w' # 1y28w + form1 = r"(\d+)d(\d+)h" # 1d17h + form2 = r"(\d+)w(\d+)d" # 8w5d + form3 = r"(\d+)y(\d+)w" # 1y28w match = re.search(form1, bgp_uptime) if match: days = int(match.group(1)) @@ -242,7 +253,7 @@ def bgp_normalize_table_data(bgp_table): bgp_table = bgp_table.strip() bgp_multiline_pattern = r"({})\s*\n".format(IPV4_OR_IPV6_REGEX) # Strip out the newline - return re.sub(bgp_multiline_pattern, r'\1', bgp_table) + return re.sub(bgp_multiline_pattern, r"\1", bgp_table) def bgp_table_parser(bgp_table): @@ -256,15 +267,38 @@ def bgp_table_parser(bgp_table): bgp_table_fields = bgp_entry.split() try: - if re.search(r'Shut.*Admin', bgp_entry): - (peer_ip, bgp_version, remote_as, msg_rcvd, msg_sent, _, _, _, - uptime, state_1, state_2) = bgp_table_fields + if re.search(r"Shut.*Admin", bgp_entry): + ( + peer_ip, + bgp_version, + remote_as, + msg_rcvd, + msg_sent, + _, + _, + _, + uptime, + state_1, + state_2, + ) = bgp_table_fields state_pfxrcd = "{} {}".format(state_1, state_2) else: - (peer_ip, bgp_version, remote_as, msg_rcvd, msg_sent, _, _, _, - uptime, state_pfxrcd) = bgp_table_fields + ( + peer_ip, + bgp_version, + remote_as, + msg_rcvd, + msg_sent, + _, + _, + _, + uptime, + state_pfxrcd, + ) = bgp_table_fields except ValueError: - raise ValueError("Unexpected entry ({}) in BGP summary table".format(bgp_table_fields)) + raise ValueError( + "Unexpected entry ({}) in BGP summary table".format(bgp_table_fields) + ) is_enabled = True try: @@ -273,7 +307,7 @@ def bgp_table_parser(bgp_table): except ValueError: received_prefixes = -1 is_up = False - if re.search(r'Shut.*Admin', state_pfxrcd): + if re.search(r"Shut.*Admin", state_pfxrcd): is_enabled = False if not is_up: @@ -301,11 +335,15 @@ def bgp_summary_parser(bgp_summary): if len(bgp_summary.strip().splitlines()) <= 1: return {} - allowed_afi = ['ipv4', 'ipv6', 'l2vpn'] + allowed_afi = ["ipv4", "ipv6", "l2vpn"] vrf_regex = r"^BGP summary information for VRF\s+(?P\S+)," - afi_regex = r"^BGP summary information.*address family (?P\S+ (?:Unicast|EVPN))" - local_router_regex = (r"^BGP router identifier\s+(?P\S+)" - r",\s+local AS number\s+(?P\S+)") + afi_regex = ( + r"^BGP summary information.*address family (?P\S+ (?:Unicast|EVPN))" + ) + local_router_regex = ( + r"^BGP router identifier\s+(?P\S+)" + r",\s+local AS number\s+(?P\S+)" + ) for pattern in [vrf_regex, afi_regex, local_router_regex]: match = re.search(pattern, bgp_summary, flags=re.M) @@ -313,36 +351,35 @@ def bgp_summary_parser(bgp_summary): bgp_summary_dict.update(match.groupdict(1)) # Some post regex cleanup and validation - vrf = bgp_summary_dict['vrf'] - if vrf.lower() == 'default': - bgp_summary_dict['vrf'] = 'global' + vrf = bgp_summary_dict["vrf"] + if vrf.lower() == "default": + bgp_summary_dict["vrf"] = "global" - afi = bgp_summary_dict['afi'] + afi = bgp_summary_dict["afi"] afi = afi.split()[0].lower() if afi not in allowed_afi: raise ValueError("AFI ({}) is invalid and not supported.".format(afi)) - bgp_summary_dict['afi'] = afi + bgp_summary_dict["afi"] = afi - local_as = bgp_summary_dict['local_as'] + local_as = bgp_summary_dict["local_as"] local_as = napalm.base.helpers.as_number(local_as) - match = re.search(IPV4_ADDR_REGEX, bgp_summary_dict['router_id']) + match = re.search(IPV4_ADDR_REGEX, 
bgp_summary_dict["router_id"]) if not match: - raise ValueError("BGP router_id ({}) is not valid".format(bgp_summary_dict['router_id'])) + raise ValueError( + "BGP router_id ({}) is not valid".format(bgp_summary_dict["router_id"]) + ) - vrf = bgp_summary_dict['vrf'] - bgp_return_dict = { - vrf: { - "router_id": bgp_summary_dict['router_id'], - "peers": {}, - } - } + vrf = bgp_summary_dict["vrf"] + bgp_return_dict = {vrf: {"router_id": bgp_summary_dict["router_id"], "peers": {}}} # Extract and process the tabular data tabular_divider = r"^Neighbor\s+.*PfxRcd$" tabular_data = re.split(tabular_divider, bgp_summary, flags=re.M) if len(tabular_data) != 2: - msg = "Unexpected data processing BGP summary information:\n\n{}".format(bgp_summary) + msg = "Unexpected data processing BGP summary information:\n\n{}".format( + bgp_summary + ) raise ValueError(msg) tabular_data = tabular_data[1] bgp_table = bgp_normalize_table_data(tabular_data) @@ -353,9 +390,11 @@ def bgp_summary_parser(bgp_summary): for neighbor, bgp_data in bgp_return_dict[vrf]["peers"].items(): received_prefixes = bgp_data.pop("received_prefixes") bgp_data["address_family"] = {} - prefixes_dict = {"sent_prefixes": -1, - "accepted_prefixes": -1, - "received_prefixes": received_prefixes} + prefixes_dict = { + "sent_prefixes": -1, + "accepted_prefixes": -1, + "received_prefixes": received_prefixes, + } bgp_data["address_family"][afi] = prefixes_dict bgp_data["local_as"] = local_as # FIX, hard-coding @@ -368,15 +407,15 @@ def bgp_summary_parser(bgp_summary): class NXOSSSHDriver(NXOSDriverBase): - def __init__(self, hostname, username, password, timeout=60, optional_args=None): - super().__init__(hostname, username, password, timeout=timeout, optional_args=optional_args) - self.platform = 'nxos_ssh' + super().__init__( + hostname, username, password, timeout=timeout, optional_args=optional_args + ) + self.platform = "nxos_ssh" def open(self): self.device = self._netmiko_open( - device_type='cisco_nxos', - netmiko_optional_args=self.netmiko_optional_args, + device_type="cisco_nxos", netmiko_optional_args=self.netmiko_optional_args ) def close(self): @@ -392,9 +431,11 @@ def _send_command(self, command, raw_text=False): def _send_command_list(self, commands): """Wrapper for Netmiko's send_command method (for list of commands.""" - output = '' + output = "" for command in commands: - output += self.device.send_command(command, strip_prompt=False, strip_command=False) + output += self.device.send_command( + command, strip_prompt=False, strip_command=False + ) return output def _send_config(self, commands): @@ -412,7 +453,7 @@ def parse_uptime(uptime_str): (years, weeks, days, hours, minutes) = (0, 0, 0, 0, 0) uptime_str = uptime_str.strip() - time_list = uptime_str.split(',') + time_list = uptime_str.split(",") for element in time_list: if re.search("year", element): years = int(element.split()[0]) @@ -427,8 +468,14 @@ def parse_uptime(uptime_str): elif re.search("second", element): seconds = int(element.split()[0]) - uptime_sec = (years * YEAR_SECONDS) + (weeks * WEEK_SECONDS) + (days * DAY_SECONDS) + \ - (hours * 3600) + (minutes * 60) + seconds + uptime_sec = ( + (years * YEAR_SECONDS) + + (weeks * WEEK_SECONDS) + + (days * DAY_SECONDS) + + (hours * 3600) + + (minutes * 60) + + seconds + ) return uptime_sec def is_alive(self): @@ -436,44 +483,44 @@ def is_alive(self): null = chr(0) try: if self.device is None: - return {'is_alive': False} + return {"is_alive": False} else: # Try sending ASCII null byte to maintain the connection alive 
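                # A sketch of the shape is_alive() ends up returning (values here are
                # only illustrative): {"is_alive": True} while the underlying transport
                # is still active, {"is_alive": False} otherwise, mirroring the return
                # statements in this method.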
self._send_command(null) except (socket.error, EOFError): # If unable to send, we can tell for sure that the connection is unusable, # hence return False. - return {'is_alive': False} - return { - 'is_alive': self.device.remote_conn.transport.is_active() - } + return {"is_alive": False} + return {"is_alive": self.device.remote_conn.transport.is_active()} def _copy_run_start(self): output = self.device.save_config() - if 'complete' in output.lower(): + if "complete" in output.lower(): return True else: - msg = 'Unable to save running-config to startup-config!' + msg = "Unable to save running-config to startup-config!" raise CommandErrorException(msg) def _load_cfg_from_checkpoint(self): - commands = ['terminal dont-ask', - 'rollback running-config file {}'.format(self.candidate_cfg), - 'no terminal dont-ask'] + commands = [ + "terminal dont-ask", + "rollback running-config file {}".format(self.candidate_cfg), + "no terminal dont-ask", + ] try: rollback_result = self._send_command_list(commands) finally: self.changed = True msg = rollback_result - if 'Rollback failed.' in msg: + if "Rollback failed." in msg: raise ReplaceConfigException(msg) def rollback(self): if self.changed: - command = 'rollback running-config file {}'.format(self.rollback_cfg) + command = "rollback running-config file {}".format(self.rollback_cfg) result = self._send_command(command) - if 'completed' not in result.lower(): + if "completed" not in result.lower(): raise ReplaceConfigException(result) self._copy_run_start() self.changed = False @@ -487,45 +534,45 @@ def _apply_key_map(self, key_map, table): return new_dict def _convert_uptime_to_seconds(self, uptime_facts): - seconds = int(uptime_facts['up_days']) * 24 * 60 * 60 - seconds += int(uptime_facts['up_hours']) * 60 * 60 - seconds += int(uptime_facts['up_mins']) * 60 - seconds += int(uptime_facts['up_secs']) + seconds = int(uptime_facts["up_days"]) * 24 * 60 * 60 + seconds += int(uptime_facts["up_hours"]) * 60 * 60 + seconds += int(uptime_facts["up_mins"]) * 60 + seconds += int(uptime_facts["up_secs"]) return seconds def get_facts(self): """Return a set of facts from the devices.""" # default values. 
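        # Sketch of the facts dict this method assembles (every value below is invented
        # purely for illustration):
        #     {"uptime": 1793416, "vendor": "Cisco", "os_version": "7.0(3)I7(4)",
        #      "serial_number": "FOC1234X0CD", "model": "Nexus9000 C9396PX Chassis",
        #      "hostname": "nxos-sw01", "fqdn": "nxos-sw01.example.com",
        #      "interface_list": ["mgmt0", "Ethernet1/1", ...]}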
- vendor = u'Cisco' + vendor = "Cisco" uptime = -1 - serial_number, fqdn, os_version, hostname, domain_name, model = ('',) * 6 + serial_number, fqdn, os_version, hostname, domain_name, model = ("",) * 6 # obtain output from device - show_ver = self._send_command('show version') - show_hosts = self._send_command('show hosts') - show_int_status = self._send_command('show interface status') - show_hostname = self._send_command('show hostname') + show_ver = self._send_command("show version") + show_hosts = self._send_command("show hosts") + show_int_status = self._send_command("show interface status") + show_hostname = self._send_command("show hostname") # uptime/serial_number/IOS version for line in show_ver.splitlines(): - if ' uptime is ' in line: - _, uptime_str = line.split(' uptime is ') + if " uptime is " in line: + _, uptime_str = line.split(" uptime is ") uptime = self.parse_uptime(uptime_str) - if 'Processor Board ID' in line: + if "Processor Board ID" in line: _, serial_number = line.split("Processor Board ID ") serial_number = serial_number.strip() - if 'system: ' in line or 'NXOS: ' in line: + if "system: " in line or "NXOS: " in line: line = line.strip() os_version = line.split()[2] os_version = os_version.strip() - if 'cisco' in line and 'hassis' in line: - match = re.search(r'.cisco (.*) \(', line) + if "cisco" in line and "hassis" in line: + match = re.search(r".cisco (.*) \(", line) if match: model = match.group(1).strip() - match = re.search(r'.cisco (.* [cC]hassis)', line) + match = re.search(r".cisco (.* [cC]hassis)", line) if match: model = match.group(1).strip() @@ -533,7 +580,7 @@ def get_facts(self): # Determine domain_name and fqdn for line in show_hosts.splitlines(): - if 'Default domain' in line: + if "Default domain" in line: _, domain_name = re.split(r".*Default domain.*is ", line) domain_name = domain_name.strip() break @@ -541,17 +588,18 @@ def get_facts(self): fqdn = hostname # Remove domain name from hostname if domain_name: - hostname = re.sub(re.escape(domain_name) + '$', '', hostname) - hostname = hostname.strip('.') + hostname = re.sub(re.escape(domain_name) + "$", "", hostname) + hostname = hostname.strip(".") elif domain_name: - fqdn = '{}.{}'.format(hostname, domain_name) + fqdn = "{}.{}".format(hostname, domain_name) # interface_list filter interface_list = [] show_int_status = show_int_status.strip() # Remove the header information - show_int_status = re.sub(r'(?:^---------+$|^Port .*$|^ .*$)', '', - show_int_status, flags=re.M) + show_int_status = re.sub( + r"(?:^---------+$|^Port .*$|^ .*$)", "", show_int_status, flags=re.M + ) for line in show_int_status.splitlines(): if not line: continue @@ -560,14 +608,14 @@ def get_facts(self): interface_list.append(canonical_interface_name(interface)) return { - 'uptime': int(uptime), - 'vendor': vendor, - 'os_version': py23_compat.text_type(os_version), - 'serial_number': py23_compat.text_type(serial_number), - 'model': py23_compat.text_type(model), - 'hostname': py23_compat.text_type(hostname), - 'fqdn': fqdn, - 'interface_list': interface_list + "uptime": int(uptime), + "vendor": vendor, + "os_version": py23_compat.text_type(os_version), + "serial_number": py23_compat.text_type(serial_number), + "model": py23_compat.text_type(model), + "hostname": py23_compat.text_type(hostname), + "fqdn": fqdn, + "interface_list": interface_list, } def get_interfaces(self): @@ -598,7 +646,7 @@ def get_interfaces(self): 'speed': 100}} """ interfaces = {} - command = 'show interface' + command = "show interface" output = 
self._send_command(command) if not output: return {} @@ -611,7 +659,9 @@ def get_interfaces(self): interface_lines = re.split(separators, output, flags=re.M) if len(interface_lines) == 1: - msg = "Unexpected output data in '{}':\n\n{}".format(command, interface_lines) + msg = "Unexpected output data in '{}':\n\n{}".format( + command, interface_lines + ) raise ValueError(msg) # Get rid of the blank data at the beginning @@ -619,13 +669,15 @@ def get_interfaces(self): # Must be pairs of data (the separator and section corresponding to it) if len(interface_lines) % 2 != 0: - msg = "Unexpected output data in '{}':\n\n{}".format(command, interface_lines) + msg = "Unexpected output data in '{}':\n\n{}".format( + command, interface_lines + ) raise ValueError(msg) # Combine the separator and section into one string intf_iter = iter(interface_lines) try: - new_interfaces = [line + next(intf_iter, '') for line in intf_iter] + new_interfaces = [line + next(intf_iter, "") for line in intf_iter] except TypeError: raise ValueError() @@ -636,19 +688,22 @@ def get_interfaces(self): def get_lldp_neighbors(self): results = {} - command = 'show lldp neighbors' + command = "show lldp neighbors" output = self._send_command(command) lldp_neighbors = napalm.base.helpers.textfsm_extractor( - self, 'lldp_neighbors', output) + self, "lldp_neighbors", output + ) for neighbor in lldp_neighbors: - local_iface = neighbor.get('local_interface') + local_iface = neighbor.get("local_interface") if neighbor.get(local_iface) is None: if local_iface not in results: results[local_iface] = [] - neighbor_dict = {'hostname': py23_compat.text_type(neighbor.get('neighbor')), - 'port': py23_compat.text_type(neighbor.get('neighbor_interface'))} + neighbor_dict = { + "hostname": py23_compat.text_type(neighbor.get("neighbor")), + "port": py23_compat.text_type(neighbor.get("neighbor_interface")), + } results[local_iface].append(neighbor_dict) return results @@ -684,7 +739,7 @@ def get_bgp_neighbors(self): bgp_dict = {} # get summary output from device - cmd_bgp_all_sum = 'show bgp all summary vrf all' + cmd_bgp_all_sum = "show bgp all summary vrf all" bgp_summary_output = self._send_command(cmd_bgp_all_sum).strip() section_separator = r"BGP summary information for " @@ -700,13 +755,13 @@ def get_bgp_neighbors(self): # FIX -- need to merge IPv6 and IPv4 AFI for same neighbor return bgp_dict - def get_lldp_neighbors_detail(self, interface=''): + def get_lldp_neighbors_detail(self, interface=""): lldp_neighbors = {} - filter = '' + filter = "" if interface: - filter = 'interface {name} '.format(name=interface) + filter = "interface {name} ".format(name=interface) - command = 'show lldp neighbors {filter}detail'.format(filter=filter) + command = "show lldp neighbors {filter}detail".format(filter=filter) # seems that some old devices may not return JSON output... 
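        # Worked example of the command assembled above (the interface name is
        # hypothetical): calling get_lldp_neighbors_detail(interface="Ethernet1/1")
        # yields command == "show lldp neighbors interface Ethernet1/1 detail", while
        # calling it with no interface argument gives "show lldp neighbors detail".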
output = self._send_command(command) @@ -716,15 +771,15 @@ def get_lldp_neighbors_detail(self, interface=''): if not lldp_neighbors_list: return lldp_neighbors # empty dict - CHASSIS_REGEX = r'^(Chassis id:)\s+([a-z0-9\.]+)$' - PORT_REGEX = r'^(Port id:)\s+([0-9]+)$' - LOCAL_PORT_ID_REGEX = r'^(Local Port id:)\s+(.*)$' - PORT_DESCR_REGEX = r'^(Port Description:)\s+(.*)$' - SYSTEM_NAME_REGEX = r'^(System Name:)\s+(.*)$' - SYSTEM_DESCR_REGEX = r'^(System Description:)\s+(.*)$' - SYST_CAPAB_REEGX = r'^(System Capabilities:)\s+(.*)$' - ENABL_CAPAB_REGEX = r'^(Enabled Capabilities:)\s+(.*)$' - VLAN_ID_REGEX = r'^(Vlan ID:)\s+(.*)$' + CHASSIS_REGEX = r"^(Chassis id:)\s+([a-z0-9\.]+)$" + PORT_REGEX = r"^(Port id:)\s+([0-9]+)$" + LOCAL_PORT_ID_REGEX = r"^(Local Port id:)\s+(.*)$" + PORT_DESCR_REGEX = r"^(Port Description:)\s+(.*)$" + SYSTEM_NAME_REGEX = r"^(System Name:)\s+(.*)$" + SYSTEM_DESCR_REGEX = r"^(System Description:)\s+(.*)$" + SYST_CAPAB_REEGX = r"^(System Capabilities:)\s+(.*)$" + ENABL_CAPAB_REGEX = r"^(Enabled Capabilities:)\s+(.*)$" + VLAN_ID_REGEX = r"^(Vlan ID:)\s+(.*)$" lldp_neighbor = {} interface_name = None @@ -733,13 +788,17 @@ def get_lldp_neighbors_detail(self, interface=''): chassis_rgx = re.search(CHASSIS_REGEX, line, re.I) if chassis_rgx: lldp_neighbor = { - 'remote_chassis_id': napalm.base.helpers.mac(chassis_rgx.groups()[1]) + "remote_chassis_id": napalm.base.helpers.mac( + chassis_rgx.groups()[1] + ) } continue - lldp_neighbor['parent_interface'] = '' + lldp_neighbor["parent_interface"] = "" port_rgx = re.search(PORT_REGEX, line, re.I) if port_rgx: - lldp_neighbor['parent_interface'] = py23_compat.text_type(port_rgx.groups()[1]) + lldp_neighbor["parent_interface"] = py23_compat.text_type( + port_rgx.groups()[1] + ) continue local_port_rgx = re.search(LOCAL_PORT_ID_REGEX, line, re.I) if local_port_rgx: @@ -747,29 +806,36 @@ def get_lldp_neighbors_detail(self, interface=''): continue port_descr_rgx = re.search(PORT_DESCR_REGEX, line, re.I) if port_descr_rgx: - lldp_neighbor['remote_port'] = py23_compat.text_type(port_descr_rgx.groups()[1]) - lldp_neighbor['remote_port_description'] = py23_compat.text_type( - port_descr_rgx.groups()[1]) + lldp_neighbor["remote_port"] = py23_compat.text_type( + port_descr_rgx.groups()[1] + ) + lldp_neighbor["remote_port_description"] = py23_compat.text_type( + port_descr_rgx.groups()[1] + ) continue syst_name_rgx = re.search(SYSTEM_NAME_REGEX, line, re.I) if syst_name_rgx: - lldp_neighbor['remote_system_name'] = py23_compat.text_type( - syst_name_rgx.groups()[1]) + lldp_neighbor["remote_system_name"] = py23_compat.text_type( + syst_name_rgx.groups()[1] + ) continue syst_descr_rgx = re.search(SYSTEM_DESCR_REGEX, line, re.I) if syst_descr_rgx: - lldp_neighbor['remote_system_description'] = py23_compat.text_type( - syst_descr_rgx.groups()[1]) + lldp_neighbor["remote_system_description"] = py23_compat.text_type( + syst_descr_rgx.groups()[1] + ) continue syst_capab_rgx = re.search(SYST_CAPAB_REEGX, line, re.I) if syst_capab_rgx: - lldp_neighbor['remote_system_capab'] = py23_compat.text_type( - syst_capab_rgx.groups()[1]) + lldp_neighbor["remote_system_capab"] = py23_compat.text_type( + syst_capab_rgx.groups()[1] + ) continue syst_enabled_rgx = re.search(ENABL_CAPAB_REGEX, line, re.I) if syst_enabled_rgx: - lldp_neighbor['remote_system_enable_capab'] = py23_compat.text_type( - syst_enabled_rgx.groups()[1]) + lldp_neighbor["remote_system_enable_capab"] = py23_compat.text_type( + syst_enabled_rgx.groups()[1] + ) continue vlan_rgx = 
re.search(VLAN_ID_REGEX, line, re.I) if vlan_rgx: @@ -782,7 +848,7 @@ def get_lldp_neighbors_detail(self, interface=''): def cli(self, commands): cli_output = {} if type(commands) is not list: - raise TypeError('Please enter a valid list of commands!') + raise TypeError("Please enter a valid list of commands!") for command in commands: output = self._send_command(command) @@ -817,7 +883,7 @@ def get_arp_table(self): """ arp_table = [] - command = 'show ip arp vrf default | exc INCOMPLETE' + command = "show ip arp vrf default | exc INCOMPLETE" output = self._send_command(command) separator = r"^Address\s+Age.*Interface.*$" @@ -834,9 +900,9 @@ def get_arp_table(self): else: raise ValueError("Unexpected output from: {}".format(line.split())) - if age == '-': + if age == "-": age = -1.0 - elif ':' not in age: + elif ":" not in age: # Cisco sometimes returns a sub second arp time 0.411797 try: age = float(age) @@ -853,22 +919,22 @@ def get_arp_table(self): if not re.search(RE_MAC, mac): raise ValueError("Invalid MAC Address detected: {}".format(mac)) entry = { - 'interface': interface, - 'mac': napalm.base.helpers.mac(mac), - 'ip': address, - 'age': age + "interface": interface, + "mac": napalm.base.helpers.mac(mac), + "ip": address, + "age": age, } arp_table.append(entry) return arp_table def _get_ntp_entity(self, peer_type): ntp_entities = {} - command = 'show ntp peers' + command = "show ntp peers" output = self._send_command(command) for line in output.splitlines(): # Skip first two lines and last line of command output - if line == "" or '-----' in line or 'Peer IP Address' in line: + if line == "" or "-----" in line or "Peer IP Address" in line: continue elif IPAddress(len(line.split()[0])).is_unicast: peer_addr = line.split()[0] @@ -879,15 +945,15 @@ def _get_ntp_entity(self, peer_type): return ntp_entities def get_ntp_peers(self): - return self._get_ntp_entity('Peer') + return self._get_ntp_entity("Peer") def get_ntp_servers(self): - return self._get_ntp_entity('Server') + return self._get_ntp_entity("Server") def __get_ntp_stats(self): ntp_stats = [] - command = 'show ntp peer-status' - output = self._send_command(command) # noqa + command = "show ntp peer-status" + output = self._send_command(command) # noqa return ntp_stats def get_interfaces_ip(self): @@ -921,8 +987,8 @@ def get_interfaces_ip(self): } """ interfaces_ip = {} - ipv4_command = 'show ip interface vrf default' - ipv6_command = 'show ipv6 interface vrf default' + ipv4_command = "show ip interface vrf default" + ipv6_command = "show ipv6 interface vrf default" output_v4 = self._send_command(ipv4_command) output_v6 = self._send_command(ipv6_command) @@ -931,16 +997,16 @@ def get_interfaces_ip(self): # Ethernet2/2, Interface status: protocol-up/link-up/admin-up, iod: 38, # IP address: 2.2.2.2, IP subnet: 2.2.2.0/27 route-preference: 0, tag: 0 # IP address: 3.3.3.3, IP subnet: 3.3.3.0/25 secondary route-preference: 0, tag: 0 - if 'Interface status' in line: - interface = line.split(',')[0] + if "Interface status" in line: + interface = line.split(",")[0] continue - if 'IP address' in line: - ip_address = line.split(',')[0].split()[2] + if "IP address" in line: + ip_address = line.split(",")[0].split()[2] try: - prefix_len = int(line.split()[5].split('/')[1]) + prefix_len = int(line.split()[5].split("/")[1]) except ValueError: - prefix_len = 'N/A' - val = {'prefix_length': prefix_len} + prefix_len = "N/A" + val = {"prefix_length": prefix_len} v4_interfaces.setdefault(interface, {})[ip_address] = val v6_interfaces = {} @@ 
-951,26 +1017,26 @@ def get_interfaces_ip(self): # 2001:cc11:22bb:0:2ec2:60ff:fe4f:feb2/64 [VALID] # IPv6 subnet: 2001::/24 # IPv6 link-local address: fe80::2ec2:60ff:fe4f:feb2 (default) [VALID] - if 'Interface status' in line: - interface = line.split(',')[0] + if "Interface status" in line: + interface = line.split(",")[0] continue - if 'VALID' in line: + if "VALID" in line: line = line.strip() - if 'link-local address' in line: + if "link-local address" in line: ip_address = line.split()[3] - prefix_len = '64' + prefix_len = "64" else: - ip_address, prefix_len = line.split()[0].split('/') + ip_address, prefix_len = line.split()[0].split("/") prefix_len = int(prefix_len) - val = {'prefix_length': prefix_len} + val = {"prefix_length": prefix_len} v6_interfaces.setdefault(interface, {})[ip_address] = val # Join data from intermediate dictionaries. for interface, data in v4_interfaces.items(): - interfaces_ip.setdefault(interface, {'ipv4': {}})['ipv4'] = data + interfaces_ip.setdefault(interface, {"ipv4": {}})["ipv4"] = data for interface, data in v6_interfaces.items(): - interfaces_ip.setdefault(interface, {'ipv6': {}})['ipv6'] = data + interfaces_ip.setdefault(interface, {"ipv6": {}})["ipv6"] = data return interfaces_ip @@ -1008,49 +1074,54 @@ def get_mac_address_table(self): """ # The '*' is stripped out later - RE_MACTABLE_FORMAT1 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format(VLAN_REGEX, - MAC_REGEX) - RE_MACTABLE_FORMAT2 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format('-', - MAC_REGEX) + RE_MACTABLE_FORMAT1 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format( + VLAN_REGEX, MAC_REGEX + ) + RE_MACTABLE_FORMAT2 = r"^\s+{}\s+{}\s+\S+\s+\S+\s+\S+\s+\S+\s+\S+".format( + "-", MAC_REGEX + ) # REGEX dedicated for lines with only interfaces (suite of the previous MAC address) RE_MACTABLE_FORMAT3 = r"^\s+\S+" mac_address_table = [] - command = 'show mac address-table' - output = self._send_command(command) # noqa + command = "show mac address-table" + output = self._send_command(command) # noqa def remove_prefix(s, prefix): - return s[len(prefix):] if s.startswith(prefix) else s + return s[len(prefix) :] if s.startswith(prefix) else s def process_mac_fields(vlan, mac, mac_type, interface): """Return proper data for mac address fields.""" - if mac_type.lower() in ['self', 'static', 'system']: + if mac_type.lower() in ["self", "static", "system"]: static = True - if vlan.lower() == 'all': + if vlan.lower() == "all": vlan = 0 - elif vlan == '-': + elif vlan == "-": vlan = 0 - if interface.lower() == 'cpu' or re.search(r'router', interface.lower()) or \ - re.search(r'switch', interface.lower()): - interface = '' + if ( + interface.lower() == "cpu" + or re.search(r"router", interface.lower()) + or re.search(r"switch", interface.lower()) + ): + interface = "" else: static = False - if mac_type.lower() in ['dynamic']: + if mac_type.lower() in ["dynamic"]: active = True else: active = False return { - 'mac': napalm.base.helpers.mac(mac), - 'interface': interface, - 'vlan': int(vlan), - 'static': static, - 'active': active, - 'moves': -1, - 'last_move': -1.0 + "mac": napalm.base.helpers.mac(mac), + "interface": interface, + "vlan": int(vlan), + "static": static, + "active": active, + "moves": -1, + "last_move": -1.0, } # Skip the header lines - output = re.split(r'^----.*', output, flags=re.M)[1:] + output = re.split(r"^----.*", output, flags=re.M)[1:] output = "\n".join(output).strip() # Strip any leading characters output = re.sub(r"^[\*\+GOCE]", "", output, flags=re.M) @@ -1062,38 
+1133,45 @@ def process_mac_fields(vlan, mac, mac_type, interface): for line in output.splitlines(): # Every 500 Mac's Legend is reprinted, regardless of terminal length - if re.search(r'^Legend', line): + if re.search(r"^Legend", line): continue - elif re.search(r'^\s+\* \- primary entry', line): + elif re.search(r"^\s+\* \- primary entry", line): continue - elif re.search(r'^\s+age \-', line): + elif re.search(r"^\s+age \-", line): continue - elif re.search(r'^\s+VLAN', line): + elif re.search(r"^\s+VLAN", line): continue - elif re.search(r'^------', line): + elif re.search(r"^------", line): continue - elif re.search(r'^\s*$', line): + elif re.search(r"^\s*$", line): continue - for pattern in [RE_MACTABLE_FORMAT1, RE_MACTABLE_FORMAT2, RE_MACTABLE_FORMAT3]: + for pattern in [ + RE_MACTABLE_FORMAT1, + RE_MACTABLE_FORMAT2, + RE_MACTABLE_FORMAT3, + ]: if re.search(pattern, line): fields = line.split() if len(fields) >= 7: vlan, mac, mac_type, _, _, _, interface = fields[:7] - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, - interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) # there can be multiples interfaces for the same MAC on the same line for interface in fields[7:]: - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, - interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) break # interfaces can overhang to the next line (line only contains interfaces) elif len(fields) < 7: for interface in fields: - mac_address_table.append(process_mac_fields(vlan, mac, mac_type, - interface)) + mac_address_table.append( + process_mac_fields(vlan, mac, mac_type, interface) + ) break else: raise ValueError("Unexpected output from: {}".format(repr(line))) @@ -1102,90 +1180,84 @@ def process_mac_fields(vlan, mac, mac_type, interface): def get_snmp_information(self): snmp_information = {} - command = 'show running-config' + command = "show running-config" output = self._send_command(command) - snmp_config = napalm.base.helpers.textfsm_extractor(self, 'snmp_config', output) + snmp_config = napalm.base.helpers.textfsm_extractor(self, "snmp_config", output) if not snmp_config: return snmp_information snmp_information = { - 'contact': py23_compat.text_type(''), - 'location': py23_compat.text_type(''), - 'community': {}, - 'chassis_id': py23_compat.text_type('') + "contact": py23_compat.text_type(""), + "location": py23_compat.text_type(""), + "community": {}, + "chassis_id": py23_compat.text_type(""), } for snmp_entry in snmp_config: - contact = py23_compat.text_type(snmp_entry.get('contact', '')) + contact = py23_compat.text_type(snmp_entry.get("contact", "")) if contact: - snmp_information['contact'] = contact - location = py23_compat.text_type(snmp_entry.get('location', '')) + snmp_information["contact"] = contact + location = py23_compat.text_type(snmp_entry.get("location", "")) if location: - snmp_information['location'] = location + snmp_information["location"] = location - community_name = py23_compat.text_type(snmp_entry.get('community', '')) + community_name = py23_compat.text_type(snmp_entry.get("community", "")) if not community_name: continue - if community_name not in snmp_information['community'].keys(): - snmp_information['community'][community_name] = { - 'acl': py23_compat.text_type(snmp_entry.get('acl', '')), - 'mode': py23_compat.text_type(snmp_entry.get('mode', '').lower()) + if community_name not in snmp_information["community"].keys(): + 
snmp_information["community"][community_name] = { + "acl": py23_compat.text_type(snmp_entry.get("acl", "")), + "mode": py23_compat.text_type(snmp_entry.get("mode", "").lower()), } else: - acl = py23_compat.text_type(snmp_entry.get('acl', '')) + acl = py23_compat.text_type(snmp_entry.get("acl", "")) if acl: - snmp_information['community'][community_name]['acl'] = acl - mode = py23_compat.text_type(snmp_entry.get('mode', '').lower()) + snmp_information["community"][community_name]["acl"] = acl + mode = py23_compat.text_type(snmp_entry.get("mode", "").lower()) if mode: - snmp_information['community'][community_name]['mode'] = mode + snmp_information["community"][community_name]["mode"] = mode return snmp_information def get_users(self): - _CISCO_TO_CISCO_MAP = { - 'network-admin': 15, - 'network-operator': 5 - } + _CISCO_TO_CISCO_MAP = {"network-admin": 15, "network-operator": 5} - _DEFAULT_USER_DICT = { - 'password': '', - 'level': 0, - 'sshkeys': [] - } + _DEFAULT_USER_DICT = {"password": "", "level": 0, "sshkeys": []} users = {} - command = 'show running-config' + command = "show running-config" output = self._send_command(command) section_username_tabled_output = napalm.base.helpers.textfsm_extractor( - self, 'users', output) + self, "users", output + ) for user in section_username_tabled_output: - username = user.get('username', '') + username = user.get("username", "") if not username: continue if username not in users: users[username] = _DEFAULT_USER_DICT.copy() - password = user.get('password', '') + password = user.get("password", "") if password: - users[username]['password'] = py23_compat.text_type(password.strip()) + users[username]["password"] = py23_compat.text_type(password.strip()) level = 0 - role = user.get('role', '') - if role.startswith('priv'): - level = int(role.split('-')[-1]) + role = user.get("role", "") + if role.startswith("priv"): + level = int(role.split("-")[-1]) else: level = _CISCO_TO_CISCO_MAP.get(role, 0) - if level > users.get(username).get('level'): + if level > users.get(username).get("level"): # unfortunately on Cisco you can set different priv levels for the same user # Good news though: the device will consider the highest level - users[username]['level'] = level + users[username]["level"] = level - sshkeytype = user.get('sshkeytype', '') - sshkeyvalue = user.get('sshkeyvalue', '') + sshkeytype = user.get("sshkeytype", "") + sshkeyvalue = user.get("sshkeyvalue", "") if sshkeytype and sshkeyvalue: - if sshkeytype not in ['ssh-rsa', 'ssh-dsa']: + if sshkeytype not in ["ssh-rsa", "ssh-dsa"]: continue - users[username]['sshkeys'].append(py23_compat.text_type(sshkeyvalue)) + users[username]["sshkeys"].append(py23_compat.text_type(sshkeyvalue)) return users diff --git a/setup.py b/setup.py index 82e0b751b..9d142e61e 100644 --- a/setup.py +++ b/setup.py @@ -5,37 +5,37 @@ reqs = [r for r in fs.read().splitlines() if (len(r) > 0 and not r.startswith("#"))] -__author__ = 'David Barroso ' +__author__ = "David Barroso " setup( name="napalm", - version='2.3.3', - packages=find_packages(exclude=("test*", )), - test_suite='test_base', + version="2.3.3", + packages=find_packages(exclude=("test*",)), + test_suite="test_base", author="David Barroso, Kirk Byers, Mircea Ulinic", author_email="dbarrosop@dravetech.com, ping@mirceaulinic.net, ktbyers@twb-tech.com", description="Network Automation and Programmability Abstraction Layer with Multivendor support", classifiers=[ - 'Topic :: Utilities', - 'Programming Language :: Python', - 'Programming Language :: Python :: 2', - 
'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Operating System :: POSIX :: Linux', - 'Operating System :: MacOS', + "Topic :: Utilities", + "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.4", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Operating System :: POSIX :: Linux", + "Operating System :: MacOS", ], url="https://github.com/napalm-automation/napalm", include_package_data=True, install_requires=reqs, entry_points={ - 'console_scripts': [ - 'cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main', - 'cl_napalm_test=napalm.base.clitools.cl_napalm_test:main', - 'cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main', - 'napalm=napalm.base.clitools.cl_napalm:main', - ], - } + "console_scripts": [ + "cl_napalm_configure=napalm.base.clitools.cl_napalm_configure:main", + "cl_napalm_test=napalm.base.clitools.cl_napalm_test:main", + "cl_napalm_validate=napalm.base.clitools.cl_napalm_validate:main", + "napalm=napalm.base.clitools.cl_napalm:main", + ] + }, ) diff --git a/test/base/test_get_network_driver.py b/test/base/test_get_network_driver.py index 4ff11f74f..0d17eaf93 100644 --- a/test/base/test_get_network_driver.py +++ b/test/base/test_get_network_driver.py @@ -21,7 +21,7 @@ def test_get_network_driver(self, driver): """Check that we can get the desired driver and is instance of NetworkDriver.""" self.assertTrue(issubclass(get_network_driver(driver), NetworkDriver)) - @data('fake', 'network', 'driver', 'sys', 1) + @data("fake", "network", "driver", "sys", 1) def test_get_wrong_network_driver(self, driver): """Check that inexisting driver throws ModuleImportError.""" self.assertRaises(ModuleImportError, get_network_driver, driver, prepend=False) diff --git a/test/base/test_helpers.py b/test/base/test_helpers.py index e3b43d006..e79d5cd35 100644 --- a/test/base/test_helpers.py +++ b/test/base/test_helpers.py @@ -14,24 +14,28 @@ # third party libs try: import jinja2 # noqa + HAS_JINJA = True except ImportError: HAS_JINJA = False try: import textfsm # noqa + HAS_TEXTFSM = True except ImportError: HAS_TEXTFSM = False try: from lxml import etree as ET + HAS_LXML = True except ImportError: HAS_LXML = False try: from netaddr.core import AddrFormatError + HAS_NETADDR = True except ImportError: HAS_NETADDR = False @@ -69,75 +73,95 @@ def test_load_template(self): """ self.assertTrue(HAS_JINJA) # firstly check if jinja2 is installed - _NTP_PEERS_LIST = [ - '172.17.17.1', - '172.17.17.2' - ] - _TEMPLATE_VARS = { - 'peers': _NTP_PEERS_LIST - } + _NTP_PEERS_LIST = ["172.17.17.1", "172.17.17.2"] + _TEMPLATE_VARS = {"peers": _NTP_PEERS_LIST} + + self.assertRaises( + napalm.base.exceptions.TemplateNotImplemented, + napalm.base.helpers.load_template, + self.network_driver, + "__this_template_does_not_exist__", + **_TEMPLATE_VARS + ) - self.assertRaises(napalm.base.exceptions.TemplateNotImplemented, - napalm.base.helpers.load_template, - self.network_driver, - '__this_template_does_not_exist__', - **_TEMPLATE_VARS) - - self.assertTrue(napalm.base.helpers.load_template(self.network_driver, - '__empty_template__', - **_TEMPLATE_VARS)) - - self.assertRaises(napalm.base.exceptions.TemplateRenderException, - 
napalm.base.helpers.load_template, - self.network_driver, - '__completely_wrong_template__', - **_TEMPLATE_VARS) - - self.assertTrue(napalm.base.helpers.load_template(self.network_driver, - '__a_very_nice_template__', - **_TEMPLATE_VARS)) - - self.assertRaises(IOError, - napalm.base.helpers.load_template, - self.network_driver, - '__a_very_nice_template__', - template_path='/this/path/does/not/exist', - **_TEMPLATE_VARS) + self.assertTrue( + napalm.base.helpers.load_template( + self.network_driver, "__empty_template__", **_TEMPLATE_VARS + ) + ) - install_dir = os.path.dirname( - os.path.abspath(sys.modules[self.network_driver.__module__].__file__)) - custom_path = os.path.join(install_dir, '../custom/path/base') + self.assertRaises( + napalm.base.exceptions.TemplateRenderException, + napalm.base.helpers.load_template, + self.network_driver, + "__completely_wrong_template__", + **_TEMPLATE_VARS + ) - self.assertRaises(napalm.base.exceptions.TemplateNotImplemented, - napalm.base.helpers.load_template, - self.network_driver, - '__this_template_does_not_exist__', - template_path=custom_path, - **_TEMPLATE_VARS) + self.assertTrue( + napalm.base.helpers.load_template( + self.network_driver, "__a_very_nice_template__", **_TEMPLATE_VARS + ) + ) - self.assertTrue(napalm.base.helpers.load_template(self.network_driver, - '__a_very_nice_template__', - template_path=custom_path, - **_TEMPLATE_VARS)) + self.assertRaises( + IOError, + napalm.base.helpers.load_template, + self.network_driver, + "__a_very_nice_template__", + template_path="/this/path/does/not/exist", + **_TEMPLATE_VARS + ) - template_source = '{% for peer in peers %}ntp peer {{peer}}\n{% endfor %}' + install_dir = os.path.dirname( + os.path.abspath(sys.modules[self.network_driver.__module__].__file__) + ) + custom_path = os.path.join(install_dir, "../custom/path/base") + + self.assertRaises( + napalm.base.exceptions.TemplateNotImplemented, + napalm.base.helpers.load_template, + self.network_driver, + "__this_template_does_not_exist__", + template_path=custom_path, + **_TEMPLATE_VARS + ) - self.assertTrue(napalm.base.helpers.load_template(self.network_driver, - '_this_still_needs_a_name', - template_source=template_source, - **_TEMPLATE_VARS)) + self.assertTrue( + napalm.base.helpers.load_template( + self.network_driver, + "__a_very_nice_template__", + template_path=custom_path, + **_TEMPLATE_VARS + ) + ) + + template_source = "{% for peer in peers %}ntp peer {{peer}}\n{% endfor %}" + + self.assertTrue( + napalm.base.helpers.load_template( + self.network_driver, + "_this_still_needs_a_name", + template_source=template_source, + **_TEMPLATE_VARS + ) + ) def __foo_to_bar(s): if s == "foo": return "bar" return s - jinja_filters = {'foo_to_bar': __foo_to_bar} + jinja_filters = {"foo_to_bar": __foo_to_bar} - self.assertTrue(napalm.base.helpers.load_template(self.network_driver, - '__custom_jinja_filter_template__', - jinja_filters=jinja_filters, - **_TEMPLATE_VARS)) + self.assertTrue( + napalm.base.helpers.load_template( + self.network_driver, + "__custom_jinja_filter_template__", + jinja_filters=jinja_filters, + **_TEMPLATE_VARS + ) + ) # MIGRATION mircea # self.assertRaisesRegexp(napalm.base.exceptions.TemplateNotImplemented, # "path.*napalm-base/test/unit/templates'" + @@ -157,8 +181,10 @@ def test_textfsm_extractor(self): * check if returns a non-empty list as output """ - self.assertTrue(HAS_TEXTFSM) # before anything else, let's see if TextFSM is available - _TEXTFSM_TEST_STRING = ''' + self.assertTrue( + HAS_TEXTFSM + ) # before anything 
else, let's see if TextFSM is available + _TEXTFSM_TEST_STRING = """ Groups: 3 Peers: 3 Down peers: 0 Table Tot Paths Act Paths Suppressed History Damp State Pending inet.0 947 310 0 0 0 0 @@ -171,30 +197,38 @@ def test_textfsm_extractor(self): inet.0: 0/0/0 inet6.0: 7/8/1 192.0.2.100 65551 1269381 1363320 0 1 9w5d6h 2/3/0 0/0/0 - ''' - - self.assertRaises(napalm.base.exceptions.TemplateNotImplemented, - napalm.base.helpers.textfsm_extractor, - self.network_driver, - '__this_template_does_not_exist__', - _TEXTFSM_TEST_STRING) - - self.assertRaises(napalm.base.exceptions.TemplateRenderException, - napalm.base.helpers.textfsm_extractor, - self.network_driver, - '__empty_template__', - _TEXTFSM_TEST_STRING) - - self.assertRaises(napalm.base.exceptions.TemplateRenderException, - napalm.base.helpers.textfsm_extractor, - self.network_driver, - '__completely_wrong_template__', - _TEXTFSM_TEST_STRING) - - self.assertIsInstance(napalm.base.helpers.textfsm_extractor(self.network_driver, - '__a_very_nice_template__', - _TEXTFSM_TEST_STRING), - list) + """ + + self.assertRaises( + napalm.base.exceptions.TemplateNotImplemented, + napalm.base.helpers.textfsm_extractor, + self.network_driver, + "__this_template_does_not_exist__", + _TEXTFSM_TEST_STRING, + ) + + self.assertRaises( + napalm.base.exceptions.TemplateRenderException, + napalm.base.helpers.textfsm_extractor, + self.network_driver, + "__empty_template__", + _TEXTFSM_TEST_STRING, + ) + + self.assertRaises( + napalm.base.exceptions.TemplateRenderException, + napalm.base.helpers.textfsm_extractor, + self.network_driver, + "__completely_wrong_template__", + _TEXTFSM_TEST_STRING, + ) + + self.assertIsInstance( + napalm.base.helpers.textfsm_extractor( + self.network_driver, "__a_very_nice_template__", _TEXTFSM_TEST_STRING + ), + list, + ) def test_convert(self): """ @@ -204,13 +238,15 @@ def test_convert(self): * cast of str to float returns desired float-type value * cast of None obj to string does not cast, but returns default """ - self.assertTrue(napalm.base.helpers.convert(int, 'non-int-value', default=-100) == -100) + self.assertTrue( + napalm.base.helpers.convert(int, "non-int-value", default=-100) == -100 + ) # default value returned - self.assertIsInstance(napalm.base.helpers.convert(float, '1e-17'), float) + self.assertIsInstance(napalm.base.helpers.convert(float, "1e-17"), float) # converts indeed to float - self.assertFalse(napalm.base.helpers.convert(str, None) == 'None') + self.assertFalse(napalm.base.helpers.convert(str, None) == "None") # should not convert None-type to 'None' string - self.assertTrue(napalm.base.helpers.convert(str, None) == u'') + self.assertTrue(napalm.base.helpers.convert(str, None) == "") # should return empty unicode def test_find_txt(self): @@ -226,7 +262,7 @@ def test_find_txt(self): self.assertTrue(HAS_LXML) # firstly check if lxml is installed - _XML_STRING = ''' + _XML_STRING = """ @@ -253,42 +289,52 @@ def test_find_txt(self): 4 - ''' + """ _XML_TREE = ET.fromstring(_XML_STRING) - self.assertFalse(napalm.base.helpers.find_txt(_XML_TREE, 'parent100/child200', False)) + self.assertFalse( + napalm.base.helpers.find_txt(_XML_TREE, "parent100/child200", False) + ) # returns default value (in this case boolean value False) # check if content inside the tag /parent1/child1 - self.assertTrue(len(napalm.base.helpers.find_txt(_XML_TREE, 'parent1/child1')) > 0) + self.assertTrue( + len(napalm.base.helpers.find_txt(_XML_TREE, "parent1/child1")) > 0 + ) # check if able to eval boolean returned as text inside the XML 
tree self.assertTrue( - eval(napalm.base.helpers.find_txt(_XML_TREE, 'parent3/@lonely', 'false').title())) + eval( + napalm.base.helpers.find_txt( + _XML_TREE, "parent3/@lonely", "false" + ).title() + ) + ) # int values self.assertIsInstance( - int(napalm.base.helpers.find_txt(_XML_TREE, 'stats/parents')), int) + int(napalm.base.helpers.find_txt(_XML_TREE, "stats/parents")), int + ) # get first match of the tag child3, wherever would be - _CHILD3_TAG = _XML_TREE.find('.//child3') + _CHILD3_TAG = _XML_TREE.find(".//child3") # check if content inside the discovered tag child3 - self.assertTrue(len(napalm.base.helpers.find_txt(_CHILD3_TAG, '.')) > 0) + self.assertTrue(len(napalm.base.helpers.find_txt(_CHILD3_TAG, ".")) > 0) _SPECIAL_CHILD2 = _XML_TREE.find('.//child2[@special="true"]') - self.assertTrue(len(napalm.base.helpers.find_txt(_SPECIAL_CHILD2, '.')) > 0) + self.assertTrue(len(napalm.base.helpers.find_txt(_SPECIAL_CHILD2, ".")) > 0) _SPECIAL_CHILD100 = _XML_TREE.find('.//child100[@special="true"]') - self.assertFalse(len(napalm.base.helpers.find_txt(_SPECIAL_CHILD100, '.')) > 0) + self.assertFalse(len(napalm.base.helpers.find_txt(_SPECIAL_CHILD100, ".")) > 0) _NOT_SPECIAL_CHILD2 = _XML_TREE.xpath('.//child2[not(@special="true")]')[0] # use XPath to get tags using predicates! - self.assertTrue(len(napalm.base.helpers.find_txt(_NOT_SPECIAL_CHILD2, '.')) > 0) + self.assertTrue(len(napalm.base.helpers.find_txt(_NOT_SPECIAL_CHILD2, ".")) > 0) def test_mac(self): @@ -302,11 +348,11 @@ def test_mac(self): self.assertTrue(HAS_NETADDR) # test that raises AddrFormatError when wrong format - self.assertRaises(AddrFormatError, napalm.base.helpers.mac, 'fake') + self.assertRaises(AddrFormatError, napalm.base.helpers.mac, "fake") - self.assertEqual(napalm.base.helpers.mac('0123456789ab'), '01:23:45:67:89:AB') - self.assertEqual(napalm.base.helpers.mac('0123.4567.89ab'), '01:23:45:67:89:AB') - self.assertEqual(napalm.base.helpers.mac('123.4567.89ab'), '01:23:45:67:89:AB') + self.assertEqual(napalm.base.helpers.mac("0123456789ab"), "01:23:45:67:89:AB") + self.assertEqual(napalm.base.helpers.mac("0123.4567.89ab"), "01:23:45:67:89:AB") + self.assertEqual(napalm.base.helpers.mac("123.4567.89ab"), "01:23:45:67:89:AB") def test_ip(self): """ @@ -320,27 +366,29 @@ def test_ip(self): self.assertTrue(HAS_NETADDR) # test that raises AddrFormatError when wrong format - self.assertRaises(AddrFormatError, napalm.base.helpers.ip, 'fake') - self.assertRaises(ValueError, napalm.base.helpers.ip, '2001:db8:85a3::8a2e:370:7334', - version=4) - self.assertRaises(ValueError, napalm.base.helpers.ip, '192.168.17.1', - version=6) + self.assertRaises(AddrFormatError, napalm.base.helpers.ip, "fake") + self.assertRaises( + ValueError, + napalm.base.helpers.ip, + "2001:db8:85a3::8a2e:370:7334", + version=4, + ) + self.assertRaises(ValueError, napalm.base.helpers.ip, "192.168.17.1", version=6) self.assertEqual( - napalm.base.helpers.ip('2001:0dB8:85a3:0000:0000:8A2e:0370:7334'), - '2001:db8:85a3::8a2e:370:7334' + napalm.base.helpers.ip("2001:0dB8:85a3:0000:0000:8A2e:0370:7334"), + "2001:db8:85a3::8a2e:370:7334", ) self.assertEqual( - napalm.base.helpers.ip('2001:0DB8::0003', version=6), - '2001:db8::3' + napalm.base.helpers.ip("2001:0DB8::0003", version=6), "2001:db8::3" ) def test_as_number(self): """Test the as_number helper function.""" - self.assertEqual(napalm.base.helpers.as_number('64001'), 64001) - self.assertEqual(napalm.base.helpers.as_number('1.0'), 65536) - self.assertEqual(napalm.base.helpers.as_number('1.100'), 
65636) - self.assertEqual(napalm.base.helpers.as_number('1.65535'), 131071) - self.assertEqual(napalm.base.helpers.as_number('65535.65535'), 4294967295) + self.assertEqual(napalm.base.helpers.as_number("64001"), 64001) + self.assertEqual(napalm.base.helpers.as_number("1.0"), 65536) + self.assertEqual(napalm.base.helpers.as_number("1.100"), 65636) + self.assertEqual(napalm.base.helpers.as_number("1.65535"), 131071) + self.assertEqual(napalm.base.helpers.as_number("65535.65535"), 4294967295) self.assertEqual(napalm.base.helpers.as_number(64001), 64001) def test_convert_uptime_string_seconds(self): @@ -351,182 +399,246 @@ def test_convert_uptime_string_seconds(self): """ # Regex 1 - self.assertEqual(convert_uptime_string_seconds('24 days, 11 hours, 25 minutes'), 2114700) - self.assertEqual(convert_uptime_string_seconds('1 hour, 5 minutes'), 3900) - self.assertEqual(convert_uptime_string_seconds('1 year, 2 weeks, 5 minutes'), 32745900) self.assertEqual( - convert_uptime_string_seconds('95 weeks, 2 days, 10 hours, 58 minutes'), 57668280) + convert_uptime_string_seconds("24 days, 11 hours, 25 minutes"), 2114700 + ) + self.assertEqual(convert_uptime_string_seconds("1 hour, 5 minutes"), 3900) self.assertEqual( - convert_uptime_string_seconds('26 weeks, 2 days, 7 hours, 7 minutes'), 15923220) + convert_uptime_string_seconds("1 year, 2 weeks, 5 minutes"), 32745900 + ) self.assertEqual( - convert_uptime_string_seconds('19 weeks, 2 days, 2 hours, 2 minutes'), 11671320) + convert_uptime_string_seconds("95 weeks, 2 days, 10 hours, 58 minutes"), + 57668280, + ) self.assertEqual( - convert_uptime_string_seconds('15 weeks, 3 days, 5 hours, 57 minutes'), 9352620) + convert_uptime_string_seconds("26 weeks, 2 days, 7 hours, 7 minutes"), + 15923220, + ) self.assertEqual( - convert_uptime_string_seconds('1 year, 8 weeks, 15 minutes'), 36375300) + convert_uptime_string_seconds("19 weeks, 2 days, 2 hours, 2 minutes"), + 11671320, + ) + self.assertEqual( + convert_uptime_string_seconds("15 weeks, 3 days, 5 hours, 57 minutes"), + 9352620, + ) self.assertEqual( - convert_uptime_string_seconds('8 weeks, 2 hours, 5 minutes'), 4845900) + convert_uptime_string_seconds("1 year, 8 weeks, 15 minutes"), 36375300 + ) + self.assertEqual( + convert_uptime_string_seconds("8 weeks, 2 hours, 5 minutes"), 4845900 + ) self.assertEqual( - convert_uptime_string_seconds('8 weeks, 2 hours, 1 minute'), 4845660) + convert_uptime_string_seconds("8 weeks, 2 hours, 1 minute"), 4845660 + ) self.assertEqual( - convert_uptime_string_seconds('2 years, 40 weeks, 1 day, 22 hours, 3 minutes'), - 87429780) + convert_uptime_string_seconds( + "2 years, 40 weeks, 1 day, 22 hours, 3 minutes" + ), + 87429780, + ) self.assertEqual( - convert_uptime_string_seconds('2 years, 40 weeks, 1 day, 19 hours, 46 minutes'), - 87421560) + convert_uptime_string_seconds( + "2 years, 40 weeks, 1 day, 19 hours, 46 minutes" + ), + 87421560, + ) self.assertEqual( - convert_uptime_string_seconds('1 year, 39 weeks, 15 hours, 23 minutes'), 55178580) + convert_uptime_string_seconds("1 year, 39 weeks, 15 hours, 23 minutes"), + 55178580, + ) self.assertEqual( - convert_uptime_string_seconds('33 weeks, 19 hours, 12 minutes'), 20027520) + convert_uptime_string_seconds("33 weeks, 19 hours, 12 minutes"), 20027520 + ) self.assertEqual( - convert_uptime_string_seconds('33 weeks, 19 hours, 8 minutes'), 20027280) + convert_uptime_string_seconds("33 weeks, 19 hours, 8 minutes"), 20027280 + ) self.assertEqual( - convert_uptime_string_seconds('33 weeks, 19 hours, 10 minutes'), 20027400) + 
convert_uptime_string_seconds("33 weeks, 19 hours, 10 minutes"), 20027400 + ) self.assertEqual( - convert_uptime_string_seconds('51 weeks, 5 days, 13 hours, 0 minutes'), 31323600) + convert_uptime_string_seconds("51 weeks, 5 days, 13 hours, 0 minutes"), + 31323600, + ) self.assertEqual( - convert_uptime_string_seconds('51 weeks, 5 days, 12 hours, 57 minutes'), 31323420) + convert_uptime_string_seconds("51 weeks, 5 days, 12 hours, 57 minutes"), + 31323420, + ) self.assertEqual( - convert_uptime_string_seconds('51 weeks, 5 days, 12 hours, 55 minutes'), 31323300) + convert_uptime_string_seconds("51 weeks, 5 days, 12 hours, 55 minutes"), + 31323300, + ) self.assertEqual( - convert_uptime_string_seconds('51 weeks, 5 days, 12 hours, 58 minutes'), 31323480) + convert_uptime_string_seconds("51 weeks, 5 days, 12 hours, 58 minutes"), + 31323480, + ) # Regex 2 - self.assertEqual(convert_uptime_string_seconds('114 days, 22:27:32'), 9930452) - self.assertEqual(convert_uptime_string_seconds('0 days, 22:27:32'), 80852) - self.assertEqual(convert_uptime_string_seconds('365 days, 5:01:44'), 31554104) + self.assertEqual(convert_uptime_string_seconds("114 days, 22:27:32"), 9930452) + self.assertEqual(convert_uptime_string_seconds("0 days, 22:27:32"), 80852) + self.assertEqual(convert_uptime_string_seconds("365 days, 5:01:44"), 31554104) # Regex 3 - self.assertEqual(convert_uptime_string_seconds('7w6d5h4m3s'), 4770243) - self.assertEqual(convert_uptime_string_seconds('95w2d10h58m'), 57668280) - self.assertEqual(convert_uptime_string_seconds('1h5m'), 3900) + self.assertEqual(convert_uptime_string_seconds("7w6d5h4m3s"), 4770243) + self.assertEqual(convert_uptime_string_seconds("95w2d10h58m"), 57668280) + self.assertEqual(convert_uptime_string_seconds("1h5m"), 3900) def test_canonical_interface_name(self): """Test the canonical_interface_name helper function.""" - self.assertEqual(napalm.base.helpers.canonical_interface_name('Fa0/1'), "FastEthernet0/1") - self.assertEqual(napalm.base.helpers.canonical_interface_name('FastEthernet0/1'), - 'FastEthernet0/1') - self.assertEqual(napalm.base.helpers.canonical_interface_name('TenGig1/1/1.5'), - "TenGigabitEthernet1/1/1.5") - self.assertEqual(napalm.base.helpers.canonical_interface_name('Gi1/2'), - "GigabitEthernet1/2") - self.assertEqual(napalm.base.helpers.canonical_interface_name('HundredGigE105/1/1'), - "HundredGigabitEthernet105/1/1") - self.assertEqual(napalm.base.helpers.canonical_interface_name('Lo0'), "Loopback0") - self.assertEqual(napalm.base.helpers.canonical_interface_name('lo0'), "Loopback0") - self.assertEqual(napalm.base.helpers.canonical_interface_name('no_match0/1'), - "no_match0/1") - self.assertEqual(napalm.base.helpers.canonical_interface_name('lo10', - addl_name_map={"lo": "something_custom"}), "something_custom10") - self.assertEqual(napalm.base.helpers.canonical_interface_name('uniq0/1/1', - addl_name_map={"uniq": "something_custom"}), "something_custom0/1/1") + self.assertEqual( + napalm.base.helpers.canonical_interface_name("Fa0/1"), "FastEthernet0/1" + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name("FastEthernet0/1"), + "FastEthernet0/1", + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name("TenGig1/1/1.5"), + "TenGigabitEthernet1/1/1.5", + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name("Gi1/2"), "GigabitEthernet1/2" + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name("HundredGigE105/1/1"), + "HundredGigabitEthernet105/1/1", + ) + self.assertEqual( + 
napalm.base.helpers.canonical_interface_name("Lo0"), "Loopback0" + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name("lo0"), "Loopback0" + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name("no_match0/1"), "no_match0/1" + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name( + "lo10", addl_name_map={"lo": "something_custom"} + ), + "something_custom10", + ) + self.assertEqual( + napalm.base.helpers.canonical_interface_name( + "uniq0/1/1", addl_name_map={"uniq": "something_custom"} + ), + "something_custom0/1/1", + ) def test_abbreviated_interface_name(self): """Test the abbreviated_interface_name helper function.""" - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('Fa0/1'), "Fa0/1") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('FastEthernet0/1'), - "Fa0/1") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('TenGig1/1/1.5'), - "Te1/1/1.5") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('Gi1/2'), "Gi1/2") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('HundredGigE105/1/1'), - "Hu105/1/1") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('Lo0'), "Lo0") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('lo0'), "Lo0") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('something_custom0/1'), - "something_custom0/1") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('loop10', - addl_name_map={"loop": "Loopback"}), "Lo10") - self.assertEqual(napalm.base.helpers.abbreviated_interface_name('loop10', - addl_name_map={"loop": "Loopback"}, - addl_reverse_map={"Loopback": "lo"}), "lo10") + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name("Fa0/1"), "Fa0/1" + ) + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name("FastEthernet0/1"), "Fa0/1" + ) + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name("TenGig1/1/1.5"), "Te1/1/1.5" + ) + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name("Gi1/2"), "Gi1/2" + ) + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name("HundredGigE105/1/1"), + "Hu105/1/1", + ) + self.assertEqual(napalm.base.helpers.abbreviated_interface_name("Lo0"), "Lo0") + self.assertEqual(napalm.base.helpers.abbreviated_interface_name("lo0"), "Lo0") + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name("something_custom0/1"), + "something_custom0/1", + ) + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name( + "loop10", addl_name_map={"loop": "Loopback"} + ), + "Lo10", + ) + self.assertEqual( + napalm.base.helpers.abbreviated_interface_name( + "loop10", + addl_name_map={"loop": "Loopback"}, + addl_reverse_map={"Loopback": "lo"}, + ), + "lo10", + ) def test_netmiko_arguments(self): """Test the netmiko argument processing.""" self.assertEqual(netmiko_args(optional_args={}), {}) - test_case = {'secret': 'whatever'} + test_case = {"secret": "whatever"} self.assertEqual(netmiko_args(test_case), test_case) - test_case = { - 'secret': 'whatever', - 'use_keys': True, - } + test_case = {"secret": "whatever", "use_keys": True} self.assertEqual(netmiko_args(test_case), test_case) test_case = { - 'secret': 'whatever', - 'use_keys': True, - 'ssh_config_file': '~/.ssh/config', + "secret": "whatever", + "use_keys": True, + "ssh_config_file": "~/.ssh/config", } self.assertEqual(netmiko_args(test_case), test_case) - test_case = { - 'secret': 'whatever', - 'transport': 'telnet', - } - 
self.assertEqual(netmiko_args(test_case), {'secret': 'whatever'}) + test_case = {"secret": "whatever", "transport": "telnet"} + self.assertEqual(netmiko_args(test_case), {"secret": "whatever"}) - test_case = { - 'secret': 'whatever', - 'transport': 'telnet', - 'port': 8022, - } - self.assertEqual(netmiko_args(test_case), {'secret': 'whatever', 'port': 8022}) + test_case = {"secret": "whatever", "transport": "telnet", "port": 8022} + self.assertEqual(netmiko_args(test_case), {"secret": "whatever", "port": 8022}) test_case = { - 'secret': '', - 'port': None, - 'verbose': False, - 'global_delay_factor': 1, - 'use_keys': False, - 'key_file': None, - 'allow_agent': False, - 'ssh_strict': False, - 'system_host_keys': False, - 'alt_host_keys': False, - 'alt_key_file': '', - 'ssh_config_file': None, - 'session_timeout': 60, - 'blocking_timeout': 8, - 'keepalive': 0, - 'default_enter': None, - 'response_return': None, - 'serial_settings': None + "secret": "", + "port": None, + "verbose": False, + "global_delay_factor": 1, + "use_keys": False, + "key_file": None, + "allow_agent": False, + "ssh_strict": False, + "system_host_keys": False, + "alt_host_keys": False, + "alt_key_file": "", + "ssh_config_file": None, + "session_timeout": 60, + "blocking_timeout": 8, + "keepalive": 0, + "default_enter": None, + "response_return": None, + "serial_settings": None, } self.assertEqual(netmiko_args(test_case), test_case) test_case = { - 'inline_transfer': True, - 'transport': 'ssh', - 'secret': '', - 'port': None, - 'verbose': False, - 'global_delay_factor': 1, - 'use_keys': False, - 'key_file': None, - 'allow_agent': False, - 'ssh_strict': False, - 'system_host_keys': False, - 'alt_host_keys': False, - 'alt_key_file': '', - 'ssh_config_file': None, - 'session_timeout': 60, - 'blocking_timeout': 8, - 'keepalive': 0, - 'default_enter': None, - 'response_return': None, - 'serial_settings': None + "inline_transfer": True, + "transport": "ssh", + "secret": "", + "port": None, + "verbose": False, + "global_delay_factor": 1, + "use_keys": False, + "key_file": None, + "allow_agent": False, + "ssh_strict": False, + "system_host_keys": False, + "alt_host_keys": False, + "alt_key_file": "", + "ssh_config_file": None, + "session_timeout": 60, + "blocking_timeout": 8, + "keepalive": 0, + "default_enter": None, + "response_return": None, + "serial_settings": None, } result_dict = {} result_dict.update(test_case) - result_dict.pop('inline_transfer') - result_dict.pop('transport') + result_dict.pop("inline_transfer") + result_dict.pop("transport") self.assertEqual(netmiko_args(test_case), result_dict) class FakeNetworkDriver(NetworkDriver): - def __init__(self): """Connection details not needed.""" pass diff --git a/test/base/test_mock_driver.py b/test/base/test_mock_driver.py index f611989cd..e3c489dc0 100644 --- a/test/base/test_mock_driver.py +++ b/test/base/test_mock_driver.py @@ -37,26 +37,28 @@ class TestMockDriver(object): def test_basic(self): d = driver("blah", "bleh", "blih", optional_args=optional_args) - assert d.is_alive() == {u'is_alive': False} + assert d.is_alive() == {"is_alive": False} d.open() - assert d.is_alive() == {u'is_alive': True} + assert d.is_alive() == {"is_alive": True} d.close() - assert d.is_alive() == {u'is_alive': False} + assert d.is_alive() == {"is_alive": False} with pytest.raises(napalm.base.exceptions.ConnectionClosedException) as excinfo: d.get_facts() assert "connection closed" in py23_compat.text_type(excinfo.value) def test_context_manager(self): - with 
pytest.raises(napalm.base.exceptions.ConnectionException) as e, \ - driver("blah", "bleh", "blih", optional_args=fail_args) as d: + with pytest.raises(napalm.base.exceptions.ConnectionException) as e, driver( + "blah", "bleh", "blih", optional_args=fail_args + ) as d: pass assert "You told me to do this" in py23_compat.text_type(e.value) - with pytest.raises(AttributeError) as e, \ - driver("blah", "bleh", "blih", optional_args=optional_args) as d: - assert d.is_alive() == {u'is_alive': True} + with pytest.raises(AttributeError) as e, driver( + "blah", "bleh", "blih", optional_args=optional_args + ) as d: + assert d.is_alive() == {"is_alive": True} d.__fake_call() - assert d.is_alive() == {u'is_alive': False} + assert d.is_alive() == {"is_alive": False} assert "object has no attribute" in py23_compat.text_type(e.value) def test_mocking_getters(self): @@ -72,12 +74,16 @@ def test_not_mocking_getters(self): with pytest.raises(NotImplementedError) as excinfo: d.get_route_to() - expected = "You can provide mocked data in {}/get_route_to.1".format(optional_args["path"]) + expected = "You can provide mocked data in {}/get_route_to.1".format( + optional_args["path"] + ) assert expected in py23_compat.text_type(excinfo.value) with pytest.raises(NotImplementedError) as excinfo: d.get_route_to() - expected = "You can provide mocked data in {}/get_route_to.2".format(optional_args["path"]) + expected = "You can provide mocked data in {}/get_route_to.2".format( + optional_args["path"] + ) assert expected in py23_compat.text_type(excinfo.value) d.close() @@ -88,18 +94,24 @@ def test_arguments(self): with pytest.raises(TypeError) as excinfo: d.get_route_to(1, 2, 3) - assert "get_route_to: expected at most 3 arguments, got 4" in py23_compat.text_type( - excinfo.value) + assert ( + "get_route_to: expected at most 3 arguments, got 4" + in py23_compat.text_type(excinfo.value) + ) with pytest.raises(TypeError) as excinfo: d.get_route_to(1, 1, protocol=2) - assert "get_route_to: expected at most 3 arguments, got 3" in py23_compat.text_type( - excinfo.value) + assert ( + "get_route_to: expected at most 3 arguments, got 3" + in py23_compat.text_type(excinfo.value) + ) with pytest.raises(TypeError) as excinfo: d.get_route_to(proto=2) - assert "get_route_to got an unexpected keyword argument 'proto'" in py23_compat.text_type( - excinfo.value) + assert ( + "get_route_to got an unexpected keyword argument 'proto'" + in py23_compat.text_type(excinfo.value) + ) d.close() @@ -117,8 +129,10 @@ def test_mock_error(self): with pytest.raises(TypeError) as excinfo: d.get_bgp_neighbors() - assert "Couldn't resolve exception NoIdeaException" in py23_compat.text_type( - excinfo.value) + assert ( + "Couldn't resolve exception NoIdeaException" + in py23_compat.text_type(excinfo.value) + ) d.close() @@ -126,7 +140,10 @@ def test_cli(self): d = driver("blah", "bleh", "blih", optional_args=optional_args) d.open() result = d.cli(["a_command", "b_command"]) - assert result == {'a_command': 'result command a\n', 'b_command': 'result command b\n'} + assert result == { + "a_command": "result command a\n", + "b_command": "result command b\n", + } d.close() def test_configuration_merge(self): diff --git a/test/base/test_napalm_test_framework.py b/test/base/test_napalm_test_framework.py index e3a220870..3097b0088 100644 --- a/test/base/test_napalm_test_framework.py +++ b/test/base/test_napalm_test_framework.py @@ -12,8 +12,10 @@ class TestSkipNotImplemented(unittest.TestCase, ntb.TestGettersNetworkDriver): """Ensure that any tests are 
skipped if not implemented.""" + def setUp(self): class FakeThing(NetworkDriver): def __init__(self): pass + self.device = FakeThing() diff --git a/test/base/validate/test_unit.py b/test/base/validate/test_unit.py index e0d9ad39f..d9654f6e2 100644 --- a/test/base/validate/test_unit.py +++ b/test/base/validate/test_unit.py @@ -7,271 +7,392 @@ ( {"list": [r"\d{2}", 1, 2]}, [1, 2, 33], - {u'complies': True, u'extra': [], u'missing': [], u'present': [r'\d{2}', 1, 2]} + {u"complies": True, u"extra": [], u"missing": [], u"present": [r"\d{2}", 1, 2]}, ), ( {"list": [1, 2, 3]}, [1, 2, 3, 4, 5], - {u'complies': True, u'extra': [], u'missing': [], u'present': [1, 2, 3]} + {u"complies": True, u"extra": [], u"missing": [], u"present": [1, 2, 3]}, ), ( {"list": [2, 1, 3]}, [3, 2, 1], - {u'complies': True, u'extra': [], u'missing': [], u'present': [2, 1, 3]} + {u"complies": True, u"extra": [], u"missing": [], u"present": [2, 1, 3]}, ), ( {"list": [1, 2, {"list": [1, 2]}]}, [1, 2, [1, 2]], # {u'complies': True, u'extra': [], u'missing': [], u'present': [1, 2, [1, 2]]} - {u'complies': True, - u'extra': [], - u'missing': [], - u'present': [1, 2, {'list': [1, 2]}]} + { + u"complies": True, + u"extra": [], + u"missing": [], + u"present": [1, 2, {"list": [1, 2]}], + }, ), ( - {"list": [r'\d{2}', 4, 3]}, + {"list": [r"\d{2}", 4, 3]}, [1, 2, 3], - {u'complies': False, u'extra': [], u'missing': [r'\d{2}', 4], u'present': [3]} + {u"complies": False, u"extra": [], u"missing": [r"\d{2}", 4], u"present": [3]}, ), ( {"list": [{"list": [1, 2]}, 3]}, [1, 2, 3], - {u'complies': False, - u'extra': [], - u'missing': [{'list': [1, 2]}], - u'present': [3]} + { + u"complies": False, + u"extra": [], + u"missing": [{"list": [1, 2]}], + u"present": [3], + }, ), ( {"_mode": "strict", "list": [1, 2, 3]}, [1, 2, 3], - {u'complies': True, u'extra': [], u'missing': [], u'present': [1, 2, 3]} + {u"complies": True, u"extra": [], u"missing": [], u"present": [1, 2, 3]}, ), ( {"_mode": "strict", "list": [1, 2, 3]}, [1, 2, 3, 4, 5], - {u'complies': False, u'extra': [4, 5], u'missing': [], u'present': [1, 2, 3]} + {u"complies": False, u"extra": [4, 5], u"missing": [], u"present": [1, 2, 3]}, ), ( {"_mode": "strict", "list": [2, 1, 3]}, [3, 2, 1], - {u'complies': True, u'extra': [], u'missing': [], u'present': [2, 1, 3]} + {u"complies": True, u"extra": [], u"missing": [], u"present": [2, 1, 3]}, ), ( {"_mode": "strict", "list": [1, 2, {"_mode": "strict", "list": [1, 2]}]}, [1, 2, [1, 2]], # {u'complies': True, u'extra': [], u'missing': [], u'present': [1, 2, [1, 2]]} - {u'complies': True, - u'extra': [], - u'missing': [], - u'present': [1, 2, {'list': [1, 2]}]} + { + u"complies": True, + u"extra": [], + u"missing": [], + u"present": [1, 2, {"list": [1, 2]}], + }, ), ( {"_mode": "strict", "list": [4, 3]}, [1, 2, 3], - {u'complies': False, u'extra': [1, 2], u'missing': [4], u'present': [3]} + {u"complies": False, u"extra": [1, 2], u"missing": [4], u"present": [3]}, ), ( {"_mode": "strict", "list": [{"_mode": "strict", "list": [1, 2]}, 3]}, [1, 2, 3], - {u'complies': False, - u'extra': [1, 2], - u'missing': [{'list': [1, 2]}], - u'present': [3]} + { + u"complies": False, + u"extra": [1, 2], + u"missing": [{"list": [1, 2]}], + u"present": [3], + }, ), - ( - {'a': 1, 'b': 2, 'c': 3}, - {'a': 1, 'b': 2, 'c': 3}, - {u'complies': True, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': True, u'nested': False}}} + {"a": 1, "b": 2, "c": 3}, + 
{"a": 1, "b": 2, "c": 3}, + { + u"complies": True, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": {u"complies": True, u"nested": False}, + }, + }, ), ( - {'a': 1, 'b': 2, 'c': 3}, - {'a': 2, 'b': 2, 'c': 3}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'actual_value': 2, u'expected_value': 1, - u'complies': False, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': True, u'nested': False}}} + {"a": 1, "b": 2, "c": 3}, + {"a": 2, "b": 2, "c": 3}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": { + u"actual_value": 2, + u"expected_value": 1, + u"complies": False, + u"nested": False, + }, + "b": {u"complies": True, u"nested": False}, + "c": {u"complies": True, u"nested": False}, + }, + }, ), ( - {'a': 1, 'b': 2, 'c': 3}, - {'b': 1, 'c': 3}, - {u'complies': False, - u'extra': [], - u'missing': ['a'], - u'present': {'b': {u'actual_value': 1, u'expected_value': 2, - u'complies': False, u'nested': False}, - 'c': {u'complies': True, u'nested': False}}} + {"a": 1, "b": 2, "c": 3}, + {"b": 1, "c": 3}, + { + u"complies": False, + u"extra": [], + u"missing": ["a"], + u"present": { + "b": { + u"actual_value": 1, + u"expected_value": 2, + u"complies": False, + u"nested": False, + }, + "c": {u"complies": True, u"nested": False}, + }, + }, ), ( - {'a': 1, 'b': 2, 'c': {"A": 1, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1, "B": 2}}, - {u'complies': True, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': True, u'nested': True}}} + {"a": 1, "b": 2, "c": {"A": 1, "B": 2}}, + {"a": 1, "b": 2, "c": {"A": 1, "B": 2}}, + { + u"complies": True, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": {u"complies": True, u"nested": True}, + }, + }, ), ( - {'a': 1, 'b': 2, 'c': {"A": 1, "B": 2}}, - {'a': 1, 'b': 2, 'd': {"A": 1, "B": 2}}, - {u'complies': False, - u'extra': [], - u'missing': ['c'], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}}} + {"a": 1, "b": 2, "c": {"A": 1, "B": 2}}, + {"a": 1, "b": 2, "d": {"A": 1, "B": 2}}, + { + u"complies": False, + u"extra": [], + u"missing": ["c"], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + }, + }, ), ( - {'a': 1, 'b': 2, 'c': {"A": 3, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1, "B": 2}}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': False, - u'diff': {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'A': {u'actual_value': 1, - u'expected_value': 3, - u'complies': False, - u'nested': False}, - 'B': {u'complies': True, - u'nested': False}}}, - u'nested': True}}} + {"a": 1, "b": 2, "c": {"A": 3, "B": 2}}, + {"a": 1, "b": 2, "c": {"A": 1, "B": 2}}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": { + u"complies": False, + u"diff": { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "A": { + u"actual_value": 1, + u"expected_value": 3, + u"complies": False, + 
u"nested": False, + }, + "B": {u"complies": True, u"nested": False}, + }, + }, + u"nested": True, + }, + }, + }, ), ( - {'a': 1, 'b': 2, 'c': {"A": 3, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1}}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': False, - u'diff': {u'complies': False, - u'extra': [], - u'missing': ['B'], - u'present': {'A': {u'actual_value': 1, - u'expected_value': 3, - u'complies': False, - u'nested': False}}}, - u'nested': True}}} + {"a": 1, "b": 2, "c": {"A": 3, "B": 2}}, + {"a": 1, "b": 2, "c": {"A": 1}}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": { + u"complies": False, + u"diff": { + u"complies": False, + u"extra": [], + u"missing": ["B"], + u"present": { + "A": { + u"actual_value": 1, + u"expected_value": 3, + u"complies": False, + u"nested": False, + } + }, + }, + u"nested": True, + }, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': 3}, - {'a': 1, 'b': 2, 'c': 3}, - {u'complies': True, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': True, u'nested': False}}} + {"_mode": "strict", "a": 1, "b": 2, "c": 3}, + {"a": 1, "b": 2, "c": 3}, + { + u"complies": True, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": {u"complies": True, u"nested": False}, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': 3}, - {'a': 2, 'b': 2, 'c': 3}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'actual_value': 2, u'expected_value': 1, - u'complies': False, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': True, u'nested': False}}} + {"_mode": "strict", "a": 1, "b": 2, "c": 3}, + {"a": 2, "b": 2, "c": 3}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": { + u"actual_value": 2, + u"expected_value": 1, + u"complies": False, + u"nested": False, + }, + "b": {u"complies": True, u"nested": False}, + "c": {u"complies": True, u"nested": False}, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': 3}, - {'b': 1, 'c': 3}, - {u'complies': False, - u'extra': [], - u'missing': ['a'], - u'present': {'b': {u'actual_value': 1, u'expected_value': 2, - u'complies': False, u'nested': False}, - 'c': {u'complies': True, u'nested': False}}} + {"_mode": "strict", "a": 1, "b": 2, "c": 3}, + {"b": 1, "c": 3}, + { + u"complies": False, + u"extra": [], + u"missing": ["a"], + u"present": { + "b": { + u"actual_value": 1, + u"expected_value": 2, + u"complies": False, + u"nested": False, + }, + "c": {u"complies": True, u"nested": False}, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': {"_mode": "strict", "A": 1, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1, "B": 2}}, - {u'complies': True, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': True, u'nested': True}}} + {"_mode": "strict", "a": 1, "b": 2, "c": {"_mode": "strict", "A": 1, "B": 2}}, + {"a": 1, "b": 2, "c": {"A": 1, "B": 2}}, + { + u"complies": True, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": 
{u"complies": True, u"nested": False}, + "c": {u"complies": True, u"nested": True}, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': {"_mode": "strict", "A": 1, "B": 2}}, - {'a': 1, 'b': 2, 'd': {"A": 1, "B": 2}}, - {u'complies': False, - u'extra': ['d'], - u'missing': ['c'], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}}} + {"_mode": "strict", "a": 1, "b": 2, "c": {"_mode": "strict", "A": 1, "B": 2}}, + {"a": 1, "b": 2, "d": {"A": 1, "B": 2}}, + { + u"complies": False, + u"extra": ["d"], + u"missing": ["c"], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': {"_mode": "strict", "A": 3, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1, "B": 2}}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': False, - u'diff': {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'A': {u'actual_value': 1, - u'expected_value': 3, - u'complies': False, - u'nested': False}, - 'B': {u'complies': True, - u'nested': False}}}, - u'nested': True}}} + {"_mode": "strict", "a": 1, "b": 2, "c": {"_mode": "strict", "A": 3, "B": 2}}, + {"a": 1, "b": 2, "c": {"A": 1, "B": 2}}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": { + u"complies": False, + u"diff": { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "A": { + u"actual_value": 1, + u"expected_value": 3, + u"complies": False, + u"nested": False, + }, + "B": {u"complies": True, u"nested": False}, + }, + }, + u"nested": True, + }, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': {"_mode": "strict", "A": 3, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1, "C": 4}}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': False, - u'diff': {u'complies': False, - u'extra': ['C'], - u'missing': ['B'], - u'present': {'A': {u'actual_value': 1, - u'expected_value': 3, - u'complies': False, - u'nested': False}}}, - u'nested': True}}} + {"_mode": "strict", "a": 1, "b": 2, "c": {"_mode": "strict", "A": 3, "B": 2}}, + {"a": 1, "b": 2, "c": {"A": 1, "C": 4}}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": { + u"complies": False, + u"diff": { + u"complies": False, + u"extra": ["C"], + u"missing": ["B"], + u"present": { + "A": { + u"actual_value": 1, + u"expected_value": 3, + u"complies": False, + u"nested": False, + } + }, + }, + u"nested": True, + }, + }, + }, ), ( - {"_mode": "strict", 'a': 1, 'b': 2, 'c': {"_mode": "strict", "A": 3, "B": 2}}, - {'a': 1, 'b': 2, 'c': {"A": 1, "C": 4}}, - {u'complies': False, - u'extra': [], - u'missing': [], - u'present': {'a': {u'complies': True, u'nested': False}, - 'b': {u'complies': True, u'nested': False}, - 'c': {u'complies': False, - u'diff': {u'complies': False, - u'extra': ['C'], - u'missing': ['B'], - u'present': {'A': {u'actual_value': 1, - u'expected_value': 3, - u'complies': False, - u'nested': False}}}, - u'nested': True}}} + {"_mode": "strict", "a": 1, "b": 2, "c": {"_mode": "strict", "A": 3, "B": 2}}, + 
{"a": 1, "b": 2, "c": {"A": 1, "C": 4}}, + { + u"complies": False, + u"extra": [], + u"missing": [], + u"present": { + "a": {u"complies": True, u"nested": False}, + "b": {u"complies": True, u"nested": False}, + "c": { + u"complies": False, + u"diff": { + u"complies": False, + u"extra": ["C"], + u"missing": ["B"], + u"present": { + "A": { + u"actual_value": 1, + u"expected_value": 3, + u"complies": False, + u"nested": False, + } + }, + }, + u"nested": True, + }, + }, + }, ), ] @@ -279,7 +400,7 @@ class TestValidate: """Wraps tests.""" - @pytest.mark.parametrize('src, dst, result', _compare_getter) + @pytest.mark.parametrize("src, dst, result", _compare_getter) def test__compare_getter_list(self, src, dst, result): """Test for _compare_getter_list.""" assert validate.compare(src, dst) == result diff --git a/test/base/validate/test_validate.py b/test/base/validate/test_validate.py index 2ad377764..ef3688e93 100644 --- a/test/base/validate/test_validate.py +++ b/test/base/validate/test_validate.py @@ -19,9 +19,9 @@ def construct_yaml_str(self, node): def _read_yaml(filename): - yaml.Loader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str) - yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:str', construct_yaml_str) - with open(filename, 'r') as f: + yaml.Loader.add_constructor("tag:yaml.org,2002:str", construct_yaml_str) + yaml.SafeLoader.add_constructor("tag:yaml.org,2002:str", construct_yaml_str) + with open(filename, "r") as f: return yaml.safe_load(f.read()) @@ -34,7 +34,9 @@ def test_simple_fail(self): expected_report = _read_yaml(os.path.join(mocked_data, "report.yml")) device = FakeDriver(mocked_data) - actual_report = device.compliance_report(os.path.join(mocked_data, "validate.yml")) + actual_report = device.compliance_report( + os.path.join(mocked_data, "validate.yml") + ) assert expected_report == actual_report, yaml.safe_dump(actual_report) @@ -44,7 +46,9 @@ def test_non_strict_pass(self): expected_report = _read_yaml(os.path.join(mocked_data, "report.yml")) device = FakeDriver(mocked_data) - actual_report = device.compliance_report(os.path.join(mocked_data, "validate.yml")) + actual_report = device.compliance_report( + os.path.join(mocked_data, "validate.yml") + ) assert expected_report == actual_report, yaml.safe_dump(actual_report) @@ -65,7 +69,9 @@ def test_non_strict_fail(self): expected_report = _read_yaml(os.path.join(mocked_data, "report.yml")) device = FakeDriver(mocked_data) - actual_report = device.compliance_report(os.path.join(mocked_data, "validate.yml")) + actual_report = device.compliance_report( + os.path.join(mocked_data, "validate.yml") + ) assert expected_report == actual_report, yaml.safe_dump(actual_report) @@ -86,7 +92,9 @@ def test_strict_fail(self): expected_report = _read_yaml(os.path.join(mocked_data, "report.yml")) device = FakeDriver(mocked_data) - actual_report = device.compliance_report(os.path.join(mocked_data, "validate.yml")) + actual_report = device.compliance_report( + os.path.join(mocked_data, "validate.yml") + ) assert expected_report == actual_report, yaml.safe_dump(actual_report) @@ -107,7 +115,9 @@ def test_strict_pass(self): expected_report = _read_yaml(os.path.join(mocked_data, "report.yml")) device = FakeDriver(mocked_data) - actual_report = device.compliance_report(os.path.join(mocked_data, "validate.yml")) + actual_report = device.compliance_report( + os.path.join(mocked_data, "validate.yml") + ) assert expected_report == actual_report, yaml.safe_dump(actual_report) @@ -128,7 +138,9 @@ def test_strict_pass_skip(self): 
expected_report = _read_yaml(os.path.join(mocked_data, "report.yml")) device = FakeDriver(mocked_data) - actual_report = device.compliance_report(os.path.join(mocked_data, "validate.yml")) + actual_report = device.compliance_report( + os.path.join(mocked_data, "validate.yml") + ) assert expected_report == actual_report, yaml.safe_dump(actual_report) @@ -164,9 +176,11 @@ def __init__(self, path): def __getattribute__(self, name): def load_json(filename): def func(**kwargs): - with open(filename, 'r') as f: + with open(filename, "r") as f: return json.loads(f.read()) + return func + if name.startswith("get_") or name in C.ACTION_TYPE_METHODS: filename = os.path.join(self.path, "{}.json".format(name)) return load_json(filename) diff --git a/test/eos/TestEOSDriver.py b/test/eos/TestEOSDriver.py index b2280bc71..5139915d6 100644 --- a/test/eos/TestEOSDriver.py +++ b/test/eos/TestEOSDriver.py @@ -19,18 +19,18 @@ class TestConfigEOSDriver(unittest.TestCase, TestConfigNetworkDriver): - @classmethod def setUpClass(cls): - hostname = '127.0.0.1' - username = 'vagrant' - password = 'vagrant' - cls.vendor = 'eos' + hostname = "127.0.0.1" + username = "vagrant" + password = "vagrant" + cls.vendor = "eos" - optional_args = {'port': 12443, } - cls.device = eos.EOSDriver(hostname, username, password, - timeout=60, optional_args=optional_args) + optional_args = {"port": 12443} + cls.device = eos.EOSDriver( + hostname, username, password, timeout=60, optional_args=optional_args + ) cls.device.open() - cls.device.load_replace_candidate(filename='%s/initial.conf' % cls.vendor) + cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor) cls.device.commit_config() diff --git a/test/eos/conftest.py b/test/eos/conftest.py index 5d929bee9..c885f5849 100644 --- a/test/eos/conftest.py +++ b/test/eos/conftest.py @@ -9,16 +9,18 @@ from napalm.eos import eos -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def set_device_parameters(request): """Set up the class.""" + def fin(): request.cls.device.close() + request.addfinalizer(fin) request.cls.driver = eos.EOSDriver request.cls.patched_driver = PatchedEOSDriver - request.cls.vendor = 'eos' + request.cls.vendor = "eos" parent_conftest.set_device_parameters(request) @@ -33,24 +35,24 @@ class PatchedEOSDriver(eos.EOSDriver): def __init__(self, hostname, username, password, timeout=60, optional_args=None): super().__init__(hostname, username, password, timeout, optional_args) - self.patched_attrs = ['device'] + self.patched_attrs = ["device"] self.device = FakeEOSDevice() class FakeEOSDevice(BaseTestDouble): """EOS device test double.""" - def run_commands(self, command_list, encoding='json'): + def run_commands(self, command_list, encoding="json"): """Fake run_commands.""" result = list() for command in command_list: - filename = '{}.{}'.format(self.sanitize_text(command), encoding) + filename = "{}.{}".format(self.sanitize_text(command), encoding) full_path = self.find_file(filename) - if encoding == 'json': + if encoding == "json": result.append(self.read_json_file(full_path)) else: - result.append({'output': self.read_txt_file(full_path)}) + result.append({"output": self.read_txt_file(full_path)}) return result diff --git a/test/eos/test_heredoc.py b/test/eos/test_heredoc.py index 03c58d6d5..7ffd1bfb6 100644 --- a/test/eos/test_heredoc.py +++ b/test/eos/test_heredoc.py @@ -7,7 +7,8 @@ class TestConfigMangling(object): def test_heredoc(self): - raw_config = dedent("""\ + raw_config = dedent( + """\ hostname vEOS ip name-server 192.0.2.1 ! 
@@ -31,7 +32,8 @@ def test_heredoc(self): management ssh idle-timeout 15 ! - """) + """ + ) self.device.device.run_commands = mock.MagicMock() @@ -44,21 +46,22 @@ def test_heredoc(self): "ip name-server 192.0.2.1", { "cmd": "banner login", - "input": "This is a banner that spans\nmultiple lines in order to test\nHEREDOC conversion" # noqa + "input": "This is a banner that spans\nmultiple lines in order to test\nHEREDOC conversion", # noqa }, "management api http-commands", { "cmd": "protocol https certificate", - "input": "---BEGIN CERTIFICATE---\nFAKE-CERTIFICATE-DATA\n---END CERTIFICATE---\nEOF\n---BEGIN PRIVATE KEY---\nFAKE-KEY-DATA\n---END PRIVATE KEY---" # noqa + "input": "---BEGIN CERTIFICATE---\nFAKE-CERTIFICATE-DATA\n---END CERTIFICATE---\nEOF\n---BEGIN PRIVATE KEY---\nFAKE-KEY-DATA\n---END PRIVATE KEY---", # noqa }, "management ssh", - "idle-timeout 15" + "idle-timeout 15", ] self.device.device.run_commands.assert_called_with(expected_result) def test_mode_comment(self): - raw_config = dedent("""\ + raw_config = dedent( + """\ ip access-list standard test1 !! This is a !! multiline mode comment @@ -76,7 +79,8 @@ def test_mode_comment(self): EOF permit host 192.0.2.3 ! - """) + """ + ) self.device.device.run_commands = mock.MagicMock() @@ -87,28 +91,29 @@ def test_mode_comment(self): "ip access-list standard test1", { "cmd": "comment", - "input": "This is a\nmultiline mode comment\nfor standard ACL test1" + "input": "This is a\nmultiline mode comment\nfor standard ACL test1", }, "permit host 192.0.2.1", "ip access-list standard test2", { "cmd": "comment", - "input": "This is a single-line mode comment for standard ACL test2" + "input": "This is a single-line mode comment for standard ACL test2", }, "permit host 192.0.2.2", "ip access-list standard test3", { "cmd": "comment", - "input": "This is a multi-line HEREDOC\ncomment for standard ACL test3" + "input": "This is a multi-line HEREDOC\ncomment for standard ACL test3", }, - "permit host 192.0.2.3" + "permit host 192.0.2.3", ] self.device.device.run_commands.assert_called_with(expected_result) def test_heredoc_with_bangs(self): - raw_config = dedent("""\ + raw_config = dedent( + """\ hostname vEOS ip name-server 192.0.2.1 ! @@ -120,7 +125,8 @@ def test_heredoc_with_bangs(self): management ssh idle-timeout 15 ! - """) + """ + ) self.device.device.run_commands = mock.MagicMock() @@ -133,10 +139,10 @@ def test_heredoc_with_bangs(self): "ip name-server 192.0.2.1", { "cmd": "banner login", - "input": "!! This is a banner that contains\n!!!bangs!" # noqa + "input": "!! 
This is a banner that contains\n!!!bangs!", # noqa }, "management ssh", - "idle-timeout 15" + "idle-timeout 15", ] self.device.device.run_commands.assert_called_with(expected_result) diff --git a/test/ios/TestIOSDriver.py b/test/ios/TestIOSDriver.py index f559e00f6..4fe9520d3 100755 --- a/test/ios/TestIOSDriver.py +++ b/test/ios/TestIOSDriver.py @@ -49,17 +49,19 @@ class TestConfigIOSDriver(unittest.TestCase, TestConfigNetworkDriver): @classmethod def setUpClass(cls): """Executed when the class is instantiated.""" - ip_addr = '127.0.0.1' - username = 'vagrant' - password = 'vagrant' - cls.vendor = 'ios' - optional_args = {'port': 12204, 'dest_file_system': 'bootflash:'} - - cls.device = ios.IOSDriver(ip_addr, username, password, optional_args=optional_args) + ip_addr = "127.0.0.1" + username = "vagrant" + password = "vagrant" + cls.vendor = "ios" + optional_args = {"port": 12204, "dest_file_system": "bootflash:"} + + cls.device = ios.IOSDriver( + ip_addr, username, password, optional_args=optional_args + ) cls.device.open() # Setup initial state - cls.device.load_replace_candidate(filename='%s/initial.conf' % cls.vendor) + cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor) cls.device.commit_config() def test_ios_only_confirm(self): @@ -69,48 +71,52 @@ def test_ios_only_confirm(self): _enable_confirm() reenables this """ # Set initial device configuration - self.device.load_replace_candidate(filename='%s/initial.conf' % self.vendor) + self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor) self.device.commit_config() # Verify initial state - output = self.device.device.send_command('show run | inc file prompt') + output = self.device.device.send_command("show run | inc file prompt") output = output.strip() - self.assertEqual(output, '') + self.assertEqual(output, "") # Disable confirmation self.device._disable_confirm() - output = self.device.device.send_command('show run | inc file prompt') + output = self.device.device.send_command("show run | inc file prompt") output = output.strip() - self.assertEqual(output, 'file prompt quiet') + self.assertEqual(output, "file prompt quiet") # Reenable confirmation self.device._enable_confirm() - output = self.device.device.send_command('show run | inc file prompt') + output = self.device.device.send_command("show run | inc file prompt") output = output.strip() - self.assertEqual(output, '') + self.assertEqual(output, "") def test_ios_only_gen_full_path(self): """Test gen_full_path() method.""" output = self.device._gen_full_path(self.device.candidate_cfg) - self.assertEqual(output, self.device.dest_file_system + '/candidate_config.txt') + self.assertEqual(output, self.device.dest_file_system + "/candidate_config.txt") output = self.device._gen_full_path(self.device.rollback_cfg) - self.assertEqual(output, self.device.dest_file_system + '/rollback_config.txt') + self.assertEqual(output, self.device.dest_file_system + "/rollback_config.txt") output = self.device._gen_full_path(self.device.merge_cfg) - self.assertEqual(output, self.device.dest_file_system + '/merge_config.txt') + self.assertEqual(output, self.device.dest_file_system + "/merge_config.txt") - output = self.device._gen_full_path(filename='running-config', file_system='system:') - self.assertEqual(output, 'system:/running-config') + output = self.device._gen_full_path( + filename="running-config", file_system="system:" + ) + self.assertEqual(output, "system:/running-config") def test_ios_only_check_file_exists(self): """Test _check_file_exists() 
method.""" - self.device.load_replace_candidate(filename='%s/initial.conf' % self.vendor) - valid_file = self.device._check_file_exists(self.device.dest_file_system + - '/candidate_config.txt') + self.device.load_replace_candidate(filename="%s/initial.conf" % self.vendor) + valid_file = self.device._check_file_exists( + self.device.dest_file_system + "/candidate_config.txt" + ) self.assertTrue(valid_file) - invalid_file = self.device._check_file_exists(self.device.dest_file_system + - '/bogus_999.txt') + invalid_file = self.device._check_file_exists( + self.device.dest_file_system + "/bogus_999.txt" + ) self.assertFalse(invalid_file) @@ -130,14 +136,16 @@ def setUpClass(cls): """Executed when the class is instantiated.""" cls.mock = True - username = 'vagrant' - ip_addr = '192.168.0.234' - password = 'vagrant' - cls.vendor = 'ios' + username = "vagrant" + ip_addr = "192.168.0.234" + password = "vagrant" + cls.vendor = "ios" optional_args = {} - optional_args['dest_file_system'] = 'flash:' + optional_args["dest_file_system"] = "flash:" - cls.device = ios.IOSDriver(ip_addr, username, password, optional_args=optional_args) + cls.device = ios.IOSDriver( + ip_addr, username, password, optional_args=optional_args + ) if cls.mock: cls.device.device = FakeIOSDevice() @@ -175,8 +183,8 @@ def read_txt_file(filename): def send_command_expect(self, command, **kwargs): """Fake execute a command in the device by just returning the content of a file.""" - cmd = re.sub(r'[\[\]\*\^\+\s\|]', '_', command) - output = self.read_txt_file('ios/mock_data/{}.txt'.format(cmd)) + cmd = re.sub(r"[\[\]\*\^\+\s\|]", "_", command) + output = self.read_txt_file("ios/mock_data/{}.txt".format(cmd)) return py23_compat.text_type(output) def send_command(self, command, **kwargs): diff --git a/test/ios/conftest.py b/test/ios/conftest.py index 926310845..0d1919337 100644 --- a/test/ios/conftest.py +++ b/test/ios/conftest.py @@ -13,16 +13,18 @@ from napalm.ios import ios -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def set_device_parameters(request): """Set up the class.""" + def fin(): request.cls.device.close() + request.addfinalizer(fin) request.cls.driver = ios.IOSDriver request.cls.patched_driver = PatchedIOSDriver - request.cls.vendor = 'ios' + request.cls.vendor = "ios" parent_conftest.set_device_parameters(request) @@ -38,16 +40,14 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) super().__init__(hostname, username, password, timeout, optional_args) - self.patched_attrs = ['device'] + self.patched_attrs = ["device"] self.device = FakeIOSDevice() def disconnect(self): pass def is_alive(self): - return { - 'is_alive': True # In testing everything works.. - } + return {"is_alive": True} # In testing everything works.. 
def open(self): pass @@ -57,7 +57,7 @@ class FakeIOSDevice(BaseTestDouble): """IOS device test double.""" def send_command(self, command, **kwargs): - filename = '{}.txt'.format(self.sanitize_text(command)) + filename = "{}.txt".format(self.sanitize_text(command)) full_path = self.find_file(filename) result = self.read_txt_file(full_path) return py23_compat.text_type(result) diff --git a/test/iosxr/TestIOSXRDriver.py b/test/iosxr/TestIOSXRDriver.py index 0f86d19d8..7444fa4fe 100644 --- a/test/iosxr/TestIOSXRDriver.py +++ b/test/iosxr/TestIOSXRDriver.py @@ -20,35 +20,31 @@ class TestConfigIOSXRDriver(unittest.TestCase, TestConfigNetworkDriver): - @classmethod def setUpClass(cls): - hostname = '127.0.0.1' - username = 'vagrant' - password = 'vagrant' - cls.vendor = 'iosxr' - - optional_args = {'port': 12202} - cls.device = IOSXRDriver(hostname, - username, - password, - timeout=60, - optional_args=optional_args) + hostname = "127.0.0.1" + username = "vagrant" + password = "vagrant" + cls.vendor = "iosxr" + + optional_args = {"port": 12202} + cls.device = IOSXRDriver( + hostname, username, password, timeout=60, optional_args=optional_args + ) cls.device.open() - cls.device.load_replace_candidate(filename='%s/initial.conf' % cls.vendor) + cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor) cls.device.commit_config() class TestGetterIOSXRDriver(unittest.TestCase, TestGettersNetworkDriver): - @classmethod def setUpClass(cls): cls.mock = True - hostname = '192.168.56.202' - username = 'vagrant' - password = 'vagrant' - cls.vendor = 'iosxr' + hostname = "192.168.56.202" + username = "vagrant" + password = "vagrant" + cls.vendor = "iosxr" cls.device = IOSXRDriver(hostname, username, password, timeout=60) @@ -59,7 +55,6 @@ def setUpClass(cls): class FakeIOSXRDevice: - @staticmethod def read_txt_file(filename): curr_dir = os.path.dirname(os.path.abspath(__file__)) @@ -68,27 +63,29 @@ def read_txt_file(filename): return data_file.read() def _execute_config_show(self, show_command): - rpc_request = '{show_command}'.format( + rpc_request = "{show_command}".format( show_command=show_command ) return self.make_rpc_call(rpc_request) def show_version(self): - return self.read_txt_file('iosxr/mock_data/show_version.txt') + return self.read_txt_file("iosxr/mock_data/show_version.txt") def show_interfaces(self): - return self.read_txt_file('iosxr/mock_data/show_interfaces.txt') + return self.read_txt_file("iosxr/mock_data/show_interfaces.txt") def show_interface_description(self): - return self.read_txt_file('iosxr/mock_data/show_interface_description.txt') + return self.read_txt_file("iosxr/mock_data/show_interface_description.txt") def show_lldp_neighbors(self): - return self.read_txt_file('iosxr/mock_data/show_lldp_neighbors.txt') + return self.read_txt_file("iosxr/mock_data/show_lldp_neighbors.txt") def make_rpc_call(self, rpc_call): - rpc_call = rpc_call.replace('<', '_')\ - .replace('>', '_')\ - .replace('/', '_')\ - .replace('\n', '')\ - .replace(' ', '') - return self.read_txt_file('iosxr/mock_data/{}.rpc'.format(rpc_call[0:150])) + rpc_call = ( + rpc_call.replace("<", "_") + .replace(">", "_") + .replace("/", "_") + .replace("\n", "") + .replace(" ", "") + ) + return self.read_txt_file("iosxr/mock_data/{}.rpc".format(rpc_call[0:150])) diff --git a/test/iosxr/conftest.py b/test/iosxr/conftest.py index b136cc495..c88eeb4a2 100644 --- a/test/iosxr/conftest.py +++ b/test/iosxr/conftest.py @@ -12,16 +12,18 @@ from napalm.iosxr import iosxr -@pytest.fixture(scope='class') 
+@pytest.fixture(scope="class") def set_device_parameters(request): """Set up the class.""" + def fin(): request.cls.device.close() + request.addfinalizer(fin) request.cls.driver = iosxr.IOSXRDriver request.cls.patched_driver = PatchedIOSXRDriver - request.cls.vendor = 'iosxr' + request.cls.vendor = "iosxr" parent_conftest.set_device_parameters(request) @@ -37,13 +39,11 @@ def __init__(self, hostname, username, password, timeout=60, optional_args=None) super().__init__(hostname, username, password, timeout, optional_args) - self.patched_attrs = ['device'] + self.patched_attrs = ["device"] self.device = FakeIOSXRDevice() def is_alive(self): - return { - 'is_alive': True # In testing everything works.. - } + return {"is_alive": True} # In testing everything works.. def open(self): pass @@ -56,7 +56,7 @@ def close(self): pass def make_rpc_call(self, rpc_call, encoded=True): - filename = '{}.txt'.format(self.sanitize_text(rpc_call)) + filename = "{}.txt".format(self.sanitize_text(rpc_call)) full_path = self.find_file(filename) result = self.read_txt_file(full_path) if encoded: @@ -65,13 +65,13 @@ def make_rpc_call(self, rpc_call, encoded=True): return result def show_lldp_neighbors(self): - filename = 'show_lldp_neighbors.txt' + filename = "show_lldp_neighbors.txt" full_path = self.find_file(filename) result = self.read_txt_file(full_path) return result def _execute_config_show(self, show_command): - rpc_request = '{show_command}'.format( + rpc_request = "{show_command}".format( show_command=show_command ) return self.make_rpc_call(rpc_request, encoded=False) diff --git a/test/junos/TestJunOSDriver.py b/test/junos/TestJunOSDriver.py index 5abbee52d..2f8d5e0ae 100644 --- a/test/junos/TestJunOSDriver.py +++ b/test/junos/TestJunOSDriver.py @@ -22,33 +22,29 @@ class TestConfigJunOSDriver(unittest.TestCase, TestConfigNetworkDriver): - @classmethod def setUpClass(cls): - hostname = '127.0.0.1' - username = 'vagrant' - password = 'vagrant123' - cls.vendor = 'junos' - - optional_args = {'port': 12203, } - cls.device = JunOSDriver(hostname, - username, - password, - timeout=60, - optional_args=optional_args) + hostname = "127.0.0.1" + username = "vagrant" + password = "vagrant123" + cls.vendor = "junos" + + optional_args = {"port": 12203} + cls.device = JunOSDriver( + hostname, username, password, timeout=60, optional_args=optional_args + ) cls.device.open() class TestGetterJunOSDriver(unittest.TestCase, TestGettersNetworkDriver): - @classmethod def setUpClass(cls): cls.mock = True - hostname = '192.168.56.203' - username = 'vagrant' - password = 'vagrant123' - cls.vendor = 'junos' + hostname = "192.168.56.203" + username = "vagrant" + password = "vagrant123" + cls.vendor = "junos" cls.device = JunOSDriver(hostname, username, password, timeout=60) @@ -59,42 +55,41 @@ def setUpClass(cls): class FakeJunOSDevice: - def __init__(self): self.rpc = FakeRPCObject(self) self._conn = FakeConnection(self.rpc) self.ON_JUNOS = True # necessary for fake devices self.facts = { - 'domain': None, - 'hostname': 'vsrx', - 'ifd_style': 'CLASSIC', - '2RE': False, - 'serialnumber': 'beb914a9cca3', - 'fqdn': 'vsrx', - 'virtual': True, - 'switch_style': 'NONE', - 'version': '12.1X47-D20.7', - 'HOME': '/cf/var/home/vagrant', - 'srx_cluster': False, - 'model': 'FIREFLY-PERIMETER', - 'RE0': { - 'status': 'Testing', - 'last_reboot_reason': 'Router rebooted after a normal shutdown.', - 'model': 'FIREFLY-PERIMETER RE', - 'up_time': '1 hour, 13 minutes, 37 seconds' + "domain": None, + "hostname": "vsrx", + "ifd_style": "CLASSIC", + 
"2RE": False, + "serialnumber": "beb914a9cca3", + "fqdn": "vsrx", + "virtual": True, + "switch_style": "NONE", + "version": "12.1X47-D20.7", + "HOME": "/cf/var/home/vagrant", + "srx_cluster": False, + "model": "FIREFLY-PERIMETER", + "RE0": { + "status": "Testing", + "last_reboot_reason": "Router rebooted after a normal shutdown.", + "model": "FIREFLY-PERIMETER RE", + "up_time": "1 hour, 13 minutes, 37 seconds", }, - 'vc_capable': False, - 'personality': 'SRX_BRANCH' + "vc_capable": False, + "personality": "SRX_BRANCH", } def read_txt_file(self, filename): with open(filename) as data_file: return data_file.read() - def cli(self, command=''): + def cli(self, command=""): return self.read_txt_file( - 'junos/mock_data/{parsed_command}.txt'.format( - parsed_command=command.replace(' ', '_') + "junos/mock_data/{parsed_command}.txt".format( + parsed_command=command.replace(" ", "_") ) ) @@ -113,10 +108,11 @@ def __getattr__(self, item): return self def response(self, **rpc_args): - instance = rpc_args.pop('instance', '') + instance = rpc_args.pop("instance", "") xml_string = self._device.read_txt_file( - 'junos/mock_data/{}{}.txt'.format(self.item, instance)) + "junos/mock_data/{}{}.txt".format(self.item, instance) + ) return lxml.etree.fromstring(xml_string) def get_config(self, get_cmd=None, filter_xml=None, options={}): @@ -126,22 +122,22 @@ def get_config(self, get_cmd=None, filter_xml=None, options={}): if get_cmd is not None: get_cmd_str = lxml.etree.tostring(get_cmd) - filename = get_cmd_str.replace('<', '_')\ - .replace('>', '_')\ - .replace('/', '_')\ - .replace('\n', '')\ - .replace(' ', '') + filename = ( + get_cmd_str.replace("<", "_") + .replace(">", "_") + .replace("/", "_") + .replace("\n", "") + .replace(" ", "") + ) # no get_cmd means it should mock the eznc get_config else: - filename = 'get_config__' + '__'.join( - ['{0}_{1}'.format(k, v) for k, v in sorted(options.items())] + filename = "get_config__" + "__".join( + ["{0}_{1}".format(k, v) for k, v in sorted(options.items())] ) xml_string = self._device.read_txt_file( - 'junos/mock_data/{filename}.txt'.format( - filename=filename[0:150] - ) + "junos/mock_data/{filename}.txt".format(filename=filename[0:150]) ) return lxml.etree.fromstring(xml_string) @@ -161,6 +157,7 @@ def response(self, non_std_command=None): class RPCReply: def __init__(self, reply): self._NCElement__doc = reply + rpc_reply = RPCReply(self._rpc.get_config(get_cmd=non_std_command)) return rpc_reply @@ -168,6 +165,5 @@ def __init__(self, reply): class FakeConnection: - def __init__(self, rpc): self.rpc = FakeConnectionRPCObject(rpc) diff --git a/test/junos/conftest.py b/test/junos/conftest.py index ae86525fa..56e37e11a 100644 --- a/test/junos/conftest.py +++ b/test/junos/conftest.py @@ -11,16 +11,18 @@ from napalm.junos import junos -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def set_device_parameters(request): """Set up the class.""" + def fin(): request.cls.device.close() + request.addfinalizer(fin) request.cls.driver = junos.JunOSDriver request.cls.patched_driver = PatchedJunOSDriver - request.cls.vendor = 'junos' + request.cls.vendor = "junos" parent_conftest.set_device_parameters(request) @@ -33,46 +35,45 @@ class PatchedJunOSDriver(junos.JunOSDriver): """Patched JunOS Driver.""" def __init__(self, hostname, username, password, timeout=60, optional_args=None): - optional_args['config_lock'] = False # to not try lock on open() - super(self.__class__, self).__init__(hostname, username, password, timeout, optional_args) + 
optional_args["config_lock"] = False # to not try lock on open() + super(self.__class__, self).__init__( + hostname, username, password, timeout, optional_args + ) - self.patched_attrs = ['device'] + self.patched_attrs = ["device"] self.device = FakeJunOSDevice() def is_alive(self): - return { - 'is_alive': True # always alive during the tests... - } + return {"is_alive": True} # always alive during the tests... class FakeJunOSDevice(BaseTestDouble): - def __init__(self): self.rpc = FakeRPCObject(self) self._conn = FakeConnection(self.rpc) - self.alternative_facts_file = 'facts.yml' + self.alternative_facts_file = "facts.yml" self.ON_JUNOS = True # necessary for fake devices self.default_facts = { - 'domain': None, - 'hostname': 'vsrx', - 'ifd_style': 'CLASSIC', - '2RE': False, - 'serialnumber': 'beb914a9cca3', - 'fqdn': 'vsrx', - 'virtual': True, - 'switch_style': 'NONE', - 'version': '12.1X47-D20.7', - 'HOME': '/cf/var/home/vagrant', - 'srx_cluster': False, - 'model': 'FIREFLY-PERIMETER', - 'RE0': { - 'status': 'Testing', - 'last_reboot_reason': 'Router rebooted after a normal shutdown.', - 'model': 'FIREFLY-PERIMETER RE', - 'up_time': '1 hour, 13 minutes, 37 seconds' + "domain": None, + "hostname": "vsrx", + "ifd_style": "CLASSIC", + "2RE": False, + "serialnumber": "beb914a9cca3", + "fqdn": "vsrx", + "virtual": True, + "switch_style": "NONE", + "version": "12.1X47-D20.7", + "HOME": "/cf/var/home/vagrant", + "srx_cluster": False, + "model": "FIREFLY-PERIMETER", + "RE0": { + "status": "Testing", + "last_reboot_reason": "Router rebooted after a normal shutdown.", + "model": "FIREFLY-PERIMETER RE", + "up_time": "1 hour, 13 minutes, 37 seconds", }, - 'vc_capable': False, - 'personality': 'SRX_BRANCH' + "vc_capable": False, + "personality": "SRX_BRANCH", } self._uptime = 4380 @@ -85,7 +86,7 @@ def facts(self): except IOError: self._facts = self.default_facts return self._facts - with open(alt_facts_filepath, 'r') as alt_facts: + with open(alt_facts_filepath, "r") as alt_facts: self._facts.update(yaml.safe_load(alt_facts)) return self._facts @@ -102,8 +103,8 @@ def close(self): def bind(*args, **kvargs): pass - def cli(self, command=''): - filename = '{safe_command}.txt'.format(safe_command=self.sanitize_text(command)) + def cli(self, command=""): + filename = "{safe_command}.txt".format(safe_command=self.sanitize_text(command)) fielpath = self.find_file(filename) return self.read_txt_file(fielpath) @@ -122,11 +123,9 @@ def __getattr__(self, item): return self def response(self, **rpc_args): - instance = rpc_args.pop('instance', '') + instance = rpc_args.pop("instance", "") - filename = '{item}{instance}.xml'.format( - item=self.item, instance=instance - ) + filename = "{item}{instance}.xml".format(item=self.item, instance=instance) filepathpath = self._device.find_file(filename) xml_string = self._device.read_txt_file(filepathpath) @@ -138,16 +137,16 @@ def get_config(self, get_cmd=None, filter_xml=None, options={}): # E.g.: if get_cmd is not None: - get_cmd_str = lxml.etree.tostring(get_cmd).decode('utf-8') + get_cmd_str = lxml.etree.tostring(get_cmd).decode("utf-8") filename = self._device.sanitize_text(get_cmd_str) # no get_cmd means it should mock the eznc get_config else: - filename = 'get_config__' + '__'.join( - ['{0}_{1}'.format(k, v) for k, v in sorted(options.items())] + filename = "get_config__" + "__".join( + ["{0}_{1}".format(k, v) for k, v in sorted(options.items())] ) - filename = '{filename}.xml'.format(filename=filename[0:150]) + filename = 
"{filename}.xml".format(filename=filename[0:150]) filepathpath = self._device.find_file(filename) xml_string = self._device.read_txt_file(filepathpath) @@ -169,6 +168,7 @@ def response(self, non_std_command=None): class RPCReply: def __init__(self, reply): self._NCElement__doc = reply + rpc_reply = RPCReply(self._rpc.get_config(get_cmd=non_std_command)) return rpc_reply @@ -176,7 +176,6 @@ def __init__(self, reply): class FakeConnection: - def __init__(self, rpc): self.rpc = FakeConnectionRPCObject(rpc) self._session = FakeSession() diff --git a/test/nxos/TestDriver.py b/test/nxos/TestDriver.py index 1f97ea4cc..d1bf711e5 100644 --- a/test/nxos/TestDriver.py +++ b/test/nxos/TestDriver.py @@ -19,16 +19,15 @@ class TestConfigNXOSDriver(unittest.TestCase, TestConfigNetworkDriver): - @classmethod def setUpClass(cls): - hostname = '127.0.0.1' - username = 'vagrant' - password = 'vagrant' - cls.vendor = 'nxos' + hostname = "127.0.0.1" + username = "vagrant" + password = "vagrant" + cls.vendor = "nxos" cls.device = nxos.NXOSDriver(hostname, username, password) cls.device.open() - cls.device.load_replace_candidate(filename='%s/initial.conf' % cls.vendor) + cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor) cls.device.commit_config() diff --git a/test/nxos/conftest.py b/test/nxos/conftest.py index c2f95b292..cd5f051a6 100644 --- a/test/nxos/conftest.py +++ b/test/nxos/conftest.py @@ -9,16 +9,18 @@ from napalm.nxos import nxos -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def set_device_parameters(request): """Set up the class.""" + def fin(): request.cls.device.close() + request.addfinalizer(fin) request.cls.driver = nxos.NXOSDriver request.cls.patched_driver = PatchedNXOSDriver - request.cls.vendor = 'nxos' + request.cls.vendor = "nxos" parent_conftest.set_device_parameters(request) @@ -33,16 +35,14 @@ class PatchedNXOSDriver(nxos.NXOSDriver): def __init__(self, hostname, username, password, timeout=60, optional_args=None): super().__init__(hostname, username, password, timeout, optional_args) - self.patched_attrs = ['device'] + self.patched_attrs = ["device"] self.device = FakeNXOSDevice() def disconnect(self): pass def is_alive(self): - return { - 'is_alive': True # In testing everything works.. - } + return {"is_alive": True} # In testing everything works.. 
def open(self): pass @@ -50,6 +50,7 @@ def open(self): class FakeNXOSDevice(BaseTestDouble): """NXOS device test double.""" + def __init__(self): super().__init__() @@ -66,7 +67,7 @@ def _send_command_list(self, commands): def show(self, command, raw_text=False): """Fake show.""" - filename = '{}.json'.format(command.replace(' ', '_')) + filename = "{}.json".format(command.replace(" ", "_")) full_path = self.find_file(filename) if raw_text: diff --git a/test/nxos/test_getters.py b/test/nxos/test_getters.py index 21af8bae3..48a993d94 100644 --- a/test/nxos/test_getters.py +++ b/test/nxos/test_getters.py @@ -15,7 +15,8 @@ def mock_time(): @pytest.mark.usefixtures("set_device_parameters") class TestGetter(BaseTestGetters): """Test get_* methods.""" - @patch('time.time', mock_time) + + @patch("time.time", mock_time) @wrap_test_cases def test_get_interfaces(self, test_case): """Test get_interfaces.""" diff --git a/test/nxos_ssh/TestDriver.py b/test/nxos_ssh/TestDriver.py index 1f97ea4cc..d1bf711e5 100644 --- a/test/nxos_ssh/TestDriver.py +++ b/test/nxos_ssh/TestDriver.py @@ -19,16 +19,15 @@ class TestConfigNXOSDriver(unittest.TestCase, TestConfigNetworkDriver): - @classmethod def setUpClass(cls): - hostname = '127.0.0.1' - username = 'vagrant' - password = 'vagrant' - cls.vendor = 'nxos' + hostname = "127.0.0.1" + username = "vagrant" + password = "vagrant" + cls.vendor = "nxos" cls.device = nxos.NXOSDriver(hostname, username, password) cls.device.open() - cls.device.load_replace_candidate(filename='%s/initial.conf' % cls.vendor) + cls.device.load_replace_candidate(filename="%s/initial.conf" % cls.vendor) cls.device.commit_config() diff --git a/test/nxos_ssh/conftest.py b/test/nxos_ssh/conftest.py index 8a55bada6..0b7bafb33 100644 --- a/test/nxos_ssh/conftest.py +++ b/test/nxos_ssh/conftest.py @@ -9,16 +9,18 @@ from napalm.nxos_ssh import nxos_ssh -@pytest.fixture(scope='class') +@pytest.fixture(scope="class") def set_device_parameters(request): """Set up the class.""" + def fin(): request.cls.device.close() + request.addfinalizer(fin) request.cls.driver = nxos_ssh.NXOSSSHDriver request.cls.patched_driver = PatchedNXOSSSHDriver - request.cls.vendor = 'nxos_ssh' + request.cls.vendor = "nxos_ssh" parent_conftest.set_device_parameters(request) @@ -29,18 +31,17 @@ def pytest_generate_tests(metafunc): class PatchedNXOSSSHDriver(nxos_ssh.NXOSSSHDriver): """Patched NXOS Driver.""" + def __init__(self, hostname, username, password, timeout=60, optional_args=None): super().__init__(hostname, username, password, timeout, optional_args) - self.patched_attrs = ['device'] + self.patched_attrs = ["device"] self.device = FakeNXOSSSHDevice() def disconnect(self): pass def is_alive(self): - return { - 'is_alive': True # In testing everything works.. - } + return {"is_alive": True} # In testing everything works.. 
def open(self): pass @@ -48,8 +49,9 @@ def open(self): class FakeNXOSSSHDevice(BaseTestDouble): """NXOS device test double.""" + def send_command(self, command, **kwargs): - filename = '{}.txt'.format(self.sanitize_text(command)) + filename = "{}.txt".format(self.sanitize_text(command)) full_path = self.find_file(filename) result = self.read_txt_file(full_path) return py23_compat.text_type(result) diff --git a/test/nxos_ssh/test_getters.py b/test/nxos_ssh/test_getters.py index 21af8bae3..48a993d94 100644 --- a/test/nxos_ssh/test_getters.py +++ b/test/nxos_ssh/test_getters.py @@ -15,7 +15,8 @@ def mock_time(): @pytest.mark.usefixtures("set_device_parameters") class TestGetter(BaseTestGetters): """Test get_* methods.""" - @patch('time.time', mock_time) + + @patch("time.time", mock_time) @wrap_test_cases def test_get_interfaces(self, test_case): """Test get_interfaces.""" diff --git a/vagrant/provision.py b/vagrant/provision.py index 46ada3bc5..03a7f1aaa 100755 --- a/vagrant/provision.py +++ b/vagrant/provision.py @@ -15,14 +15,18 @@ def print_info_message(): - print("BOX is no longer reachable with vagrant up. Use ssh (check the IP in the initial conf)") + print( + "BOX is no longer reachable with vagrant up. Use ssh (check the IP in the initial conf)" + ) print("Don't forget to change the network type of the first NIC of the box.") def provision_iosxr(port, username, password): - device = IOSXR(hostname='127.0.0.1', username=username, password=password, port=port) + device = IOSXR( + hostname="127.0.0.1", username=username, password=password, port=port + ) device.open() - device.load_candidate_config(filename='../iosxr/initial.conf') + device.load_candidate_config(filename="../iosxr/initial.conf") try: device.commit_replace_config() @@ -33,30 +37,30 @@ def provision_iosxr(port, username, password): def provision_eos(port, username, password): connection = pyeapi.client.connect( - transport='https', - host='localhost', - username='vagrant', - password='vagrant', - port=port + transport="https", + host="localhost", + username="vagrant", + password="vagrant", + port=port, ) device = pyeapi.client.Node(connection) commands = list() - commands.append('configure session') - commands.append('rollback clean-config') + commands.append("configure session") + commands.append("rollback clean-config") - with open('../eos/initial.conf', 'r') as f: + with open("../eos/initial.conf", "r") as f: lines = f.readlines() for line in lines: line = line.strip() - if line == '': + if line == "": continue - if line.startswith('!'): + if line.startswith("!"): continue commands.append(line) - commands[-1] = 'commit' + commands[-1] = "commit" try: device.run_commands(commands) @@ -66,14 +70,14 @@ def provision_eos(port, username, password): def provision_junos(port, username, password): - device = Device('127.0.0.1', user=username, port=port) + device = Device("127.0.0.1", user=username, port=port) device.open() device.bind(cu=Config) - with open('../junos/initial.conf', 'r') as f: + with open("../junos/initial.conf", "r") as f: configuration = f.read() - device.cu.load(configuration, format='text', overwrite=True) + device.cu.load(configuration, format="text", overwrite=True) try: device.cu.commit() @@ -89,9 +93,9 @@ def provision_junos(port, username, password): username = sys.argv[3] password = sys.argv[4] - if os == 'iosxr': + if os == "iosxr": provision_iosxr(port, username, password) - elif os == 'eos': + elif os == "eos": provision_eos(port, username, password) - elif os == 'junos': + elif os == "junos": 
provision_junos(port, username, password) From e6b9fb52e4af74f48ae614df49cd615f48b6f1fc Mon Sep 17 00:00:00 2001 From: Kirk Byers Date: Sun, 16 Dec 2018 20:56:23 -0800 Subject: [PATCH 2/3] Update tox to use black --- .travis.yml | 11 +++++++---- tox.ini | 20 ++++++++++++++++++-- 2 files changed, 25 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 358963511..92f067524 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,17 +2,20 @@ language: python python: - 2.7 - - 3.4 - 3.5 - 3.6 matrix: include: - - python: 3.6 - env: TOXENV=sphinx + - python: 3.6 + env: TOXENV=pylama + - python: 3.6 + env: TOXENV=black + - python: 3.6 + env: TOXENV=sphinx install: - - pip install tox==3.0.0 tox-travis coveralls + - pip install tox==3.5.3 tox-travis coveralls deploy: provider: pypi diff --git a/tox.ini b/tox.ini index 83f16b50a..f9d23fc45 100644 --- a/tox.ini +++ b/tox.ini @@ -1,13 +1,30 @@ [tox] -envlist = py27,py34,py35,py36 +envlist = py27,py35,py36 [testenv] deps = -rrequirements.txt -rrequirements-dev.txt +passenv = * + commands = py.test --cov=napalm --cov-report term-missing -vs --pylama {posargs} +[testenv:black] +deps = black==18.9b0 + +basepython = python3.6 +commands = + black --check . + +[testenv:pylama] +deps = + -rrequirements-dev.txt + +basepython = python3.6 +commands = + pylama . + [testenv:sphinx] deps = -rdocs/requirements.txt @@ -19,4 +36,3 @@ commands = whitelist_externals = make - From 618de51121367cc72c7ab96a8a7d3f79a5c980bc Mon Sep 17 00:00:00 2001 From: Kirk Byers Date: Sun, 16 Dec 2018 21:17:44 -0800 Subject: [PATCH 3/3] Update setup.cfg and minor lint fix --- napalm/base/test/helpers.py | 4 ++-- setup.cfg | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/napalm/base/test/helpers.py b/napalm/base/test/helpers.py index 45184cb8f..197eea074 100644 --- a/napalm/base/test/helpers.py +++ b/napalm/base/test/helpers.py @@ -23,8 +23,8 @@ def test_model(model, data): if py23_compat.PY2 and isinstance(data[key], long): # noqa # Properly handle PY2 long correct_class = ( - isinstance(data[key], long) - and isinstance(1, instance_class) # noqa + isinstance(data[key], long) # noqa + and isinstance(1, instance_class) and correct_class ) else: diff --git a/setup.cfg b/setup.cfg index a268dc590..d89616ddc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ license_file = LICENSE [pylama] linters = mccabe,pep8,pyflakes -ignore = D203,C901 +ignore = D203,C901,E203 skip = .tox/* [pylama:pep8]