From 4da03285a661e8dddeb5eea63e0b410d1d410992 Mon Sep 17 00:00:00 2001
From: Steffen Schumacher
Date: Wed, 9 Sep 2020 14:05:49 +0200
Subject: [PATCH] Port to py3.6

fixes #1252
Move to newest or replacement libs
Fix syntax to 3.6+
Overhaul strings to optimize performance and readability
Update build stuff to 3.6 + move docker img to 18.04/bionic
---
 Dockerfile.nipapd            |  29 +-
 ca_certs/README.rst          |   8 +
 nipap/MANIFEST.in            |   2 +
 nipap/Makefile               |   4 +-
 nipap/debian/control         |   6 +-
 nipap/entrypoint.sh          |   5 +-
 nipap/nipap-passwd           |  78 +--
 nipap/nipap/authlib.py       |  80 ++-
 nipap/nipap/backend.py       | 964 +++++++++++++----------------------
 nipap/nipap/daemon.py        | 309 +++++------
 nipap/nipap/errors.py        |   1 -
 nipap/nipap/nipapconfig.py   |  14 +-
 nipap/nipap/smart_parsing.py | 576 ++++++++++-----------
 nipap/nipap/xmlrpc.py        | 369 ++++++--------
 nipap/nipapd                 |  83 +--
 nipap/requirements.txt       |  11 +-
 nipap/setup.py               |  84 +--
 nipap/wait-for-it.sh         | 178 +++++++
 nipap/xml-test.py            | 162 +++---
 nipap/xmlbench.py            | 111 ++--
 tests/nipapbase.py           |   1 +
 21 files changed, 1470 insertions(+), 1605 deletions(-)
 create mode 100644 ca_certs/README.rst
 create mode 100755 nipap/wait-for-it.sh

diff --git a/Dockerfile.nipapd b/Dockerfile.nipapd
index 14d7fe265..0b7a91fe1 100644
--- a/Dockerfile.nipapd
+++ b/Dockerfile.nipapd
@@ -32,7 +32,7 @@
 # via a volume.
 #
 
-FROM ubuntu:xenial
+FROM ubuntu:bionic
 MAINTAINER Kristian Larsson
 
 ENV DEBIAN_FRONTEND=noninteractive
@@ -44,19 +44,30 @@ RUN apt-get update -qy && apt-get upgrade -qy \
     libpq-dev \
     libsqlite3-dev \
     postgresql-client \
-    python \
-    python-all \
-    python-docutils \
-    python-pip \
-    python-dev \
-    && pip --no-input install envtpl \
+    software-properties-common \
+    python3 \
+    python3-all \
+    python3-pip \
+    python3-dev \
+    libsasl2-dev \
+    libldap2-dev \
+    libssl-dev \
     && rm -rf /var/lib/apt/lists/*
 
+# Install any additional CA certs from ca_certs folder required by corp proxies etc
+RUN mkdir -p /usr/share/ca-certificates/extra
+COPY ca_certs/*.crt /usr/share/ca-certificates/extra/
+RUN ls /usr/share/ca-certificates/extra/*.crt | sed 's/\/usr\/share\/ca-certificates\///g' >> /etc/ca-certificates.conf
+RUN update-ca-certificates
+RUN pip3 install --upgrade pip
+RUN pip3 config set global.cert /etc/ssl/certs/ca-certificates.crt
+
 COPY nipap /nipap
 WORKDIR /nipap
-RUN pip --no-input install -r requirements.txt \
-    && python setup.py install
+RUN pip3 install --no-input envtpl
+RUN pip3 --no-input install -r requirements.txt \
+    && python3 setup.py install
 
 EXPOSE 1337
 ENV LISTEN_ADDRESS=0.0.0.0 LISTEN_PORT=1337 SYSLOG=false DB_PORT=5432 DB_SSLMODE=disable
diff --git a/ca_certs/README.rst b/ca_certs/README.rst
new file mode 100644
index 000000000..94d3a7549
--- /dev/null
+++ b/ca_certs/README.rst
@@ -0,0 +1,8 @@
+Custom CA Certs for Docker containers
+=====================================
+If you need to include specific CA certs which you must trust, place them here
+in PEM format, named \*.crt.
+
+This may be required if you need to build the container from inside a network
+that uses a proxy or similar, or if your containers depend on internal
+services.
\ No newline at end of file
diff --git a/nipap/MANIFEST.in b/nipap/MANIFEST.in
index 796565ce8..9720f598a 100644
--- a/nipap/MANIFEST.in
+++ b/nipap/MANIFEST.in
@@ -1 +1,3 @@
 include README.rst MANIFEST.in
+include *.man.rst
+include requirements.txt
diff --git a/nipap/Makefile b/nipap/Makefile
index 97c7b65e5..b3584fa88 100644
--- a/nipap/Makefile
+++ b/nipap/Makefile
@@ -1,7 +1,7 @@
 # $Id: Makefile,v 1.6 2011/04/18 17:14:00 lukagarb Exp $
 #
-PYTHON=`which python2`
+PYTHON=`which python3`
 DESTDIR=/
 BUILDIR=$(CURDIR)/debian/python-nipap-build
 PROJECT=nipap
@@ -38,7 +38,7 @@ test:
 check:
 
 upload:
-	python setup.py sdist upload
+	python3 setup.py sdist upload
 
 clean:
 	$(PYTHON) setup.py clean
diff --git a/nipap/debian/control b/nipap/debian/control
index 36ae12f89..72dd0ddc4 100644
--- a/nipap/debian/control
+++ b/nipap/debian/control
@@ -2,13 +2,13 @@ Source: nipap
 Maintainer: Lukas Garberg
 Section: python
 Priority: optional
-Build-Depends: python (>= 2.7), debhelper (>= 7.4.3)
+Build-Depends: python3 (>= 3.6), debhelper (>= 7.4.3)
 Standards-Version: 4.4.0
 
 Package: nipap-common
 Architecture: all
-Depends: python (>= 2.7), ${misc:Depends}, python-pysqlite2, python-ipy
+Depends: python3 (>= 3.6), ${misc:Depends}, python-pysqlite2, python-ipy
 Suggests: python-ldap
 Description: Neat IP Address Planner
 The Neat IP Address Planner, NIPAP, is a system built for efficiently managing
@@ -17,7 +17,7 @@ Description: Neat IP Address Planner
 
 Package: nipapd
 Architecture: all
-Depends: debconf, nipap-common, python (>= 2.7), ${misc:Depends}, python-psycopg2, python-flask, python-flask-xml-rpc, python-flask-restful, python-flask-compress, python-tornado, python-parsedatetime, python-tz, python-dateutil, python-psutil, python-pyparsing, python-jwt, python-requests
+Depends: debconf, nipap-common, python3 (>= 3.6), ${misc:Depends}, python-psycopg2, python-flask, python-flask-xml-rpc, python-flask-restful, python-flask-compress, python-tornado, python-parsedatetime, python-tz, python-dateutil, python-psutil, python-pyparsing, python-jwt, python-requests
 Description: Neat IP Address Planner XML-RPC daemon
 The Neat IP Address Planner, NIPAP, is a system built for efficiently managing
 large amounts of IP addresses. This is the XML-RPC daemon.
diff --git a/nipap/entrypoint.sh b/nipap/entrypoint.sh
index bae261919..fdd4b4c24 100755
--- a/nipap/entrypoint.sh
+++ b/nipap/entrypoint.sh
@@ -1,11 +1,12 @@
 #!/bin/sh
-envtpl --allow-missing /nipap/nipap.conf.dist -o /etc/nipap/nipap.conf
+envtpl --allow-missing --keep-template /nipap/nipap.conf.dist -o /etc/nipap/nipap.conf
+/bin/bash /nipap/wait-for-it.sh -t 60 $DB_HOST:$DB_PORT -- sleep 5
 /usr/sbin/nipap-passwd create-database
 if [ -n "$NIPAP_USERNAME" -a -n "$NIPAP_PASSWORD" ]; then
 	echo "Creating user '$NIPAP_USERNAME'"
 	/usr/sbin/nipap-passwd add --username $NIPAP_USERNAME --name "NIPAP user" --password $NIPAP_PASSWORD
 fi
-
+echo "Starting nipap daemon..."
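The new wait-for-it.sh step makes the entrypoint block until PostgreSQL actually accepts TCP connections before nipap-passwd touches the database (the trailing sleep gives the server time to finish initialising). For illustration only, the core of that wait is roughly the following Python loop; the host and port values are placeholders standing in for $DB_HOST/$DB_PORT::

    import socket
    import time

    def wait_for_port(host, port, timeout=60):
        """Poll until host:port accepts TCP connections or the timeout expires."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            try:
                with socket.create_connection((host, port), timeout=2):
                    return True
            except OSError:
                time.sleep(1)   # not up yet, try again shortly
        return False

    wait_for_port('db', 5432)   # placeholder host/port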
exec /usr/sbin/nipapd --debug --foreground --auto-install-db --auto-upgrade-db --no-pid-file diff --git a/nipap/nipap-passwd b/nipap/nipap-passwd index ab70256f2..2be2cd529 100755 --- a/nipap/nipap-passwd +++ b/nipap/nipap-passwd @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Manages NIPAP LocalAuth authentication database # @@ -16,10 +16,11 @@ if __name__ == '__main__': # parse arguments parser = argparse.ArgumentParser(description='NIPAP User configuration') parser.add_argument('action', - metavar='{list, add, delete, modify, test-user, latest-version, create-database, upgrade-database}', - nargs='?', type=str, - choices=['list', 'add', 'delete', 'modify', 'test-user', 'latest-version', 'create-database', 'upgrade-database'], - help='define an action to execute') + metavar='{list, add, delete, modify, test-user, latest-version, create-database, upgrade-database}', + nargs='?', type=str, + choices=['list', 'add', 'delete', 'modify', 'test-user', 'latest-version', 'create-database', + 'upgrade-database'], + help='define an action to execute') parser.add_argument('-u', '--username', dest='user', type=str, help='username') parser.add_argument('-p', '--password', dest='password', type=str, @@ -38,7 +39,7 @@ if __name__ == '__main__': help="database file [default: read from config]") parser.add_argument('-c', '--config', dest='config', default='/etc/nipap/nipap.conf', type=str, help= - 'read configuration from CONFIG [default:/etc/nipap/nipap.conf]') + 'read configuration from CONFIG [default:/etc/nipap/nipap.conf]') parser.add_argument('--version', action='version', version='nipap-passwd version %s' % nipap.__version__) args = parser.parse_args() @@ -52,8 +53,8 @@ if __name__ == '__main__': try: cfg = NipapConfig(args.config) - except NipapConfigError, exc: - print >> sys.stderr, "The specified configuration file ('" + args.config + "') does not exist" + except NipapConfigError as exc: + print("The specified configuration file ('" + args.config + "') does not exist", file=sys.stderr) sys.exit(1) if args.db_file: @@ -63,65 +64,69 @@ if __name__ == '__main__': if args.action == 'list': # print a nicely formatted list of users - header = "%-20s %-25s %-7s %-7s" % ('username', 'real name', 'trusted', 'read only') - print "%s\n%s" % (header,''.join('-' for x in range(len(header)))) + header = "{:<20} {:<25} {:<7} {:<7}".format('username', 'real name', 'trusted', 'read only') + print("{}\n{}".format(header, ''.join('-' for x in range(len(header))))) for u in a.list_users(): if not args.user or args.user == u['username']: - print "%-20s %-25s %-7d %-7d" % (u['username'], u['full_name'], int(u['trusted']), int(u['readonly'])) + print("%-20s %-25s %-7d %-7d" % (u['username'], u['full_name'], int(u['trusted']), int(u['readonly']))) elif args.action == 'test-user': if not args.user: - print "Please specify user with --user" + print("Please specify user with --user") sys.exit(1) if not args.password: - print "Please specify password with --password" + print("Please specify password with --password") sys.exit(1) af = nipap.authlib.AuthFactory() auth = af.get_auth(args.user, args.password, "nipap", {}) if not auth.authenticate(): - print "The username or password seems to be wrong" + print("The username or password seems to be wrong") sys.exit(2) - print "Username and password seem to be correct" + print("Username and password seem to be correct") sys.exit(0) - + elif args.action == 'add': if not args.user: - print "Please specify user with --user" + print("Please specify user with 
--user") sys.exit(1) if not args.password: - print "Please specify password with --password" + print("Please specify password with --password") sys.exit(1) if not args.name: - print "Please specify name with --name" + print("Please specify name with --name") sys.exit(1) try: a.add_user(args.user, args.password, args.name, args.trusted, args.readonly) - print "Added user %s to database %s" % (args.user, cfg.get('auth.backends.local','db_path')) + print("Added user {} to database {}".format(args.user, cfg.get('auth.backends.local', 'db_path'))) except nipap.authlib.AuthError as exc: if str(exc) == 'attempt to write a readonly database': - print "You do not have sufficient rights to write to database: %s" % (cfg.get('auth.backends.local','db_path')) + print("You do not have sufficient rights to write to database: %s" % ( + cfg.get('auth.backends.local', 'db_path'))) elif str(exc) == 'column username is not unique': - print "Username '%s' already exists in the database: %s " % (args.user, cfg.get('auth.backends.local','db_path')) + print("Username '{}' already exists in the database: {} ".format(args.user, + cfg.get('auth.backends.local', + 'db_path'))) else: - print exc + print(exc) elif args.action == 'delete': try: if not args.user: - print "Please specify user with --user" + print("Please specify user with --user") sys.exit(1) a.remove_user(args.user) - print "User %s deleted from database %s" % (args.user, cfg.get('auth.backends.local', 'db_path')) + print("User {} deleted from database {}".format(args.user, cfg.get('auth.backends.local', 'db_path'))) except nipap.authlib.AuthError as exc: if str(exc) == 'attempt to write a readonly database': - print "You do not have sufficient rights to write to database: %s" % (cfg.get('auth.backends.local','db_path')) + print("You do not have sufficient rights to write to database: %s" % ( + cfg.get('auth.backends.local', 'db_path'))) else: - print exc + print(exc) elif args.action == 'modify': if not args.user: - print "Please specify user with --user" + print("Please specify user with --user") sys.exit(1) data = {} @@ -135,16 +140,17 @@ if __name__ == '__main__': data['readonly'] = args.readonly if len(data) == 0: - print "Please specify value to change" + print("Please specify value to change") sys.exit(1) try: a.modify_user(args.user, data) except nipap.authlib.AuthError as exc: if str(exc) == 'attempt to write a readonly database': - print "You do not have sufficient rights to write to database: %s" % (cfg.get('auth.backends.local','db_path')) + print("You do not have sufficient rights to write to database: %s" % ( + cfg.get('auth.backends.local', 'db_path'))) else: - print exc + print(exc) elif args.action == 'upgrade-database': a._upgrade_database() @@ -158,13 +164,13 @@ if __name__ == '__main__': try: latest = a._latest_db_version() if not latest: - print >> sys.stderr, "It seems your Sqlite database for local auth is out of date" - print >> sys.stderr, "Please run 'nipap-passwd upgrade-database' to upgrade your database." 
+ print("It seems your Sqlite database for local auth is out of date", file=sys.stderr) + print("Please run 'nipap-passwd upgrade-database' to upgrade your database.", file=sys.stderr) sys.exit(2) - except nipap.authlib.AuthSqliteError, e: - print >> sys.stderr, "Error checking version of Sqlite database for local auth: %s" % e + except nipap.authlib.AuthSqliteError as e: + print("Error checking version of Sqlite database for local auth: %s" % e, file=sys.stderr) sys.exit(1) - print "Sqlite database for local auth is of the latest version." + print("Sqlite database for local auth is of the latest version.") sys.exit(0) else: diff --git a/nipap/nipap/authlib.py b/nipap/nipap/authlib.py index 0050e40fe..a3dc61c1d 100644 --- a/nipap/nipap/authlib.py +++ b/nipap/nipap/authlib.py @@ -69,7 +69,7 @@ import hashlib import traceback -from nipapconfig import NipapConfig +from .nipapconfig import NipapConfig # Used by auth modules import sqlite3 @@ -120,7 +120,7 @@ def _init_backends(self): auth_backend = section_components[1] self._backends[auth_backend] = eval(self._config.get(section, 'type')) - self._logger.debug("Registered auth backends %s" % str(self._backends)) + self._logger.debug("Registered auth backends %s", str(self._backends)) def reload(self): """ Reload AuthFactory. @@ -157,7 +157,7 @@ def get_auth_bearer_token(self, bearer_token, authoritative_source, auth_options def get_auth(self, username, password, authoritative_source, auth_options=None): """ Returns an authentication object. - + Examines the auth backend given after the '@' in the username and returns a suitable instance of a subclass of the BaseAuth class. @@ -176,7 +176,7 @@ def get_auth(self, username, password, authoritative_source, auth_options=None): auth_options = {} # validate arguments - if (authoritative_source is None): + if authoritative_source is None: raise AuthError("Missing authoritative_source.") # remove invalid cache entries @@ -185,7 +185,7 @@ def get_auth(self, username, password, authoritative_source, auth_options=None): if self._auth_cache[key]['valid_until'] < datetime.utcnow(): rem.append(key) for key in rem: - del (self._auth_cache[key]) + del self._auth_cache[key] user_authbackend = username.rsplit('@', 1) @@ -194,7 +194,7 @@ def get_auth(self, username, password, authoritative_source, auth_options=None): backend = "" if len(user_authbackend) == 1: backend = self._config.get('auth', 'default_backend') - self._logger.debug("Using default auth backend %s" % backend) + self._logger.debug("Using default auth backend %s", backend) else: backend = user_authbackend[1] @@ -202,20 +202,19 @@ def get_auth(self, username, password, authoritative_source, auth_options=None): auth_str = (str(username) + str(password) + str(authoritative_source) + str(auth_options)) if auth_str in self._auth_cache: - self._logger.debug('found cached auth object for user %s' % username) + self._logger.debug('found cached auth object for user %s', username) return self._auth_cache[auth_str]['auth_object'] # Create auth object try: auth = self._backends[backend](backend, user_authbackend[0], password, authoritative_source, auth_options) except KeyError: - raise AuthError("Invalid auth backend '%s' specified" % - str(backend)) + raise AuthError("Invalid auth backend '{}' specified".format(backend)) # save auth object to cache self._auth_cache[auth_str] = { 'valid_until': datetime.utcnow() + timedelta(seconds=self._config.getint('auth', 'auth_cache_timeout')), - 'auth_object': auth + 'auth_object': auth, } return auth @@ -223,7 +222,7 @@ 
def get_auth(self, username, password, authoritative_source, auth_options=None): class BaseAuth: """ A base authentication class. - + All authentication modules should extend this class. """ @@ -512,10 +511,13 @@ def authenticate(self): self.password) except ldap.SERVER_DOWN as exc: raise AuthError('Could not connect to LDAP server') - except (ldap.INVALID_CREDENTIALS, ldap.INVALID_DN_SYNTAX, - ldap.UNWILLING_TO_PERFORM) as exc: + except ( + ldap.INVALID_CREDENTIALS, + ldap.INVALID_DN_SYNTAX, + ldap.UNWILLING_TO_PERFORM, + ) as exc: # Auth failed - self._logger.debug('erroneous password for user %s' % self.username) + self._logger.debug('erroneous password for user %s', self.username) self._authenticated = False return self._authenticated @@ -532,9 +534,12 @@ def authenticate(self): else: search_conn = self._ldap_conn - res = search_conn.search_s(self._ldap_basedn, ldap.SCOPE_SUBTREE, - self._ldap_search.format(ldap.dn.escape_dn_chars(self.username)), - ['cn', 'memberOf']) + res = search_conn.search_s( + self._ldap_basedn, + ldap.SCOPE_SUBTREE, + self._ldap_search.format(ldap.dn.escape_dn_chars(self.username)), + ['cn', 'memberOf'], + ) if res[0][1]['cn'][0] is not None: self.full_name = res[0][1]['cn'][0].decode('utf-8') # check for ro_group membership if ro_group is configured @@ -569,11 +574,8 @@ def authenticate(self): self._authenticated = True - self._logger.debug('successfully authenticated as ' + - '%s, username %s, full_name %s, readonly %s' % ( - self.authenticated_as, - self.username, self.full_name, - str(self.readonly))) + self._logger.debug('successfully authenticated as %s, username %s, full_name %s, readonly %s', + self.authenticated_as, self.username, self.full_name, str(self.readonly)) return self._authenticated @@ -619,12 +621,13 @@ def __init__(self, name, username, password, authoritative_source, auth_options= try: self._db_conn = sqlite3.connect( self._cfg.get('auth.backends.' + self.auth_backend, 'db_path'), - check_same_thread=False) + check_same_thread=False + ) self._db_conn.row_factory = sqlite3.Row self._db_curs = self._db_conn.cursor() except sqlite3.Error as exc: - self._logger.error('Could not open user database: %s' % str(exc)) + self._logger.error('Could not open user database: %s', str(exc)) raise AuthError(str(exc)) def _latest_db_version(self): @@ -639,9 +642,8 @@ def _latest_db_version(self): if len(self._db_curs.fetchall()) < 1: raise AuthSqliteError("No 'user' table.") - for column in ('username', 'pwd_salt', 'pwd_hash', 'full_name', - 'trusted', 'readonly'): - sql = "SELECT %s FROM user" % column + for column in ('username', 'pwd_salt', 'pwd_hash', 'full_name', 'trusted', 'readonly'): + sql = "SELECT " + column + " FROM user" try: self._db_curs.execute(sql) except: @@ -693,19 +695,19 @@ def authenticate(self): if self._authenticated is not None: return self._authenticated - self._logger.debug('Trying to authenticate as user \'%s\'' % self.username) + self._logger.debug('Trying to authenticate as user \'%s\'', self.username) user = self.get_user(self.username) # Was user found? 
if user is None: - self._logger.debug('unknown user %s' % self.username) + self._logger.debug('unknown user %s', self.username) self._authenticated = False return self._authenticated # verify password if self._gen_hash(self.password, user['pwd_salt']) != user['pwd_hash']: # Auth failed - self._logger.debug('erroneous password for user %s' % self.username) + self._logger.debug('erroneous password for user %s', self.username) self._authenticated = False return self._authenticated @@ -734,11 +736,8 @@ def authenticate(self): else: self.full_name = user['full_name'] - self._logger.debug( - 'successfully authenticated as' + - ' %s, username %s, full_name %s, readonly %s' % ( - self.authenticated_as, self.username, self.full_name, - str(self.readonly))) + self._logger.debug('successfully authenticated as %s, username %s, full_name %s, readonly %s', + self.authenticated_as, self.username, self.full_name, str(self.readonly)) return self._authenticated def get_user(self, username): @@ -777,8 +776,7 @@ def add_user(self, username, password, full_name=None, trusted=False, readonly=F (?, ?, ?, ?, ?, ?)''' try: self._db_curs.execute(sql, (username, salt, - self._gen_hash(password, salt), - full_name, trusted or False, + self._gen_hash(password, salt), full_name, trusted or False, readonly or False)) self._db_conn.commit() except (sqlite3.OperationalError, sqlite3.IntegrityError) as error: @@ -810,10 +808,10 @@ def modify_user(self, username, data): char_set = string.ascii_letters + string.digits data['pwd_salt'] = ''.join(random.choice(char_set) for x in range(8)) data['pwd_hash'] = self._gen_hash(data['password'], data['pwd_salt']) - del (data['password']) + del data['password'] sql = "UPDATE user SET " - sql += ', '.join("%s = ?" % k for k in sorted(data)) + sql += ', '.join(k + " = ?" for k in sorted(data)) sql += " WHERE username = ?" vals = [] @@ -843,8 +841,8 @@ def _gen_hash(self, password, salt): # generate hash h = hashlib.sha1() - h.update(salt) - h.update(password) + h.update(str.encode(salt)) # encode to bytes + h.update(str.encode(password)) # encode to bytes return h.hexdigest() diff --git a/nipap/nipap/backend.py b/nipap/nipap/backend.py index 7550657e9..7b3d193c0 100644 --- a/nipap/nipap/backend.py +++ b/nipap/nipap/backend.py @@ -188,30 +188,30 @@ from functools import wraps import dateutil.parser import datetime -import exceptions import logging import psycopg2 import psycopg2.extras -import pytz +from psycopg2.extensions import adapt import shlex -import socket import time import re import IPy -from errors import * -import authlib -import smart_parsing -import db_schema +from .errors import * +from . import authlib +from . import smart_parsing +from . import db_schema import nipap # support multiple versions of parsedatetime try: import parsedatetime + pdt = parsedatetime.Calendar(parsedatetime.Constants(usePyICU=False)) except: import parsedatetime.parsedatetime import parsedatetime.parsedatetime_consts as pdc + pdt = parsedatetime.parsedatetime.Calendar(pdc.Constants()) @@ -547,63 +547,31 @@ '<<': '<<', 'contained_within': '<<', '<<=': '<<=', - 'contained_within_equals': '<<=' - } + 'contained_within_equals': '<<=', +} """ Maps operators in a prefix query to SQL operators. """ - def requires_rw(f): """ Adds readwrite authorization This will check if the user is a readonly user and if so reject the query. Apply this decorator to readwrite functions. 
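A note on the _gen_hash() change above: hashlib in Python 3 accepts only bytes, so str input must be encoded first. str.encode(salt) is the unbound form of salt.encode() and defaults to UTF-8, which yields the same digest Python 2 produced for ASCII salts and passwords, so existing stored hashes stay valid (this compatibility concern is also why the weak sha1 algorithm is kept). A minimal illustration::

    import hashlib

    h = hashlib.sha1()              # kept for compatibility with stored hashes
    h.update('somesalt'.encode())   # str must become bytes in py3
    h.update('secret'.encode())
    print(h.hexdigest())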
""" - @wraps(f) + @wraps(f) def decorated(*args, **kwargs): auth = args[1] if auth.readonly: logger = logging.getLogger() - logger.info("read-only user '%s' is not authorized to run function '%s'" % (auth.username, f.__name__)) + logger.info("read-only user '%s' is not authorized to run function '%s'", auth.username, f.__name__) raise authlib.AuthorizationFailed("read-only user '%s' is not authorized to run function '%s'" % (auth.username, f.__name__)) return f(*args, **kwargs) return decorated - - - -class Inet(object): - """ This works around a bug in psycopg2 version somewhere before 2.4. The - __init__ function in the original class is broken and so this is merely - a copy with the bug fixed. - - Wrap a string to allow for correct SQL-quoting of inet values. - - Note that this adapter does NOT check the passed value to make sure it - really is an inet-compatible address but DOES call adapt() on it to make - sure it is impossible to execute an SQL-injection by passing an evil - value to the initializer. - """ - def __init__(self, addr): - self.addr = addr - - def prepare(self, conn): - self._conn = conn - - def getquoted(self): - obj = adapt(self.addr) - if hasattr(obj, 'prepare'): - obj.prepare(self._conn) - return obj.getquoted()+"::inet" - - def __str__(self): - return str(self.addr) - - def _parse_expires(expires): """ Parse the 'expires' attribute, guessing what format it is in and returning a datetime @@ -613,7 +581,7 @@ def _parse_expires(expires): return 'infinity' try: - return dateutil.parser.parse(unicode(expires)) + return dateutil.parser.parse(expires) except ValueError as exc: pass @@ -638,7 +606,7 @@ class Nipap: _logger = None _con_pg = None - _curs_pg = None + _curs_pg = None def __init__(self, auto_install_db=False, auto_upgrade_db=False): """ Constructor. 
@@ -649,7 +617,8 @@ def __init__(self, auto_install_db=False, auto_upgrade_db=False): self._logger = logging.getLogger(self.__class__.__name__) self._logger.debug("Initialising NIPAP") - from nipapconfig import NipapConfig + from .nipapconfig import NipapConfig + self._cfg = NipapConfig() self._auto_install_db = auto_install_db @@ -657,23 +626,10 @@ def __init__(self, auto_install_db=False, auto_upgrade_db=False): self._connect_db() - # # Miscellaneous help functions # - def _register_inet(self, oid=None, conn_or_curs=None): - """ Create the INET type and an Inet adapter.""" - from psycopg2 import extensions as _ext - if not oid: - oid = 869 - _ext.INET = _ext.new_type((oid, ), "INET", - lambda data, cursor: data and Inet(data) or None) - _ext.register_type(_ext.INET, self._con_pg) - return _ext.INET - - - def _is_ipv4(self, ip): """ Return true if given arg is a valid IPv4 address """ @@ -686,8 +642,6 @@ def _is_ipv4(self, ip): return True return False - - def _is_ipv6(self, ip): """ Return true if given arg is a valid IPv6 address """ @@ -700,13 +654,11 @@ def _is_ipv6(self, ip): return True return False - - def _get_afi(self, ip): """ Return address-family (4 or 6) for IP or None if invalid address """ - parts = unicode(ip).split("/") + parts = ip.split('/') if len(parts) == 1: # just an address if self._is_ipv4(ip): @@ -724,13 +676,13 @@ def _get_afi(self, ip): return None if self._is_ipv4(parts[0]): - if pl >= 0 and pl <= 32: + if 0 <= pl <= 32: # prefix mask must be between 0 and 32 return 4 # otherwise error return None elif self._is_ipv6(parts[0]): - if pl >= 0 and pl <= 128: + if 0 <= pl <= 128: # prefix mask must be between 0 and 128 return 6 # otherwise error @@ -741,8 +693,6 @@ def _get_afi(self, ip): # more than two parts.. this is neither an address or a prefix return None - - # # SQL related functions # @@ -765,7 +715,7 @@ def _connect_db(self): db_args['host'] = None for key in db_args.copy(): if db_args[key] is None: - del(db_args[key]) + del db_args[key] # Create database connection while True: @@ -773,24 +723,23 @@ def _connect_db(self): self._con_pg = psycopg2.connect(**db_args) self._con_pg.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) self._curs_pg = self._con_pg.cursor(cursor_factory=psycopg2.extras.DictCursor) - self._register_inet() psycopg2.extras.register_hstore(self._con_pg, globally=True, unicode=True) except psycopg2.Error as exc: - if re.search("database.*does not exist", unicode(exc)): + if re.search("database.*does not exist", str(exc)): raise NipapDatabaseNonExistentError("Database '%s' does not exist" % db_args['database']) # no hstore extension, assume empty db (it wouldn't work # otherwise) and do auto upgrade? 
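_get_afi() above splits off the prefix length manually and validates it with Python's chained comparisons (0 <= pl <= 32). As a point of comparison only, the IPy library that backend.py already imports can answer the same address-family question for well-formed prefixes; this standalone sketch is not the backend's exact logic::

    import IPy

    def get_afi(prefix):
        try:
            return IPy.IP(prefix).version()   # 4 or 6
        except ValueError:
            return None                       # not a valid address or prefix

    print(get_afi('10.0.0.0/8'))      # 4
    print(get_afi('2001:db8::/32'))   # 6
    print(get_afi('10.0.0.0/33'))     # None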
- if re.search("hstore type not found in the database", unicode(exc)): + if re.search("hstore type not found in the database", str(exc)): # automatically install if auto-install is enabled if self._auto_install_db: self._db_install(db_args['database']) continue raise NipapDatabaseMissingExtensionError("hstore extension not found in the database") - self._logger.error("pgsql: %s" % exc) + self._logger.error("pgsql: %s, using args: %s", exc, db_args) raise NipapError("Backend unable to connect to database") except psycopg2.Warning as warn: - self._logger.warning('pgsql: %s' % warn) + self._logger.warning('pgsql: %s', warn) # check db version try: @@ -804,25 +753,25 @@ def _connect_db(self): continue raise exc except NipapError as exc: - self._logger.error(unicode(exc)) + self._logger.error(str(exc)) raise exc if current_db_version != nipap.__db_version__: if self._auto_upgrade_db: self._db_upgrade(db_args['database']) continue - raise NipapDatabaseWrongVersionError("NIPAP PostgreSQL database is outdated. Schema version %s is required to run but you are using %s" % (nipap.__db_version__, current_db_version)) + raise NipapDatabaseWrongVersionError( + "NIPAP PostgreSQL database is outdated. Schema version {} is required to run but you are using " + "{}".format(nipap.__db_version__, current_db_version)) # if we reach this we should be fine and done break - - - def _execute(self, sql, opt=None, callno = 0): + def _execute(self, sql, opt=None, callno=0): """ Execute query, catch and log errors. """ - self._logger.debug("SQL: " + sql + " params: " + unicode(opt)) + self._logger.debug("SQL: %s params: %s", sql, str(opt)) try: self._curs_pg.execute(sql, opt) except psycopg2.InternalError as exc: @@ -843,22 +792,21 @@ def _execute(self, sql, opt=None, callno = 0): # we throw (and log) a more general exception. 
            # determine if it's "one of our" exceptions or something else
-            if len(unicode(exc).split(":")) < 2:
+            if len(str(exc).split(":")) < 2:
                raise NipapError(exc)
-            code = unicode(exc).split(":", 1)[0]
+            code = str(exc).split(":", 1)[0]
            try:
                int(code)
            except:
                raise NipapError(exc)
 
-            text = unicode(exc).splitlines()[0].split(":", 1)[1]
+            text = str(exc).splitlines()[0].split(":", 1)[1]
 
            if code == '1200':
                raise NipapValueError(text)
 
-            estr = "Internal database error: %s" % exc
-            self._logger.error(estr)
-            raise NipapError(unicode(exc))
+            self._logger.error("Internal database error: %s", exc)
+            raise NipapError(str(exc))
 
        except psycopg2.IntegrityError as exc:
            self._con_pg.rollback()
@@ -871,14 +819,13 @@ def _execute(self, sql, opt=None, callno = 0):
            if m is None:
                raise NipapDuplicateError("Objects primary keys already exist")
            cursor = self._con_pg.cursor()
-            cursor.execute(""" SELECT
-                    obj_description(oid)
+            cursor.execute(""" SELECT obj_description(oid)
                    FROM pg_class
-                    WHERE relname = %(relname)s""",
-                    { 'relname': m.group(1) })
+                    WHERE relname = %(relname)s""", {'relname': m.group(1)})
+            column_desc = ''
            for desc in cursor:
-                column_desc = unicode(desc[0])
+                column_desc = desc[0]
 
            # figure out the value for the duplicate value
            column_value = None
            if opt:
                try:
                    column_value = opt[m.group(2)]
                except:
                    pass
                else:
-                    raise NipapDuplicateError("Duplicate value for '" +
-                        unicode(column_desc) + "', the value '" +
-                        unicode(column_value) + "' is already in use.")
+                    raise NipapDuplicateError("Duplicate value for '{}', the value '{}' is "
+                                              "already in use.".format(column_desc, column_value))
 
-            raise NipapDuplicateError("Duplicate value for '" +
-                unicode(column_desc) +
-                "', the value you have inputted is already in use.")
+            raise NipapDuplicateError("Duplicate value for '{}', the value you have inputted is "
+                                      "already in use.".format(column_desc))
 
            self._logger.exception("Unhandled database IntegrityError:")
            raise NipapError("Unhandled integrity error.")
@@ -905,16 +850,16 @@ def _execute(self, sql, opt=None, callno = 0):
            m = re.search('invalid cidr value: "([^"]+)"', exc.pgerror)
            if m is not None:
-                strict_prefix = unicode(IPy.IP(m.group(1), make_net = True))
-                estr = "Invalid prefix (%s); bits set to right of mask. Network address for current mask: %s" % (m.group(1), strict_prefix)
-                raise NipapValueError(estr)
+                strict_prefix = IPy.IP(m.group(1), make_net=True)
+                estr = "Invalid prefix ({}); bits set to right of mask. Network address for current mask: {}"
+                raise NipapValueError(estr.format(m.group(1), strict_prefix))
 
-            m = re.search('invalid input syntax for(?: type)? (\w+): "([^"]+)"', exc.pgerror)
+            m = re.search(r'invalid input syntax for(?: type)? (\w+): "([^"]+)"', exc.pgerror)
(\w+): "([^"]+)"', exc.pgerror) if m is not None: if m.group(1) in ["cidr", "inet"]: - estr = "Invalid syntax for prefix (%s)" % m.group(2) + estr = "Invalid syntax for prefix ({})".format(m.group(2)) else: - estr = "Invalid syntax for %s (%s)" % (m.group(1), m.group(2)) + estr = "Invalid syntax for {} ({})".format(m.group(1), m.group(2)) raise NipapValueError(estr) self._logger.exception("Unhandled database DataError:") @@ -926,13 +871,13 @@ def _execute(self, sql, opt=None, callno = 0): except psycopg2.Error: pass - estr = "Unable to execute query: %s" % exc - self._logger.error(estr) + estr = "Unable to execute query: %s" + self._logger.error(estr, exc) # abort if we've already tried to reconnect if callno > 0: - self._logger.error(estr) - raise NipapError(estr) + self._logger.error(estr, exc) + raise NipapError(estr % exc) # reconnect to database and retry query self._logger.info("Reconnecting to database...") @@ -941,9 +886,7 @@ def _execute(self, sql, opt=None, callno = 0): return self._execute(sql, opt, callno + 1) except psycopg2.Warning as warn: - self._logger.warning(unicode(warn)) - - + self._logger.warning(warn) def _lastrowid(self): """ Get ID of last inserted column. @@ -954,9 +897,7 @@ def _lastrowid(self): for row in self._curs_pg: return row['last'] - - - def _sql_expand_insert(self, spec, key_prefix = '', col_prefix = ''): + def _sql_expand_insert(self, spec, key_prefix='', col_prefix=''): """ Expand a dict so it fits in a INSERT clause """ col = list(spec) @@ -971,9 +912,7 @@ def _sql_expand_insert(self, spec, key_prefix = '', col_prefix = ''): return sql, params - - - def _sql_expand_update(self, spec, key_prefix = '', col_prefix = ''): + def _sql_expand_update(self, spec, key_prefix='', col_prefix=''): """ Expand a dict so it fits in a INSERT clause """ sql = ', '.join(col_prefix + key + ' = %(' + key_prefix + key + ')s' for key in spec) @@ -983,16 +922,14 @@ def _sql_expand_update(self, spec, key_prefix = '', col_prefix = ''): return sql, params - - - def _sql_expand_where(self, spec, key_prefix = '', col_prefix = ''): + def _sql_expand_where(self, spec, key_prefix='', col_prefix=''): """ Expand a dict so it fits in a WHERE clause Logical operator is AND. """ sql = ' AND '.join(col_prefix + key + - ( ' IS ' if spec[key] is None else ' = ' ) + + (' IS ' if spec[key] is None else ' = ') + '%(' + key_prefix + key + ')s' for key in spec) params = {} for key in spec: @@ -1000,27 +937,23 @@ def _sql_expand_where(self, spec, key_prefix = '', col_prefix = ''): return sql, params - - # TODO: make this more generic and use for testing of spec too? def _check_attr(self, attr, req_attr, allowed_attr): + """ Check for presence of required attributes, and absence of illegal ones """ - """ - if type(attr) is not dict: + if not isinstance(attr, dict): raise NipapInputError("invalid input type, must be dict") for a in req_attr: - if not a in attr: - raise NipapMissingInputError("missing attribute %s" % a) + if a not in attr: + raise NipapMissingInputError("missing attribute {}".format(a)) for a in attr: if a not in allowed_attr: - raise NipapExtraneousInputError("extraneous attribute %s" % a) + raise NipapExtraneousInputError("extraneous attribute {}".format(a)) if 'avps' in attr and '' in attr['avps']: raise NipapValueError('AVP with empty name is not allowed') - - def _get_updated_rows(self, auth, function): """ Get rows updated by last update query @@ -1037,13 +970,7 @@ def _get_updated_rows(self, auth, function): # search_* API call. 
        qps = []
        for row in self._curs_pg:
-            qps.append(
-                {
-                    'operator': 'equals',
-                    'val1': 'id',
-                    'val2': row['id']
-                }
-            )
+            qps.append({'operator': 'equals', 'val1': 'id', 'val2': row['id']})
 
        # if we didn't update anything return empty list
        if len(qps) == 0:
@@ -1053,18 +980,12 @@ def _get_updated_rows(self, auth, function):
        q = qps[0]
 
        for qp in qps[1:]:
-            q = {
-                'operator': 'or',
-                'val1': q,
-                'val2': qp
-            }
+            q = {'operator': 'or', 'val1': q, 'val2': qp}
 
-        updated = function(auth, q, { 'max_result': 10000 })['result']
+        updated = function(auth, q, {'max_result': 10000})['result']
 
        return updated
 
-
-
    def _get_query_parts(self, query_str, search_options=None):
        """ Split a query string into its parts
        """
@@ -1079,10 +1000,10 @@ def _get_query_parts(self, query_str, search_options=None):
 
        query_str_parts = []
        try:
-            for part in shlex.split(query_str.encode('utf-8')):
-                query_str_parts.append({ 'string': part.decode('utf-8') })
+            for part in shlex.split(query_str):
+                query_str_parts.append({'string': part})
        except ValueError as exc:
-            if unicode(exc) == 'No closing quotation':
-                raise NipapValueError(unicode(exc))
+            if str(exc) == 'No closing quotation':
+                raise NipapValueError(str(exc))
            raise exc
 
        # Handle empty search.
@@ -1090,21 +1011,20 @@ def _get_query_parts(self, query_str, search_options=None):
        # zero-element list for an empty string, so we have to append one
        # manually
        if len(query_str_parts) == 0:
-            query_str_parts.append({ 'string': '' })
+            query_str_parts.append({'string': ''})
 
        return query_str_parts
 
-
-
    def _get_db_version(self):
        """ Get the schema version of the nipap psql db.
        """
 
        dbname = self._cfg.get('nipapd', 'db_name')
-        self._execute("SELECT description FROM pg_shdescription JOIN pg_database ON objoid = pg_database.oid WHERE datname = '%s'" % dbname)
+        self._execute("SELECT description FROM pg_shdescription JOIN pg_database ON objoid = pg_database.oid "
+                      "WHERE datname = '" + dbname + "'")
        comment = self._curs_pg.fetchone()
        if comment is None:
-            raise NipapDatabaseNoVersionError("Could not find comment of psql database %s" % dbname)
+            raise NipapDatabaseNoVersionError("Could not find comment of psql database {}".format(dbname))
 
        db_version = None
        m = re.match('NIPAP database - schema version: ([0-9]+)', comment[0])
@@ -1115,8 +1035,6 @@ def _get_db_version(self):
 
        return db_version
 
-
-
    def _db_install(self, db_name):
        """ Install nipap database schema
        """
@@ -1125,21 +1043,17 @@ def _db_install(self, db_name):
        self._execute(db_schema.functions)
        self._execute(db_schema.triggers)
 
-
-
    def _db_upgrade(self, db_name):
        """ Upgrade nipap database schema
        """
        current_db_version = self._get_db_version()
        self._execute(db_schema.functions)
        for i in range(current_db_version, nipap.__db_version__):
-            self._logger.info("Upgrading DB schema:", i, "to", i+1)
+            self._logger.info("Upgrading DB schema: %s to %s", i, i + 1)
            upgrade_sql = db_schema.upgrade[i-1] # 0 count on array
            self._execute(upgrade_sql % (db_name))
        self._execute(db_schema.triggers)
 
-
-
    #
    # VRF functions
    #
@@ -1158,22 +1072,22 @@ def _expand_vrf_spec(self, spec):
            error will be thrown if both id and name are specified.
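On _get_query_parts() above: Python 3's shlex.split() works on str directly, so the py2-era encode/decode round-trip is no longer needed, and quoted substrings still come back as single parts::

    import shlex

    print(shlex.split('vrf "my pool" 10.0.0.0/8'))
    # ['vrf', 'my pool', '10.0.0.0/8']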
""" - if type(spec) is not dict: + if not isinstance(spec, dict): raise NipapInputError("vrf specification must be a dict") allowed_values = ['id', 'name', 'rt'] for a in spec: if a not in allowed_values: - raise NipapExtraneousInputError("extraneous specification key %s" % a) + raise NipapExtraneousInputError("extraneous specification key {}".format(a)) if 'id' in spec: - if type(spec['id']) not in (int, long): + if not isinstance(spec['id'], int): raise NipapValueError("VRF specification key 'id' must be an integer.") elif 'rt' in spec: - if type(spec['rt']) != type(''): + if not isinstance(spec['rt'], str): raise NipapValueError("VRF specification key 'rt' must be a string.") elif 'name' in spec: - if type(spec['name']) != type(''): + if not isinstance(spec['name'], str): raise NipapValueError("VRF specification key 'name' must be a string.") if len(spec) > 1: raise NipapExtraneousInputError("VRF specification contains too many keys, specify VRF id, vrf or name.") @@ -1182,16 +1096,14 @@ def _expand_vrf_spec(self, spec): return where, params - - - def _expand_vrf_query(self, query, table_name = None): + def _expand_vrf_query(self, query, table_name=None): """ Expand VRF query dict into a WHERE-clause. If you need to prefix each column reference with a table name, that can be supplied via the table_name argument. """ - where = unicode() + where = str() opt = list() # handle table name, can be None @@ -1200,16 +1112,16 @@ def _expand_vrf_query(self, query, table_name = None): else: col_prefix = table_name + "." - if type(query['val1']) == dict and type(query['val2']) == dict: + if isinstance(query['val1'], dict) and isinstance(query['val2'], dict): # Sub expression, recurse! This is used for boolean operators: AND OR # add parantheses sub_where1, opt1 = self._expand_vrf_query(query['val1'], table_name) sub_where2, opt2 = self._expand_vrf_query(query['val2'], table_name) try: - where += unicode(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) ) + where += " (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) except KeyError: - raise NipapNoSuchOperatorError("No such operator %s" % unicode(query['operator'])) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) opt += opt1 opt += opt2 @@ -1221,11 +1133,11 @@ def _expand_vrf_query(self, query, table_name = None): # val1 is variable, val2 is string. 
if query['val1'] not in _vrf_spec: - raise NipapInputError('Search variable \'%s\' unknown' % unicode(query['val1'])) + raise NipapInputError("Search variable '{}' unknown".format(query['val1'])) # build where clause if query['operator'] not in _operation_map: - raise NipapNoSuchOperatorError("No such operator %s" % query['operator']) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) # workaround for handling equal matches of NULL-values if query['operator'] == 'equals' and query['val2'] is None: @@ -1234,22 +1146,16 @@ def _expand_vrf_query(self, query, table_name = None): query['operator'] = 'is_not' if query['operator'] in ('equals_any',): - where = unicode(" %%s = ANY (%s%s::citext[]) " % - ( col_prefix, _vrf_spec[query['val1']]['column']) - ) + where = " %%s = ANY (%s%s::citext[]) " % (col_prefix, _vrf_spec[query['val1']]['column']) else: - where = unicode(" %s%s %s %%s " % - ( col_prefix, _vrf_spec[query['val1']]['column'], - _operation_map[query['operator']] ) - ) + where = " %s%s %s %%s " % (col_prefix, _vrf_spec[query['val1']]['column'], + _operation_map[query['operator']]) opt.append(query['val2']) return where, opt - - @requires_rw def add_vrf(self, auth, attr): """ Add a new VRF. @@ -1268,10 +1174,10 @@ def add_vrf(self, auth, attr): :py:func:`nipap.xmlrpc.NipapXMLRPC.add_vrf` for full understanding. """ - self._logger.debug("add_vrf called; attr: %s" % unicode(attr)) + self._logger.debug("add_vrf called; attr: %s", attr) # sanity check - do we have all attributes? - req_attr = [ 'rt', 'name' ] + req_attr = ['rt', 'name'] self._check_attr(attr, req_attr, _vrf_attrs) insert, params = self._sql_expand_insert(attr) @@ -1279,7 +1185,7 @@ def add_vrf(self, auth, attr): self._execute(sql, params) vrf_id = self._lastrowid() - vrf = self.list_vrf(auth, { 'id': vrf_id })[0] + vrf = self.list_vrf(auth, {'id': vrf_id})[0] # write to audit table audit_params = { @@ -1290,15 +1196,14 @@ def add_vrf(self, auth, attr): 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source, - 'description': 'Added VRF %s with attr: %s' % (vrf['rt'], unicode(vrf)) + 'description': 'Added VRF %s with attr: %s' % (vrf['rt'], vrf) } sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return vrf - @requires_rw def remove_vrf(self, auth, spec): """ Remove a VRF. @@ -1316,26 +1221,20 @@ def remove_vrf(self, auth, spec): understanding. 
""" - self._logger.debug("remove_vrf called; spec: %s" % unicode(spec)) + self._logger.debug("remove_vrf called; spec: %s", spec) # get list of VRFs to remove before removing them vrfs = self.list_vrf(auth, spec) # remove prefixes in VRFs for vrf in vrfs: - v4spec = { - 'prefix': '0.0.0.0/0', - 'vrf_id': vrf['id'] - } - v6spec = { - 'prefix': '::/0', - 'vrf_id': vrf['id'] - } - self.remove_prefix(auth, spec = v4spec, recursive = True) - self.remove_prefix(auth, spec = v6spec, recursive = True) + v4spec = {'prefix': '0.0.0.0/0', 'vrf_id': vrf['id']} + v6spec = {'prefix': '::/0', 'vrf_id': vrf['id']} + self.remove_prefix(auth, spec=v4spec, recursive=True) + self.remove_prefix(auth, spec=v6spec, recursive=True) where, params = self._expand_vrf_spec(spec) - sql = "DELETE FROM ip_net_vrf WHERE %s" % where + sql = "DELETE FROM ip_net_vrf WHERE " + where self._execute(sql, params) # write to audit table @@ -1351,9 +1250,7 @@ def remove_vrf(self, auth, spec): 'description': 'Removed vrf %s' % v['rt'] } sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) - - + self._execute('INSERT INTO ip_net_log ' + sql, params) def list_vrf(self, auth, spec=None): """ Return a list of VRFs matching `spec`. @@ -1374,7 +1271,7 @@ def list_vrf(self, auth, spec=None): if spec is None: spec = {} - self._logger.debug("list_vrf called; spec: %s" % unicode(spec)) + self._logger.debug("list_vrf called; spec: %s", spec) sql = "SELECT * FROM ip_net_vrf" @@ -1395,9 +1292,7 @@ def list_vrf(self, auth, spec=None): return res - - - def _get_vrf(self, auth, spec, prefix = 'vrf_'): + def _get_vrf(self, auth, spec, prefix='vrf_'): """ Get a VRF based on prefix spec Shorthand function to reduce code in the functions below, since @@ -1414,23 +1309,20 @@ def _get_vrf(self, auth, spec, prefix = 'vrf_'): # if None, mangle it to being 0, ie our default VRF if spec[prefix + 'id'] is None: spec[prefix + 'id'] = 0 - vrf = self.list_vrf(auth, { 'id': spec[prefix + 'id'] }) + vrf = self.list_vrf(auth, {'id': spec[prefix + 'id']}) elif prefix + 'rt' in spec: - vrf = self.list_vrf(auth, { 'rt': spec[prefix + 'rt'] }) + vrf = self.list_vrf(auth, {'rt': spec[prefix + 'rt']}) elif prefix + 'name' in spec: - vrf = self.list_vrf(auth, { 'name': spec[prefix + 'name'] }) + vrf = self.list_vrf(auth, {'name': spec[prefix + 'name']}) else: # no VRF specified - return VRF "default" - vrf = self.list_vrf(auth, { 'id': 0 }) + vrf = self.list_vrf(auth, {'id': 0}) if len(vrf) > 0: return vrf[0] raise NipapNonExistentError('No matching VRF found.') - - - @requires_rw def edit_vrf(self, auth, spec, attr): """ Update VRFs matching `spec` with attributes `attr`. @@ -1448,8 +1340,7 @@ def edit_vrf(self, auth, spec, attr): understanding. """ - self._logger.debug("edit_vrf called; spec: %s attr: %s" % - (unicode(spec), unicode(attr))) + self._logger.debug("edit_vrf called; spec: %s attr: %s", spec, attr) # sanity check - do we have all attributes? 
self._check_attr(attr, [], _vrf_attrs) @@ -1459,7 +1350,7 @@ def edit_vrf(self, auth, spec, attr): where, params1 = self._expand_vrf_spec(spec) update, params2 = self._sql_expand_update(attr) - params = dict(params2.items() + params1.items()) + params = dict(list(params2.items()) + list(params1.items())) if len(attr) == 0: raise NipapInputError("'attr' must not be empty.") @@ -1481,15 +1372,13 @@ def edit_vrf(self, auth, spec, attr): 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source, - 'description': 'Edited VRF %s attr: %s' % (v['rt'], unicode(attr)) + 'description': 'Edited VRF %s attr: %s' % (v['rt'], attr) } sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return updated_vrfs - - def search_vrf(self, auth, query, search_options=None): """ Search VRF list for VRFs matching `query`. @@ -1587,8 +1476,7 @@ def search_vrf(self, auth, query, search_options=None): try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'max_result'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'max_result'. Only integer values allowed.") # offset if 'offset' not in search_options: @@ -1597,10 +1485,9 @@ def search_vrf(self, auth, query, search_options=None): try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'offset'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'offset'. Only integer values allowed.") - self._logger.debug('search_vrf called; query: %s search_options: %s' % (unicode(query), unicode(search_options))) + self._logger.debug('search_vrf called; query: %s search_options: %s', query, search_options) opt = None sql = """ SELECT * FROM ip_net_vrf""" @@ -1611,16 +1498,15 @@ def search_vrf(self, auth, query, search_options=None): where, opt = self._expand_vrf_query(query) sql += " WHERE " + where - sql += " ORDER BY vrf_rt_order(rt) NULLS FIRST LIMIT " + unicode(search_options['max_result']) + " OFFSET " + unicode(search_options['offset']) + sql += " ORDER BY vrf_rt_order(rt) NULLS FIRST LIMIT %s OFFSET %s" % ( + search_options['max_result'], search_options['offset'],) self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) - return { 'search_options': search_options, 'result': result } - - + return {'search_options': search_options, 'result': result} def smart_search_vrf(self, auth, query_str, search_options=None, extra_query=None): """ Perform a smart search on VRF list. 
@@ -1667,7 +1553,7 @@ def smart_search_vrf(self, auth, query_str, search_options=None, extra_query=None
        if search_options is None:
            search_options = {}
 
-        self._logger.debug("smart_search_vrf query string: %s" % query_str)
+        self._logger.debug("smart_search_vrf query string: %s", query_str)
 
        success, query = self._parse_vrf_query(query_str)
        if not success:
@@ -1676,17 +1562,13 @@ def smart_search_vrf(self, auth, query_str, search_options=None, extra_query=None
                'search_options': search_options,
                'result': [],
                'error': True,
-                'error_message': 'query interpretation failed'
+                'error_message': 'query interpretation failed',
            }
 
        if extra_query is not None:
-            query = {
-                'operator': 'and',
-                'val1': query,
-                'val2': extra_query
-            }
+            query = {'operator': 'and', 'val1': query, 'val2': extra_query}
 
-        self._logger.debug("smart_search_vrf; query expanded to: %s" % unicode(query))
+        self._logger.debug("smart_search_vrf; query expanded to: %s", query)
 
        search_result = self.search_vrf(auth, query, search_options)
        search_result['interpretation'] = query
@@ -1694,8 +1576,6 @@ def smart_search_vrf(self, auth, query_str, search_options=None, extra_query=None
 
        return search_result
 
-
-
    def _parse_vrf_query(self, query_str):
        """ Parse a smart search query for VRFs
@@ -1706,8 +1586,6 @@ def _parse_vrf_query(self, query_str):
        query = sp.parse(query_str)
        return query
 
-
-
    #
    # Pool functions
    #
@@ -1715,39 +1593,37 @@ def _expand_pool_spec(self, spec):
        """ Expand pool specification to sql.
        """
 
-        if type(spec) is not dict:
+        if not isinstance(spec, dict):
            raise NipapInputError("pool specification must be a dict")
 
-        allowed_values = ['id', 'name' ]
+        allowed_values = ['id', 'name']
        for a in spec:
            if a not in allowed_values:
-                raise NipapExtraneousInputError("extraneous specification key %s" % a)
+                raise NipapExtraneousInputError("extraneous specification key {}".format(a))
 
        if 'id' in spec:
-            if type(spec['id']) not in (long, int):
+            if not isinstance(spec['id'], int):
                raise NipapValueError("pool specification key 'id' must be an integer")
-            if spec != { 'id': spec['id'] }:
+            if spec != {'id': spec['id']}:
                raise NipapExtraneousInputError("pool specification with 'id' should not contain anything else")
        elif 'name' in spec:
-            if type(spec['name']) != type(''):
+            if not isinstance(spec['name'], str):
                raise NipapValueError("pool specification key 'name' must be a string")
            if 'id' in spec:
-                raise NipapExtraneousInputError("pool specification contain both 'id' and 'name', specify pool id or name")
+                raise NipapExtraneousInputError("pool specification contains both 'id' and 'name', specify pool id or name")
 
        where, params = self._sql_expand_where(spec, 'spec_', 'po.')
 
        return where, params
 
-
-
-    def _expand_pool_query(self, query, table_name = None):
+    def _expand_pool_query(self, query, table_name=None):
        """ Expand pool query dict into a WHERE-clause.
 
            If you need to prefix each column reference with a table
            name, that can be supplied via the table_name argument.
        """
 
-        where = unicode()
+        where = ''
        opt = list()
 
        # handle table name, can be None
@@ -1756,17 +1632,16 @@ def _expand_pool_query(self, query, table_name = None):
        else:
            col_prefix = table_name + "."
 
-
-        if type(query['val1']) == dict and type(query['val2']) == dict:
+        if isinstance(query['val1'], dict) and isinstance(query['val2'], dict):
            # Sub expression, recurse!
This is used for boolean operators: AND OR # add parantheses sub_where1, opt1 = self._expand_pool_query(query['val1'], table_name) sub_where2, opt2 = self._expand_pool_query(query['val2'], table_name) try: - where += unicode(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) ) + where += " (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) except KeyError: - raise NipapNoSuchOperatorError("No such operator %s" % unicode(query['operator'])) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) opt += opt1 opt += opt2 @@ -1778,11 +1653,11 @@ def _expand_pool_query(self, query, table_name = None): # val1 is variable, val2 is string. if query['val1'] not in _pool_spec: - raise NipapInputError('Search variable \'%s\' unknown' % unicode(query['val1'])) + raise NipapInputError("Search variable '{}' unknown".format(query['val1'])) # build where clause if query['operator'] not in _operation_map: - raise NipapNoSuchOperatorError("No such operator %s" % query['operator']) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) # workaround for handling equal matches of NULL-values if query['operator'] == 'equals' and query['val2'] is None: @@ -1791,22 +1666,16 @@ def _expand_pool_query(self, query, table_name = None): query['operator'] = 'is_not' if query['operator'] in ('equals_any',): - where = unicode(" %%s = ANY (%s%s::citext[]) " % - ( col_prefix, _pool_spec[query['val1']]['column']) - ) + where = " %%s = ANY (%s%s::citext[]) " % (col_prefix, _pool_spec[query['val1']]['column']) else: - where = unicode(" %s%s %s %%s " % - ( col_prefix, _pool_spec[query['val1']]['column'], - _operation_map[query['operator']] ) - ) + where = " %s%s %s %%s " % (col_prefix, _pool_spec[query['val1']]['column'], + _operation_map[query['operator']]) opt.append(query['val2']) return where, opt - - @requires_rw def add_pool(self, auth, attr): """ Create a pool according to `attr`. @@ -1824,7 +1693,7 @@ def add_pool(self, auth, attr): understanding. """ - self._logger.debug("add_pool called; attrs: %s" % unicode(attr)) + self._logger.debug("add_pool called; attrs: %s", attr) # sanity check - do we have all attributes? req_attr = ['name', 'description', 'default_type'] @@ -1835,7 +1704,7 @@ def add_pool(self, auth, attr): self._execute(sql, params) pool_id = self._lastrowid() - pool = self.list_pool(auth, { 'id': pool_id })[0] + pool = self.list_pool(auth, {'id': pool_id})[0] # write to audit table audit_params = { @@ -1845,15 +1714,13 @@ def add_pool(self, auth, attr): 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source, - 'description': 'Added pool %s with attr: %s' % (pool['name'], unicode(attr)) + 'description': 'Added pool ' + pool['name'] + ' with attr: ' + str(attr), } sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return pool - - @requires_rw def remove_pool(self, auth, spec): """ Remove a pool. @@ -1869,13 +1736,13 @@ def remove_pool(self, auth, spec): understanding. 
""" - self._logger.debug("remove_pool called; spec: %s" % unicode(spec)) + self._logger.debug("remove_pool called; spec: %s", spec) # fetch list of pools to remove before they are removed pools = self.list_pool(auth, spec) where, params = self._expand_pool_spec(spec) - sql = "DELETE FROM ip_net_pool AS po WHERE %s" % where + sql = "DELETE FROM ip_net_pool AS po WHERE " + where self._execute(sql, params) # write to audit table @@ -1888,15 +1755,13 @@ def remove_pool(self, auth, spec): for p in pools: audit_params['pool_id'] = p['id'], audit_params['pool_name'] = p['name'], - audit_params['description'] = 'Removed pool %s' % p['name'] + audit_params['description'] = 'Removed pool ' + p['name'] sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) - - + self._execute('INSERT INTO ip_net_log ' + sql, params) def list_pool(self, auth, spec=None): - """ Return a list of pools. + """Return a list of pools. * `auth` [BaseAuth] AAA options. @@ -1914,7 +1779,7 @@ def list_pool(self, auth, spec=None): if spec is None: spec = {} - self._logger.debug("list_pool called; spec: %s" % unicode(spec)) + self._logger.debug("list_pool called; spec: %s", spec) sql = """SELECT DISTINCT (po.id), po.id, @@ -1968,7 +1833,6 @@ def list_pool(self, auth, spec=None): return res - def _check_pool_attr(self, attr, req_attr=None): """ Check pool attributes. """ @@ -1982,11 +1846,8 @@ def _check_pool_attr(self, attr, req_attr=None): # validate IPv4 prefix length if attr.get('ipv4_default_prefix_length') is not None: try: - attr['ipv4_default_prefix_length'] = \ - int(attr['ipv4_default_prefix_length']) - - if (attr['ipv4_default_prefix_length'] > 32 or - attr['ipv4_default_prefix_length'] < 1): + attr['ipv4_default_prefix_length'] = int(attr['ipv4_default_prefix_length']) + if not (1 <= attr['ipv4_default_prefix_length'] <= 32): raise ValueError() except ValueError: raise NipapValueError('Default IPv4 prefix length must be an integer between 1 and 32.') @@ -1994,17 +1855,13 @@ def _check_pool_attr(self, attr, req_attr=None): # validate IPv6 prefix length if attr.get('ipv6_default_prefix_length'): try: - attr['ipv6_default_prefix_length'] = \ - int(attr['ipv6_default_prefix_length']) + attr['ipv6_default_prefix_length'] = int(attr['ipv6_default_prefix_length']) - if (attr['ipv6_default_prefix_length'] > 128 or - attr['ipv6_default_prefix_length'] < 1): + if not (1 <= attr['ipv6_default_prefix_length'] <= 128): raise ValueError() except ValueError: raise NipapValueError('Default IPv6 prefix length must be an integer between 1 and 128.') - - def _get_pool(self, auth, spec): """ Get a pool. @@ -2021,8 +1878,6 @@ def _get_pool(self, auth, spec): raise NipapInputError("non-existing pool specified") return pool[0] - - @requires_rw def edit_pool(self, auth, spec, attr): """ Update pool given by `spec` with attributes `attr`. @@ -2040,17 +1895,16 @@ def edit_pool(self, auth, spec, attr): understanding. 
""" - self._logger.debug("edit_pool called; spec: %s attr: %s" % - (unicode(spec), unicode(attr))) + self._logger.debug("edit_pool called; spec: %s attr: %s", spec, attr) - if ('id' not in spec and 'name' not in spec) or ( 'id' in spec and 'name' in spec ): - raise NipapMissingInputError('''pool spec must contain either 'id' or 'name' ''') + if ('id' not in spec and 'name' not in spec) or ('id' in spec and 'name' in spec): + raise NipapMissingInputError("pool spec must contain either 'id' or 'name'") self._check_pool_attr(attr) where, params1 = self._expand_pool_spec(spec) update, params2 = self._sql_expand_update(attr) - params = dict(params2.items() + params1.items()) + params = dict(list(params2.items()) + list(params1.items())) pools = self.list_pool(auth, spec) @@ -2067,20 +1921,18 @@ def edit_pool(self, auth, spec, attr): 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, - 'authoritative_source': auth.authoritative_source + 'authoritative_source': auth.authoritative_source, } for p in pools: audit_params['pool_id'] = p['id'] audit_params['pool_name'] = p['name'] - audit_params['description'] = 'Edited pool %s attr: %s' % (p['name'], unicode(attr)) + audit_params['description'] = 'Edited pool ' + p['name'] + ' attr: ' + str(attr) sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return updated_pools - - def search_pool(self, auth, query, search_options=None): """ Search pool list for pools matching `query`. @@ -2178,8 +2030,7 @@ def search_pool(self, auth, query, search_options=None): try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'max_result'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'max_result'. Only integer values allowed.") # offset if 'offset' not in search_options: @@ -2188,10 +2039,9 @@ def search_pool(self, auth, query, search_options=None): try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'offset'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'offset'. Only integer values allowed.") - self._logger.debug('search_pool search_options: %s' % unicode(search_options)) + self._logger.debug('search_pool search_options: %s', search_options) where, opt = self._expand_pool_query(query) sql = """SELECT DISTINCT (po.id), @@ -2225,7 +2075,7 @@ def search_pool(self, auth, query, search_options=None): LEFT OUTER JOIN ip_net_plan AS inp ON (inp.pool_id = po.id) LEFT OUTER JOIN ip_net_vrf AS vrf ON (vrf.id = inp.vrf_id) WHERE """ + where + """ ORDER BY po.name - LIMIT """ + unicode(search_options['max_result']) + """ OFFSET """ + unicode(search_options['offset']) + LIMIT %s OFFSET %s""" % (search_options['max_result'], search_options['offset']) self._execute(sql, opt) @@ -2233,9 +2083,7 @@ def search_pool(self, auth, query, search_options=None): for row in self._curs_pg: result.append(dict(row)) - return { 'search_options': search_options, 'result': result } - - + return {'search_options': search_options, 'result': result} def smart_search_pool(self, auth, query_str, search_options=None, extra_query=None): """ Perform a smart search on pool list. 
@@ -2281,7 +2129,7 @@ def smart_search_pool(self, auth, query_str, search_options=None, extra_query=No if search_options is None: search_options = {} - self._logger.debug("smart_search_pool query string: %s" % query_str) + self._logger.debug("smart_search_pool query string: %s", query_str) success, query = self._parse_pool_query(query_str) if not success: @@ -2290,17 +2138,13 @@ def smart_search_pool(self, auth, query_str, search_options=None, extra_query=No 'search_options': search_options, 'result': [], 'error': True, - 'error_message': 'query interpretation failed' - } + 'error_message': 'query interpretation failed', + } if extra_query is not None: - query = { - 'operator': 'and', - 'val1': query, - 'val2': extra_query - } + query = {'operator': 'and', 'val1': query, 'val2': extra_query} - self._logger.debug("smart_search_pool; query expanded to: %s" % unicode(query)) + self._logger.debug("smart_search_pool; query expanded to: %s", query) search_result = self.search_pool(auth, query, search_options) search_result['interpretation'] = query @@ -2308,7 +2152,6 @@ def smart_search_pool(self, auth, query_str, search_options=None, extra_query=No return search_result - def _parse_pool_query(self, query_str): """ Parse a smart search query for pools @@ -2319,22 +2162,20 @@ def _parse_pool_query(self, query_str): query = sp.parse(query_str) return query - - # # PREFIX FUNCTIONS # - def _expand_prefix_spec(self, spec, prefix = ''): + def _expand_prefix_spec(self, spec, prefix=''): """ Expand prefix specification to SQL. """ # sanity checks - if type(spec) is not dict: + if not isinstance(spec, dict): raise NipapInputError('invalid prefix specification') - for key in spec.keys(): + for key in spec: if key not in _prefix_spec: - raise NipapExtraneousInputError("Key '" + key + "' not allowed in prefix spec.") + raise NipapExtraneousInputError("Key '{}' not allowed in prefix spec.".format(key)) where = "" params = {} @@ -2347,7 +2188,7 @@ def _expand_prefix_spec(self, spec, prefix = ''): family = None if 'family' in spec: family = spec['family'] - del(spec['family']) + del spec['family'] # rename prefix columns spec2 = {} @@ -2363,15 +2204,15 @@ def _expand_prefix_spec(self, spec, prefix = ''): if prefix + 'vrf_name' in spec: spec['vrf.name'] = spec[prefix + 'vrf_name'] - del(spec[prefix + 'vrf_name']) + del spec[prefix + 'vrf_name'] if prefix + 'vrf_rt' in spec: spec['vrf.rt'] = spec[prefix + 'vrf_rt'] - del(spec[prefix + 'vrf_rt']) + del spec[prefix + 'vrf_rt'] if prefix + 'pool_name' in spec: spec['pool.name'] = spec[prefix + 'pool_name'] - del(spec[prefix + 'pool_name']) + del spec[prefix + 'pool_name'] where, params = self._sql_expand_where(spec) @@ -2384,19 +2225,17 @@ def _expand_prefix_spec(self, spec, prefix = ''): where += " AND family(" + prefix + "prefix) = %(family)s" params['family'] = family - self._logger.debug("_expand_prefix_spec; where: %s params: %s" % (where, unicode(params))) + self._logger.debug("_expand_prefix_spec; where: %s params: %s", where, params) return where, params - - - def _expand_prefix_query(self, query, table_name = None): + def _expand_prefix_query(self, query, table_name=None): """ Expand prefix query dict into a WHERE-clause. If you need to prefix each column reference with a table name, that can be supplied via the table_name argument. """ - where = unicode() + where = '' opt = list() # handle table name, can be None @@ -2405,21 +2244,21 @@ def _expand_prefix_query(self, query, table_name = None): else: col_prefix = table_name + "." 
-        if 'val1' not in query:
+        if 'val1' not in query or query['val1'] is None:
             raise NipapMissingInputError("'val1' must be specified")
-        if 'val2' not in query:
-            raise NipapMissingInputError("'val2' must be specified")
+        if 'val2' not in query or (query['val2'] is None and query['val1'] != 'vrf_id'):
+            raise NipapMissingInputError("Value (val2 in API) for '{}' must be specified".format(query['val1']))
 
-        if type(query['val1']) == dict and type(query['val2']) == dict:
+        if isinstance(query['val1'], dict) and isinstance(query['val2'], dict):
             # Sub expression, recurse! This is used for boolean operators: AND OR
-            # add parantheses
+            # add parentheses
 
             sub_where1, opt1 = self._expand_prefix_query(query['val1'], table_name)
             sub_where2, opt2 = self._expand_prefix_query(query['val2'], table_name)
             try:
-                where += unicode(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) )
+                where += " (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2)
             except KeyError:
-                raise NipapNoSuchOperatorError("No such operator %s" % unicode(query['operator']))
+                raise NipapNoSuchOperatorError("No such operator {}".format(query['operator']))
 
             opt += opt1
             opt += opt2
 
@@ -2431,11 +2270,11 @@ def _expand_prefix_query(self, query, table_name = None):
         # val1 is key, val2 is value.
 
         if query['val1'] not in _prefix_spec:
-            raise NipapInputError('Search variable \'%s\' unknown' % unicode(query['val1']))
+            raise NipapInputError("Search variable '{}' unknown".format(query['val1']))
 
         # build where clause
         if query['operator'] not in _operation_map:
-            raise NipapNoSuchOperatorError("No such operator %s" % query['operator'])
+            raise NipapNoSuchOperatorError("No such operator {}".format(query['operator']))
 
         if query['val1'] == 'vrf_id' and query['val2'] is None:
             query['val2'] = 0
@@ -2450,42 +2289,28 @@ def _expand_prefix_query(self, query, table_name = None):
                 'contains',
                 'contains_equals',
                 'contained_within',
-                'contained_within_equals'):
-
-            where = " iprange(prefix) %(operator)s %%s " % {
-                'col_prefix': col_prefix,
-                'operator': _operation_map[query['operator']]
-            }
+                'contained_within_equals',
+        ):
+            # NOTE: removed col_prefix since it wasn't used
+            where = " iprange(prefix) " + _operation_map[query['operator']] + " %s "
 
         elif query['operator'] in ('equals_any',):
-            where = unicode(" %%s = ANY (%s%s::citext[]) " %
-                ( col_prefix, _prefix_spec[query['val1']]['column'])
-            )
-
-        elif query['operator'] in (
-                'like',
-                'regex_match',
-                'regex_not_match'):
-            # we COALESCE column with '' to allow for example a regexp
-            # search on '.*' to match columns which are NULL in the
-            # database
-            where = unicode(" COALESCE(%s%s, '') %s %%s " %
-                ( col_prefix, _prefix_spec[query['val1']]['column'],
-                _operation_map[query['operator']] )
-            )
+            where = " %s = ANY (" + col_prefix + _prefix_spec[query['val1']]['column'] + "::citext[]) "
+
+        elif query['operator'] in ('like', 'regex_match', 'regex_not_match'):
+            # we COALESCE column with '' to allow for example a regexp search on '.*' to match columns
+            # which are NULL in the database
+            where = " COALESCE(" + col_prefix + _prefix_spec[query['val1']]['column'] + ", '') " + _operation_map[
                query['operator']] + " %s "
 
         else:
-            where = unicode(" %s%s %s %%s " %
-                ( col_prefix, _prefix_spec[query['val1']]['column'],
-                _operation_map[query['operator']] )
-            )
+            where = ' ' + col_prefix + _prefix_spec[query['val1']]['column'] + ' ' + _operation_map[
                query['operator']] + ' %s '
 
         opt.append(query['val2'])
 
         return where, opt
 
-
-
     @requires_rw
     def add_prefix(self, auth, attr, args=None):
         """ Add a
prefix and return its ID. @@ -2530,14 +2355,14 @@ def add_prefix(self, auth, attr, args=None): if args is None: args = {} - self._logger.debug("add_prefix called; attr: %s; args: %s" % (unicode(attr), unicode(args))) + self._logger.debug("add_prefix called; attr: %s; args: %s", attr, args) # args defined? if args is None: args = {} # attr must be a dict! - if type(attr) != dict: + if not isinstance(attr, dict): raise NipapInputError("'attr' must be a dict") # handle pool attributes - find correct one and remove bad pool keys @@ -2546,33 +2371,24 @@ def add_prefix(self, auth, attr, args=None): if 'pool_id' in attr or 'pool_name' in attr: if 'pool_id' in attr: if attr['pool_id'] is None: - pool = { - 'id': None, - 'name': None - } + pool = {'id': None, 'name': None} else: - pool = self._get_pool(auth, { 'id': attr['pool_id'] }) + pool = self._get_pool(auth, {'id': attr['pool_id']}) else: if attr['pool_name'] is None: - pool = { - 'id': None, - 'name': None - } + pool = {'id': None, 'name': None} else: # resolve pool name to pool id - pool = self._get_pool(auth, { 'name': attr['pool_name'] }) + pool = self._get_pool(auth, {'name': attr['pool_name']}) # and delete the pool_name attr - del(attr['pool_name']) + del attr['pool_name'] attr['pool_id'] = pool['id'] else: - pool = { - 'id': None, - 'name': None - } + pool = {'id': None, 'name': None} attr['authoritative_source'] = auth.authoritative_source @@ -2582,7 +2398,7 @@ def add_prefix(self, auth, attr, args=None): raise NipapExtraneousInputError("specify 'prefix' or 'from-prefix' or 'from-pool'") else: - if ('from-pool' not in args and 'from-prefix' not in args) or ('from-pool' in args and 'from-prefix' in args): + if ('from-pool' in args) == ('from-prefix' in args): raise NipapExtraneousInputError("specify 'prefix' or 'from-prefix' or 'from-pool'") # VRF handling for manually specified prefix @@ -2590,9 +2406,9 @@ def add_prefix(self, auth, attr, args=None): # handle VRF - find the correct one and remove bad VRF keys vrf = self._get_vrf(auth, attr) if 'vrf_rt' in attr: - del(attr['vrf_rt']) + del attr['vrf_rt'] if 'vrf_name' in attr: - del(attr['vrf_name']) + del attr['vrf_name'] attr['vrf_id'] = vrf['id'] # VRF handling for allocation from pool or parent prefix @@ -2602,9 +2418,9 @@ def add_prefix(self, auth, attr, args=None): # handle VRF - find the correct one and remove bad VRF keys vrf = self._get_vrf(auth, attr) if 'vrf_rt' in attr: - del(attr['vrf_rt']) + del attr['vrf_rt'] if 'vrf_name' in attr: - del(attr['vrf_name']) + del attr['vrf_name'] attr['vrf_id'] = vrf['id'] if 'from-pool' in args: @@ -2633,9 +2449,9 @@ def add_prefix(self, auth, attr, args=None): # handle VRF - find the correct one and remove bad VRF keys vrf = self._get_vrf(auth, attr) if 'vrf_rt' in attr: - del(attr['vrf_rt']) + del attr['vrf_rt'] if 'vrf_name' in attr: - del(attr['vrf_name']) + del attr['vrf_name'] attr['vrf_id'] = vrf['id'] # VRF fiddling @@ -2650,7 +2466,7 @@ def add_prefix(self, auth, attr, args=None): raise NipapNonExistentError("no free prefix found") # do we have all attributes? 
-        req_attr = [ 'prefix', 'authoritative_source' ]
+        req_attr = ['prefix', 'authoritative_source']
         self._check_attr(attr, req_attr, _prefix_attrs)
         if ('description' not in attr) and ('node' not in attr):
             raise NipapMissingInputError('Either description or node must be specified.')
@@ -2663,7 +2479,7 @@ def add_prefix(self, auth, attr, args=None):
         self._execute(sql, params)
         prefix_id = self._lastrowid()
-        prefix = self.list_prefix(auth, { 'id': prefix_id })[0]
+        prefix = self.list_prefix(auth, {'id': prefix_id})[0]
 
         # write to audit table
         audit_params = {
@@ -2676,23 +2492,22 @@ def add_prefix(self, auth, attr, args=None):
             'authenticated_as': auth.authenticated_as,
             'full_name': auth.full_name,
             'authoritative_source': auth.authoritative_source,
-            'description': 'Added prefix %s with attr: %s' % (prefix['prefix'], unicode(attr))
+            'description': 'Added prefix ' + prefix['prefix'] + ' with attr: ' + str(attr),
         }
         sql, params = self._sql_expand_insert(audit_params)
-        self._execute('INSERT INTO ip_net_log %s' % sql, params)
+        self._execute('INSERT INTO ip_net_log ' + sql, params)
 
         if pool['id'] is not None:
             audit_params['pool_id'] = pool['id']
             audit_params['pool_name'] = pool['name']
-            audit_params['description'] = 'Pool %s expanded with prefix %s in VRF %s' % (pool['name'], prefix['prefix'], unicode(prefix['vrf_rt']))
+            audit_params['description'] = 'Pool ' + pool['name'] + ' expanded with prefix ' + prefix[
                'prefix'] + ' in VRF ' + str(prefix['vrf_rt'])
             sql, params = self._sql_expand_insert(audit_params)
-            self._execute('INSERT INTO ip_net_log %s' % sql, params)
+            self._execute('INSERT INTO ip_net_log ' + sql, params)
 
         return prefix
 
-
-
     @requires_rw
     def edit_prefix(self, auth, spec, attr):
         """ Update prefix matching `spec` with attributes `attr`.
@@ -2714,48 +2529,38 @@ def edit_prefix(self, auth, spec, attr):
             understanding.
         """
-        self._logger.debug("edit_prefix called; spec: %s attr: %s" %
-                (unicode(spec), unicode(attr)))
+        self._logger.debug("edit_prefix called; spec: %s attr: %s", spec, attr)
 
         # Handle Pool - find correct one and remove bad pool keys
         pool = None
         if 'pool_id' in attr or 'pool_name' in attr:
             if 'pool_id' in attr:
                 if attr['pool_id'] is None:
-                    pool = {
-                        'id': None,
-                        'name': None
-                    }
+                    pool = {'id': None, 'name': None}
                 else:
-                    pool = self._get_pool(auth, { 'id': attr['pool_id'] })
+                    pool = self._get_pool(auth, {'id': attr['pool_id']})
 
             else:
                 if attr['pool_name'] is None:
-                    pool = {
-                        'id': None,
-                        'name': None
-                    }
+                    pool = {'id': None, 'name': None}
                 else:
                     # resolve pool name to pool id
-                    pool = self._get_pool(auth, { 'name': attr['pool_name'] })
+                    pool = self._get_pool(auth, {'name': attr['pool_name']})
 
                 # and delete the pool_name attr
-                del(attr['pool_name'])
+                del attr['pool_name']
 
             attr['pool_id'] = pool['id']
 
         else:
-            pool = {
-                'id': None,
-                'name': None
-            }
+            pool = {'id': None, 'name': None}
 
         # Handle VRF in attributes - find the correct one and remove bad VRF keys.
if 'vrf_rt' in attr or 'vrf_name' in attr or 'vrf_id' in attr: vrf = self._get_vrf(auth, attr) if 'vrf_rt' in attr: - del(attr['vrf_rt']) + del attr['vrf_rt'] if 'vrf_name' in attr: - del(attr['vrf_name']) + del attr['vrf_name'] attr['vrf_id'] = vrf['id'] self._check_attr(attr, [], _prefix_attrs) @@ -2767,7 +2572,7 @@ def edit_prefix(self, auth, spec, attr): where, params1 = self._expand_prefix_spec(spec.copy()) update, params2 = self._sql_expand_update(attr) - params = dict(params2.items() + params1.items()) + params = dict(list(params2.items()) + list(params1.items())) sql = "UPDATE ip_net_plan SET " + update + " WHERE " + where sql += " RETURNING id" @@ -2789,9 +2594,9 @@ def edit_prefix(self, auth, spec, attr): audit_params['vrf_name'] = p['vrf_name'] audit_params['prefix_id'] = p['id'] audit_params['prefix_prefix'] = p['prefix'] - audit_params['description'] = 'Edited prefix %s attr: %s' % (p['prefix'], unicode(attr)) + audit_params['description'] = 'Edited prefix ' + p['prefix'] + ' attr: ' + str(attr) sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) # Only add to log if something was changed if p['pool_id'] != pool['id']: @@ -2813,27 +2618,25 @@ def edit_prefix(self, auth, spec, attr): audit_params2['pool_id'] = pool['id'] audit_params2['pool_name'] = pool['name'] - audit_params2['description'] = 'Expanded pool %s with prefix %s' % (pool['name'], p['prefix']) + audit_params2['description'] = 'Expanded pool ' + pool['name'] + ' with prefix ' + p['prefix'] sql, params = self._sql_expand_insert(audit_params2) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) # if prefix had pool set previously, prefix was removed from that pool if p['pool_id'] is not None: - pool2 = self._get_pool(auth, { 'id': p['pool_id'] }) + pool2 = self._get_pool(auth, {'id': p['pool_id']}) audit_params2['pool_id'] = pool2['id'] audit_params2['pool_name'] = pool2['name'] - audit_params2['description'] = 'Removed prefix %s from pool %s' % (p['prefix'], pool2['name']) + audit_params2['description'] = 'Removed prefix ' + p['prefix'] + ' from pool ' + pool2['name'] sql, params = self._sql_expand_insert(audit_params2) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return updated_prefixes - - def find_free_prefix(self, auth, vrf, args): """ Finds free prefixes in the sources given in `args`. @@ -2869,7 +2672,7 @@ def find_free_prefix(self, auth, vrf, args): Instead of specifying a pool, a prefix which will be searched for new prefixes can be specified. In `args`, the key :attr:`from-prefix` is set to list of prefixes you want to - allocate from and the key :attr:`prefix_length` is set to + allocate from and the key :attr:`prefix_length` is set to the wanted prefix length. 
Example:: @@ -2892,7 +2695,7 @@ def find_free_prefix(self, auth, vrf, args): """ # input sanity - if type(args) is not dict: + if not isinstance(args, dict): raise NipapInputError("invalid input, please provide dict as args") # TODO: find good default value for max_num @@ -2910,12 +2713,12 @@ def find_free_prefix(self, auth, vrf, args): if 'family' not in args: raise NipapMissingInputError("'family' must be specified with 'from-pool' mode") try: - assert int(args['family']) in [ 4, 6 ] + assert int(args['family']) in [4, 6] except (TypeError, AssertionError): raise NipapValueError("incorrect family specified, must be 4 or 6") elif 'from-prefix' in args: - if type(args['from-prefix']) is not list: + if not isinstance(args['from-prefix'], list): raise NipapInputError("from-prefix should be a list") if 'from-pool' in args: raise NipapInputError("specify 'from-pool' OR 'from-prefix'") @@ -2937,7 +2740,7 @@ def find_free_prefix(self, auth, vrf, args): if self._get_afi(p) == int(args['family']): prefixes.append(p) if len(prefixes) == 0: - raise NipapInputError('No prefixes of family %s in pool' % unicode(args['family'])) + raise NipapInputError('No prefixes of family {} in pool'.format(args['family'])) if 'prefix_length' not in args: if int(args['family']) == 4: wpl = pool_result[0]['ipv4_default_prefix_length'] @@ -2972,15 +2775,16 @@ def find_free_prefix(self, auth, vrf, args): params = {} # TODO: this makes me want to piss my pants # we should really write a patch to psycopg2 or something to - # properly adapt an python list of texts with values looking + # properly adapt a python list of texts with values looking # like prefixes to a postgresql array of inets - sql_prefix = ' UNION '.join('SELECT %(prefix' + unicode(prefixes.index(p)) + ')s AS prefix' for p in prefixes) + # UPDATE: This could actually be supported now, only I'm not comfortable messing with this + sql_prefix = ' UNION '.join('SELECT %(prefix' + str(prefixes.index(p)) + ')s AS prefix' for p in prefixes) for p in prefixes: - params['prefix' + unicode(prefixes.index(p))] = unicode(p) + params['prefix%s' % prefixes.index(p)] = p damp = 'SELECT array_agg((prefix::text)::inet) FROM (' + sql_prefix + ') AS a' - sql = """SELECT * FROM find_free_prefix(%(vrf_id)s, (""" + damp + """), %(prefix_length)s, %(max_result)s) AS prefix""" + sql = "SELECT * FROM find_free_prefix(%(vrf_id)s, (" + damp + "), %(prefix_length)s, %(max_result)s) AS prefix" v = self._get_vrf(auth, vrf or {}, '') @@ -2993,13 +2797,11 @@ def find_free_prefix(self, auth, vrf, args): res = list() for row in self._curs_pg: - res.append(unicode(row['prefix'])) + res.append(row['prefix']) return res - - - def list_prefix(self, auth, spec = None): + def list_prefix(self, auth, spec=None): """ List prefixes matching the `spec`. * `auth` [BaseAuth] @@ -3019,10 +2821,9 @@ def list_prefix(self, auth, spec = None): understanding. 
""" - self._logger.debug("list_prefix called; spec: %s" % unicode(spec)) - + self._logger.debug("list_prefix called; spec: %", spec) - if type(spec) is dict: + if isinstance(spec, dict): where, params = self._expand_prefix_spec(spec.copy(), 'inp.') else: raise NipapError("invalid prefix specification") @@ -3065,22 +2866,19 @@ def list_prefix(self, auth, spec = None): inp.expires FROM ip_net_plan inp JOIN ip_net_vrf vrf ON (inp.vrf_id = vrf.id) - LEFT JOIN ip_net_pool pool ON (inp.pool_id = pool.id) %s - ORDER BY vrf.rt NULLS FIRST, prefix""" % where + LEFT JOIN ip_net_pool pool ON (inp.pool_id = pool.id) """ + where + """ + ORDER BY vrf.rt NULLS FIRST, prefix""" self._execute(sql, params) res = list() for row in self._curs_pg: pref = dict(row) - pref['display_prefix'] = unicode(pref['display_prefix']) res.append(pref) return res - - - def _db_remove_prefix(self, spec, recursive = False): + def _db_remove_prefix(self, spec, recursive=False): """ Do the underlying database operations to delete a prefix """ if recursive: @@ -3093,13 +2891,11 @@ def _db_remove_prefix(self, spec, recursive = False): else: where, params = self._expand_prefix_spec(spec) - sql = "DELETE FROM ip_net_plan AS p WHERE %s" % where + sql = "DELETE FROM ip_net_plan AS p WHERE " + where self._execute(sql, params) - - @requires_rw - def remove_prefix(self, auth, spec, recursive = False): + def remove_prefix(self, auth, spec, recursive=False): """ Remove prefix matching `spec`. * `auth` [BaseAuth] @@ -3115,7 +2911,7 @@ def remove_prefix(self, auth, spec, recursive = False): understanding. """ - self._logger.debug("remove_prefix called; spec: %s" % unicode(spec)) + self._logger.debug("remove_prefix called; spec: %s", spec) # sanity check - do we have all attributes? if 'id' in spec: @@ -3149,35 +2945,33 @@ def remove_prefix(self, auth, spec, recursive = False): 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, - 'authoritative_source': auth.authoritative_source + 'authoritative_source': auth.authoritative_source, } for p in prefixes: audit_params['prefix_id'] = p['id'] audit_params['prefix_prefix'] = p['prefix'] - audit_params['description'] = 'Removed prefix %s' % p['prefix'] + audit_params['description'] = 'Removed prefix {}'.format(p['prefix']) audit_params['vrf_id'] = p['vrf_id'] audit_params['vrf_rt'] = p['vrf_rt'] audit_params['vrf_name'] = p['vrf_name'] sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) if p['pool_id'] is not None: - pool = self._get_pool(auth, { 'id': p['pool_id'] }) + pool = self._get_pool(auth, {'id': p['pool_id']}) audit_params2 = { 'pool_id': pool['id'], 'pool_name': pool['name'], 'prefix_id': p['id'], 'prefix_prefix': p['prefix'], - 'description': 'Prefix %s removed from pool %s' % (p['prefix'], pool['name']), + 'description': 'Prefix ' + p['prefix'] + ' removed from pool ' + pool['name'], 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, - 'authoritative_source': auth.authoritative_source + 'authoritative_source': auth.authoritative_source, } sql, params = self._sql_expand_insert(audit_params2) - self._execute('INSERT INTO ip_net_log %s' % sql, params) - - + self._execute('INSERT INTO ip_net_log ' + sql, params) def search_prefix(self, auth, query, search_options=None): """ Search prefix list for prefixes matching `query`. 
@@ -3334,16 +3128,17 @@ def search_prefix(self, auth, query, search_options=None): search_options['include_all_parents'] = False else: if search_options['include_all_parents'] not in (True, False): - raise NipapValueError('Invalid value for option ' + - "'include_all_parents'. Only true and false valid. Supplied value :'%s'" % unicode(search_options['include_all_parents'])) + raise NipapValueError( + "Invalid value for option 'include_all_parents'. Only true and false valid. " + "Supplied value :'{}'".format(search_options['include_all_parents'])) # include_children if 'include_all_children' not in search_options: search_options['include_all_children'] = False else: if search_options['include_all_children'] not in (True, False): - raise NipapValueError('Invalid value for option ' + - "'include_all_children'. Only true and false valid. Supplied value: '%s'" % unicode(search_options['include_all_children'])) + raise NipapValueError("Invalid value for option 'include_all_children'. Only true and false valid. " + "Supplied value: '{}'".format(search_options['include_all_children'])) # parents_depth if 'parents_depth' not in search_options: @@ -3352,8 +3147,7 @@ def search_prefix(self, auth, query, search_options=None): try: search_options['parents_depth'] = int(search_options['parents_depth']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'parent_depth'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'parent_depth'. Only integer values allowed.") # children_depth if 'children_depth' not in search_options: @@ -3362,16 +3156,15 @@ def search_prefix(self, auth, query, search_options=None): try: search_options['children_depth'] = int(search_options['children_depth']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'children_depth'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'children_depth'. Only integer values allowed.") # include_neighbors if 'include_neighbors' not in search_options: search_options['include_neighbors'] = False else: if search_options['include_neighbors'] not in (True, False): - raise NipapValueError('Invalid value for option ' + - "'include_neighbors'. Only true and false valid. Supplied value: '%s'" % unicode(search_options['include_neighbors'])) + raise NipapValueError("Invalid value for option 'include_neighbors'. Only true and false valid. " + "Supplied value: '{}'".format(search_options['include_neighbors'])) # max_result if 'max_result' not in search_options: @@ -3383,8 +3176,7 @@ def search_prefix(self, auth, query, search_options=None): try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'max_result'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'max_result'. Only integer values allowed.") # offset if 'offset' not in search_options: @@ -3393,26 +3185,23 @@ def search_prefix(self, auth, query, search_options=None): try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'offset'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'offset'. 
Only integer values allowed.") # parent_prefix - if ('parent_prefix' not in search_options or - search_options['parent_prefix'] is None): + if 'parent_prefix' not in search_options or search_options['parent_prefix'] is None: search_options['parent_prefix'] = None else: try: _ = int(search_options['parent_prefix']) except ValueError: - raise NipapValueError( - "Invalid value '%s' for option 'parent_prefix'. Must be the ID of a prefix." - % search_options['parent_prefix']) + raise NipapValueError("Invalid value '{}' for option 'parent_prefix'. " + "Must be the ID of a prefix.".format(search_options['parent_prefix'])) try: - parent_prefix = self.list_prefix(auth, { 'id': search_options['parent_prefix'] })[0] + parent_prefix = self.list_prefix(auth, {'id': search_options['parent_prefix']})[0] except IndexError: - raise NipapNonExistentError("Parent prefix %s can not be found" % search_options['parent_prefix']) + raise NipapNonExistentError("Parent prefix {} can not be found".format(search_options['parent_prefix'])) - self._logger.debug('search_prefix search_options: %s' % unicode(search_options)) + self._logger.debug('search_prefix search_options: %s', search_options) # translate search options to SQL @@ -3439,7 +3228,8 @@ def search_prefix(self, auth, query, search_options=None): vrf_id = 0 if parent_prefix['vrf_id']: vrf_id = parent_prefix['vrf_id'] - where_parent_prefix = " WHERE (p1.vrf_id = %s AND iprange(p1.prefix) <<= iprange('%s') AND p1.indent <= %s) " % (vrf_id, parent_prefix['prefix'], parent_prefix['indent'] + 1) + where_parent_prefix = " WHERE (p1.vrf_id = %s AND iprange(p1.prefix) <<= iprange('%s') AND " \ + "p1.indent <= %s) " % (vrf_id, parent_prefix['prefix'], parent_prefix['indent'] + 1) left_join = 'LEFT OUTER' else: where_parent_prefix = '' @@ -3450,7 +3240,8 @@ def search_prefix(self, auth, query, search_options=None): else: limit_string = "LIMIT %d" % (search_options['max_result'] + search_options['offset']) - display = '(p1.prefix << p2.display_prefix OR p2.prefix <<= p1.prefix %s) OR (p2.prefix >>= p1.prefix %s)' % (where_parents, where_children) + display = '(p1.prefix << p2.display_prefix OR p2.prefix <<= p1.prefix %s) OR (p2.prefix >>= ' \ + 'p1.prefix %s)' % (where_parents, where_children) where, opt = self._expand_prefix_query(query) sql = """ @@ -3558,8 +3349,8 @@ def search_prefix(self, auth, query, search_options=None): LEFT JOIN ip_net_pool AS pool ON (p1.pool_id = pool.id) -- possible set where conditions, if we are doing a parent_prefix operation """ + where_parent_prefix + """ - ORDER BY vrf_rt_order(vrf.rt) NULLS FIRST, p1.prefix, CASE WHEN p1.prefix = p2.prefix THEN 0 ELSE 1 END OFFSET """ + unicode(search_options['offset']) + ") AS a ORDER BY vrf_rt_order(vrf_rt) NULLS FIRST, prefix" - + ORDER BY vrf_rt_order(vrf.rt) NULLS FIRST, p1.prefix, CASE WHEN p1.prefix = p2.prefix THEN 0 ELSE 1 END + OFFSET """ + str(search_options['offset']) + ") AS a ORDER BY vrf_rt_order(vrf_rt) NULLS FIRST, prefix" self._execute(sql, opt) @@ -3578,9 +3369,7 @@ def search_prefix(self, auth, query, search_options=None): if len(result) >= int(search_options['max_result']): break - return { 'search_options': search_options, 'result': result } - - + return {'search_options': search_options, 'result': result} def smart_search_prefix(self, auth, query_str, search_options=None, extra_query=None): """ Perform a smart search on prefix list. 
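For context on the extra_query handling in the hunk below: the parsed smart-search tree and a caller-supplied extra query are combined by wrapping both in a single 'and' node, so the extra constraints become one more sub-expression for _expand_prefix_query to recurse into. A sketch with made-up leaf values (the rendered SQL is approximate and depends on the actual _operation_map)::

    parsed = {'operator': 'equals', 'val1': 'prefix', 'val2': '10.0.0.0/8'}
    extra_query = {'operator': 'equals', 'val1': 'vrf_id', 'val2': 0}

    query = {'operator': 'and', 'val1': parsed, 'val2': extra_query}
    # search_prefix() then expands this recursively into a WHERE clause
    # along the lines of: ( iprange(prefix) = %s AND vrf_id = %s )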
@@ -3627,7 +3416,7 @@ def smart_search_prefix(self, auth, query_str, search_options=None, extra_query= if search_options is None: search_options = {} - self._logger.debug("smart_search_prefix query string: %s" % query_str) + self._logger.debug("smart_search_prefix query string: %s", query_str) success, query = self._parse_prefix_query(query_str) if not success: @@ -3636,17 +3425,13 @@ def smart_search_prefix(self, auth, query_str, search_options=None, extra_query= 'search_options': search_options, 'result': [], 'error': True, - 'error_message': 'query interpretation failed' + 'error_message': 'query interpretation failed', } if extra_query is not None: - query = { - 'operator': 'and', - 'val1': query, - 'val2': extra_query - } + query = {'operator': 'and', 'val1': query, 'val2': extra_query} - self._logger.debug("smart_search_prefix: query expanded to: %s" % unicode(query)) + self._logger.debug("smart_search_prefix: query expanded to: %s", query) search_result = self.search_prefix(auth, query, search_options) search_result['interpretation'] = query @@ -3654,8 +3439,6 @@ def smart_search_prefix(self, auth, query_str, search_options=None, extra_query= return search_result - - def _parse_prefix_query(self, query_str): """ Parse a smart search query for prefixes @@ -3666,20 +3449,18 @@ def _parse_prefix_query(self, query_str): query = sp.parse(query_str) return query - - # # ASN functions # - def _expand_asn_query(self, query, table_name = None): + def _expand_asn_query(self, query, table_name=None): """ Expand ASN query dict into a WHERE-clause. If you need to prefix each column reference with a table name, that can be supplied via the table_name argument. """ - where = unicode() + where = str() opt = list() # handle table name, can be None @@ -3688,16 +3469,16 @@ def _expand_asn_query(self, query, table_name = None): else: col_prefix = table_name + "." - if type(query['val1']) == dict and type(query['val2']) == dict: + if isinstance(query['val1'], dict) and isinstance(query['val2'], dict): # Sub expression, recurse! 
This is used for boolean operators: AND OR # add parantheses sub_where1, opt1 = self._expand_asn_query(query['val1'], table_name) sub_where2, opt2 = self._expand_asn_query(query['val2'], table_name) try: - where += unicode(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) ) + where += " (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) except KeyError: - raise NipapNoSuchOperatorError("No such operator %s" % unicode(query['operator'])) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) opt += opt1 opt += opt2 @@ -3712,7 +3493,7 @@ def _expand_asn_query(self, query, table_name = None): asn_attr['name'] = 'name' if query['val1'] not in asn_attr: - raise NipapInputError('Search variable \'%s\' unknown' % unicode(query['val1'])) + raise NipapInputError("Search variable '{}' unknown".format(query['val1'])) # workaround for handling equal matches of NULL-values if query['operator'] == 'equals' and query['val2'] is None: @@ -3722,19 +3503,14 @@ def _expand_asn_query(self, query, table_name = None): # build where clause if query['operator'] not in _operation_map: - raise NipapNoSuchOperatorError("No such operator %s" % query['operator']) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) - where = unicode(" %s%s %s %%s " % - ( col_prefix, asn_attr[query['val1']], - _operation_map[query['operator']] ) - ) + where = " %s%s %s %%s " % (col_prefix, asn_attr[query['val1']], _operation_map[query['operator']]) opt.append(query['val2']) return where, opt - - def _expand_asn_spec(self, spec): """ Expand ASN specification to SQL. @@ -3745,21 +3521,21 @@ def _expand_asn_spec(self, spec): name of ASN """ - if type(spec) is not dict: + if not isinstance(spec, dict): raise NipapInputError("asn specification must be a dict") allowed_values = ['asn', 'name'] for a in spec: if a not in allowed_values: - raise NipapExtraneousInputError("extraneous specification key %s" % a) + raise NipapExtraneousInputError("extraneous specification key {}".format(a)) if 'asn' in spec: - if type(spec['asn']) not in (int, long): + if not isinstance(spec['asn'], int): raise NipapValueError("asn specification key 'asn' must be an integer") if 'name' in spec: raise NipapExtraneousInputError("asn specification contain both 'asn' and 'name', specify asn or name") elif 'name' in spec: - if type(spec['name']) != type(''): + if not isinstance(spec['name'], str): raise NipapValueError("asn specification key 'name' must be a string") if 'asn' in spec: raise NipapExtraneousInputError("asn specification contain both 'asn' and 'name', specify asn or name") @@ -3768,8 +3544,6 @@ def _expand_asn_spec(self, spec): return where, params - - def list_asn(self, auth, asn=None): """ List AS numbers matching `spec`. @@ -3790,7 +3564,7 @@ def list_asn(self, auth, asn=None): if asn is None: asn = {} - self._logger.debug("list_asn called; asn: %s" % unicode(asn)) + self._logger.debug("list_asn called; asn: %s", asn) sql = "SELECT * FROM ip_net_asn" params = list() @@ -3809,8 +3583,6 @@ def list_asn(self, auth, asn=None): return res - - @requires_rw def add_asn(self, auth, attr): """ Add AS number to NIPAP. @@ -3828,18 +3600,18 @@ def add_asn(self, auth, attr): understanding. """ - self._logger.debug("add_asn called; attr: %s" % unicode(attr)) + self._logger.debug("add_asn called; attr: %s", attr) # sanity check - do we have all attributes? 
- req_attr = [ 'asn', ] - allowed_attr = [ 'asn', 'name' ] + req_attr = ['asn',] + allowed_attr = ['asn', 'name'] self._check_attr(attr, req_attr, allowed_attr) insert, params = self._sql_expand_insert(attr) sql = "INSERT INTO ip_net_asn " + insert self._execute(sql, params) - asn = self.list_asn(auth, { 'asn': attr['asn'] })[0] + asn = self.list_asn(auth, {'asn': attr['asn']})[0] # write to audit table audit_params = { @@ -3847,16 +3619,14 @@ def add_asn(self, auth, attr): 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source, - 'description': 'Added ASN %s with attr: %s' % (attr['asn'], unicode(attr)) + 'description': 'Added ASN %s with attr: %s' % (attr['asn'], attr) } sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return asn - - @requires_rw def edit_asn(self, auth, asn, attr): """ Edit AS number @@ -3874,19 +3644,18 @@ def edit_asn(self, auth, asn, attr): understanding. """ - self._logger.debug("edit_asn called; asn: %s attr: %s" % - (unicode(asn), unicode(attr))) + self._logger.debug("edit_asn called; asn: %s attr: %s", asn, attr) # sanity check - do we have all attributes? - req_attr = [ ] - allowed_attr = [ 'name', ] + req_attr = [] + allowed_attr = ['name',] self._check_attr(attr, req_attr, allowed_attr) asns = self.list_asn(auth, asn) where, params1 = self._expand_asn_spec(asn) update, params2 = self._sql_expand_update(attr) - params = dict(params2.items() + params1.items()) + params = dict(list(params2.items()) + list(params1.items())) sql = "UPDATE ip_net_asn SET " + update + " WHERE " + where sql += " RETURNING *" @@ -3902,17 +3671,15 @@ def edit_asn(self, auth, asn, attr): 'username': auth.username, 'authenticated_as': auth.authenticated_as, 'full_name': auth.full_name, - 'authoritative_source': auth.authoritative_source + 'authoritative_source': auth.authoritative_source, + 'description': 'Edited ASN %s attr: %s' % (a['asn'], attr), } - audit_params['description'] = 'Edited ASN %s attr: %s' % (unicode(a['asn']), unicode(attr)) sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) + self._execute('INSERT INTO ip_net_log ' + sql, params) return updated_asns - - @requires_rw def remove_asn(self, auth, asn): """ Remove an AS number. @@ -3930,7 +3697,7 @@ def remove_asn(self, auth, asn): understanding. 
""" - self._logger.debug("remove_asn called; asn: %s" % unicode(asn)) + self._logger.debug("remove_asn called; asn: %s", asn) # get list of ASNs to remove before removing them asns = self.list_asn(auth, asn) @@ -3944,15 +3711,13 @@ def remove_asn(self, auth, asn): for a in asns: audit_params = { 'username': auth.username, - 'authenticated_as': auth.authenticated_as, + 'authenticated_as': auth.authenticated_ass, 'full_name': auth.full_name, 'authoritative_source': auth.authoritative_source, - 'description': 'Removed ASN %s' % unicode(a['asn']) + 'description': 'Removed ASN %s' % a['asn'] } sql, params = self._sql_expand_insert(audit_params) - self._execute('INSERT INTO ip_net_log %s' % sql, params) - - + self._execute('INSERT INTO ip_net_log ' + sql, params) def search_asn(self, auth, query, search_options=None): """ Search ASNs for entries matching 'query' @@ -4029,10 +3794,9 @@ def search_asn(self, auth, query, search_options=None): try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'offset'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option'offset'. Only integer values allowed.") - self._logger.debug('search_asn search_options: %s' % unicode(search_options)) + self._logger.debug('search_asn search_options: %s', search_options) opt = None sql = """ SELECT * FROM ip_net_asn """ @@ -4043,16 +3807,14 @@ def search_asn(self, auth, query, search_options=None): where, opt = self._expand_asn_query(query) sql += " WHERE " + where - sql += " ORDER BY asn LIMIT " + unicode(search_options['max_result']) + sql += " ORDER BY asn LIMIT %s" % search_options['max_result'] self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) - return { 'search_options': search_options, 'result': result } - - + return {'search_options': search_options, 'result': result} def smart_search_asn(self, auth, query_str, search_options=None, extra_query=None): """ Perform a smart search operation among AS numbers @@ -4095,26 +3857,22 @@ def smart_search_asn(self, auth, query_str, search_options=None, extra_query=Non if search_options is None: search_options = {} - self._logger.debug("smart_search_asn called; query_str: %s" % query_str) + self._logger.debug("smart_search_asn called; query_str: %s", query_str) success, query = self._parse_asn_query(query_str) if not success: return { - 'interpretation': query, - 'search_options': search_options, - 'result': [], - 'error': True, - 'error_message': 'query interpretaion failed' + 'interpretation': query, + 'search_options': search_options, + 'result': [], + 'error': True, + 'error_message': 'query interpretaion failed', } if extra_query is not None: - query = { - 'operator': 'and', - 'val1': query, - 'val2': extra_query - } + query = {'operator': 'and', 'val1': query, 'val2': extra_query} - self._logger.debug("smart_search_asn; query expanded to: %s" % unicode(query)) + self._logger.debug("smart_search_asn; query expanded to: %s", query) search_result = self.search_asn(auth, query, search_options) search_result['interpretation'] = query @@ -4122,8 +3880,6 @@ def smart_search_asn(self, auth, query_str, search_options=None, extra_query=Non return search_result - - def _parse_asn_query(self, query_str): """ Parse a smart search query for ASNs @@ -4185,25 +3941,23 @@ def _parse_asn_query(self, query_str): }, 'operator': 'and', 'val1': query_part, - 'val2': query + 'val2': query, } return True, query - - # # Tag 
functions # - def _expand_tag_query(self, query, table_name = None): + def _expand_tag_query(self, query, table_name=None): """ Expand Tag query dict into a WHERE-clause. If you need to prefix each column reference with a table name, that can be supplied via the table_name argument. """ - where = unicode() + where = str() opt = list() # handle table name, can be None @@ -4212,16 +3966,16 @@ def _expand_tag_query(self, query, table_name = None): else: col_prefix = table_name + "." - if type(query['val1']) == dict and type(query['val2']) == dict: + if isinstance(query['val1'], dict) and isinstance(query['val2'], dict): # Sub expression, recurse! This is used for boolean operators: AND OR # add parantheses sub_where1, opt1 = self._expand_tag_query(query['val1'], table_name) sub_where2, opt2 = self._expand_tag_query(query['val2'], table_name) try: - where += unicode(" (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) ) + where += " (%s %s %s) " % (sub_where1, _operation_map[query['operator']], sub_where2) except KeyError: - raise NipapNoSuchOperatorError("No such operator %s" % unicode(query['operator'])) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) opt += opt1 opt += opt2 @@ -4235,7 +3989,7 @@ def _expand_tag_query(self, query, table_name = None): tag_attr['name'] = 'name' if query['val1'] not in tag_attr: - raise NipapInputError('Search variable \'%s\' unknown' % unicode(query['val1'])) + raise NipapInputError("Search variable '{}' unknown".format(query['val1'])) # workaround for handling equal matches of NULL-values if query['operator'] == 'equals' and query['val2'] is None: @@ -4245,19 +3999,14 @@ def _expand_tag_query(self, query, table_name = None): # build where clause if query['operator'] not in _operation_map: - raise NipapNoSuchOperatorError("No such operator %s" % query['operator']) + raise NipapNoSuchOperatorError("No such operator {}".format(query['operator'])) - where = unicode(" %s%s %s %%s " % - ( col_prefix, tag_attr[query['val1']], - _operation_map[query['operator']] ) - ) + where = " %s%s %s %%s " % (col_prefix, tag_attr[query['val1']], _operation_map[query['operator']]) opt.append(query['val2']) return where, opt - - def search_tag(self, auth, query, search_options=None): """ Search Tags for entries matching 'query' @@ -4323,8 +4072,7 @@ def search_tag(self, auth, query, search_options=None): try: search_options['max_result'] = int(search_options['max_result']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'max_result'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'max_result'. Only integer values allowed.") # offset if 'offset' not in search_options: @@ -4333,32 +4081,26 @@ def search_tag(self, auth, query, search_options=None): try: search_options['offset'] = int(search_options['offset']) except (ValueError, TypeError): - raise NipapValueError('Invalid value for option' + - ''' 'offset'. Only integer values allowed.''') + raise NipapValueError("Invalid value for option 'offset'. 
Only integer values allowed.") - self._logger.debug('search_tag search_options: %s' % unicode(search_options)) + self._logger.debug('search_tag search_options: %s', search_options) opt = None - sql = """ SELECT * FROM (SELECT DISTINCT unnest(tags) AS name FROM - ip_net_plan) AS a """ + sql = """ SELECT * FROM (SELECT DISTINCT unnest(tags) AS name FROM ip_net_plan) AS a """ # add where clause if we have any search terms if query != {}: - where, opt = self._expand_tag_query(query) sql += " WHERE " + where - sql += " ORDER BY name LIMIT " + unicode(search_options['max_result']) + sql += " ORDER BY name LIMIT %s" % search_options['max_result'] self._execute(sql, opt) result = list() for row in self._curs_pg: result.append(dict(row)) - return { 'search_options': search_options, 'result': result } - - - + return {'search_options': search_options, 'result': result} # vim: et ts=4 : diff --git a/nipap/nipap/daemon.py b/nipap/nipap/daemon.py index c721d027f..215e9d3b3 100644 --- a/nipap/nipap/daemon.py +++ b/nipap/nipap/daemon.py @@ -22,8 +22,8 @@ __version__ = "0.2" # Standard Python modules. -import os # Miscellaneous OS interfaces. -import sys # System-specific parameters and functions. +import os # Miscellaneous OS interfaces. +import sys # System-specific parameters and functions. # Default daemon parameters. # File mode creation mask of the daemon. @@ -36,161 +36,163 @@ MAXFD = 1024 # The standard I/O file descriptors are redirected to /dev/null by default. -if (hasattr(os, "devnull")): - REDIRECT_TO = os.devnull +if hasattr(os, "devnull"): + REDIRECT_TO = os.devnull else: - REDIRECT_TO = "/dev/null" + REDIRECT_TO = "/dev/null" + def createDaemon(): - """Detach a process from the controlling terminal and run it in the - background as a daemon. - """ - - try: - # Fork a child process so the parent can exit. This returns control to - # the command-line or shell. It also guarantees that the child will not - # be a process group leader, since the child receives a new process ID - # and inherits the parent's process group ID. This step is required - # to insure that the next call to os.setsid is successful. - pid = os.fork() - except OSError as exc: - raise Exception, "%s [%d]" % (exc.strerror, exc.errno) - - if (pid == 0): # The first child. - # To become the session leader of this new session and the process group - # leader of the new process group, we call os.setsid(). The process is - # also guaranteed not to have a controlling terminal. - os.setsid() - - # Is ignoring SIGHUP necessary? - # - # It's often suggested that the SIGHUP signal should be ignored before - # the second fork to avoid premature termination of the process. The - # reason is that when the first child terminates, all processes, e.g. - # the second child, in the orphaned group will be sent a SIGHUP. - # - # "However, as part of the session management system, there are exactly - # two cases where SIGHUP is sent on the death of a process: - # - # 1) When the process that dies is the session leader of a session that - # is attached to a terminal device, SIGHUP is sent to all processes - # in the foreground process group of that terminal device. - # 2) When the death of a process causes a process group to become - # orphaned, and one or more processes in the orphaned group are - # stopped, then SIGHUP and SIGCONT are sent to all members of the - # orphaned group." [2] - # - # The first case can be ignored since the child is guaranteed not to have - # a controlling terminal. The second case isn't so easy to dismiss. 
- # The process group is orphaned when the first child terminates and - # POSIX.1 requires that every STOPPED process in an orphaned process - # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the - # second child is not STOPPED though, we can safely forego ignoring the - # SIGHUP signal. In any case, there are no ill-effects if it is ignored. - # - # import signal # Set handlers for asynchronous events. - # signal.signal(signal.SIGHUP, signal.SIG_IGN) - - try: - # Fork a second child and exit immediately to prevent zombies. This - # causes the second child process to be orphaned, making the init - # process responsible for its cleanup. And, since the first child is - # a session leader without a controlling terminal, it's possible for - # it to acquire one by opening a terminal in the future (System V- - # based systems). This second fork guarantees that the child is no - # longer a session leader, preventing the daemon from ever acquiring - # a controlling terminal. - pid = os.fork() # Fork a second child. - except OSError as exc: - raise Exception, "%s [%d]" % (exc.strerror, exc.errno) - - if (pid == 0): # The second child. - # Since the current working directory may be a mounted filesystem, we - # avoid the issue of not being able to unmount the filesystem at - # shutdown time by changing it to the root directory. - os.chdir(WORKDIR) - # We probably don't want the file mode creation mask inherited from - # the parent, so we give the child complete control over permissions. - os.umask(UMASK) - else: - # exit() or _exit()? See below. - os._exit(0) # Exit parent (the first child) of the second child. - else: - # exit() or _exit()? - # _exit is like exit(), but it doesn't call any functions registered - # with atexit (and on_exit) or any registered signal handlers. It also - # closes any open file descriptors. Using exit() may cause all stdio - # streams to be flushed twice and any temporary files may be unexpectedly - # removed. It's therefore recommended that child branches of a fork() - # and the parent branch(es) of a daemon use _exit(). - os._exit(0) # Exit parent of the first child. - - # Close all open file descriptors. This prevents the child from keeping - # open any file descriptors inherited from the parent. There is a variety - # of methods to accomplish this task. Three are listed below. - # - # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum - # number of open file descriptors to close. If it doesn't exists, use - # the default value (configurable). - # - # try: - # maxfd = os.sysconf("SC_OPEN_MAX") - # except (AttributeError, ValueError): - # maxfd = MAXFD - # - # OR - # - # if (os.sysconf_names.has_key("SC_OPEN_MAX")): - # maxfd = os.sysconf("SC_OPEN_MAX") - # else: - # maxfd = MAXFD - # - # OR - # - # Use the getrlimit method to retrieve the maximum file descriptor number - # that can be opened by this process. If there is not limit on the - # resource, use the default value. - # - import resource # Resource usage information. - maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] - if (maxfd == resource.RLIM_INFINITY): - maxfd = MAXFD - - # FIXME: this breaks our tpxmld, so it's commented for now //kll - # Iterate through and close all file descriptors. -# for fd in range(0, maxfd): -# try: -# os.close(fd) -# except OSError: # ERROR, fd wasn't open to begin with (ignored) -# pass - - # Redirect the standard I/O file descriptors to the specified file. 
Since
-    # the daemon has no controlling terminal, most daemons redirect stdin,
-    # stdout, and stderr to /dev/null. This is done to prevent side-effects
-    # from reads and writes to the standard I/O file descriptors.
-
-    # This call to open is guaranteed to return the lowest file descriptor,
-    # which will be 0 (stdin), since it was closed above.
-    os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
-
-    # Duplicate standard input to standard output and standard error.
-    os.dup2(0, 1) # standard output (1)
-    os.dup2(0, 2) # standard error (2)
-
-    return(0)
+    """Detach a process from the controlling terminal and run it in the
+    background as a daemon.
+    """
+
+    try:
+        # Fork a child process so the parent can exit. This returns control to
+        # the command-line or shell. It also guarantees that the child will not
+        # be a process group leader, since the child receives a new process ID
+        # and inherits the parent's process group ID. This step is required
+        # to ensure that the next call to os.setsid is successful.
+        pid = os.fork()
+    except OSError as exc:
+        raise Exception("{} [{:d}]".format(exc.strerror, exc.errno))
+
+    if pid == 0:  # The first child.
+        # To become the session leader of this new session and the process group
+        # leader of the new process group, we call os.setsid(). The process is
+        # also guaranteed not to have a controlling terminal.
+        os.setsid()
+
+        # Is ignoring SIGHUP necessary?
+        #
+        # It's often suggested that the SIGHUP signal should be ignored before
+        # the second fork to avoid premature termination of the process. The
+        # reason is that when the first child terminates, all processes, e.g.
+        # the second child, in the orphaned group will be sent a SIGHUP.
+        #
+        # "However, as part of the session management system, there are exactly
+        # two cases where SIGHUP is sent on the death of a process:
+        #
+        # 1) When the process that dies is the session leader of a session that
+        # is attached to a terminal device, SIGHUP is sent to all processes
+        # in the foreground process group of that terminal device.
+        # 2) When the death of a process causes a process group to become
+        # orphaned, and one or more processes in the orphaned group are
+        # stopped, then SIGHUP and SIGCONT are sent to all members of the
+        # orphaned group." [2]
+        #
+        # The first case can be ignored since the child is guaranteed not to have
+        # a controlling terminal. The second case isn't so easy to dismiss.
+        # The process group is orphaned when the first child terminates and
+        # POSIX.1 requires that every STOPPED process in an orphaned process
+        # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
+        # second child is not STOPPED though, we can safely forego ignoring the
+        # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
+        #
+        # import signal # Set handlers for asynchronous events.
+        # signal.signal(signal.SIGHUP, signal.SIG_IGN)
+
+        try:
+            # Fork a second child and exit immediately to prevent zombies. This
+            # causes the second child process to be orphaned, making the init
+            # process responsible for its cleanup. And, since the first child is
+            # a session leader without a controlling terminal, it's possible for
+            # it to acquire one by opening a terminal in the future (System V-
+            # based systems). This second fork guarantees that the child is no
+            # longer a session leader, preventing the daemon from ever acquiring
+            # a controlling terminal.
+            pid = os.fork()  # Fork a second child.
+ except OSError as exc: + raise Exception("{} [{0:d}]".format(exc.strerror, exc.errno)) + + if pid == 0: # The second child. + # Since the current working directory may be a mounted filesystem, we + # avoid the issue of not being able to unmount the filesystem at + # shutdown time by changing it to the root directory. + os.chdir(WORKDIR) + # We probably don't want the file mode creation mask inherited from + # the parent, so we give the child complete control over permissions. + os.umask(UMASK) + else: + # exit() or _exit()? See below. + os._exit(0) # Exit parent (the first child) of the second child. + else: + # exit() or _exit()? + # _exit is like exit(), but it doesn't call any functions registered + # with atexit (and on_exit) or any registered signal handlers. It also + # closes any open file descriptors. Using exit() may cause all stdio + # streams to be flushed twice and any temporary files may be unexpectedly + # removed. It's therefore recommended that child branches of a fork() + # and the parent branch(es) of a daemon use _exit(). + os._exit(0) # Exit parent of the first child. + + # Close all open file descriptors. This prevents the child from keeping + # open any file descriptors inherited from the parent. There is a variety + # of methods to accomplish this task. Three are listed below. + # + # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum + # number of open file descriptors to close. If it doesn't exists, use + # the default value (configurable). + # + # try: + # maxfd = os.sysconf("SC_OPEN_MAX") + # except (AttributeError, ValueError): + # maxfd = MAXFD + # + # OR + # + # if (os.sysconf_names.has_key("SC_OPEN_MAX")): + # maxfd = os.sysconf("SC_OPEN_MAX") + # else: + # maxfd = MAXFD + # + # OR + # + # Use the getrlimit method to retrieve the maximum file descriptor number + # that can be opened by this process. If there is not limit on the + # resource, use the default value. + # + import resource # Resource usage information. + + maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + if maxfd == resource.RLIM_INFINITY: + maxfd = MAXFD + + # FIXME: this breaks our tpxmld, so it's commented for now //kll + # Iterate through and close all file descriptors. + # for fd in range(0, maxfd): + # try: + # os.close(fd) + # except OSError: # ERROR, fd wasn't open to begin with (ignored) + # pass + + # Redirect the standard I/O file descriptors to the specified file. Since + # the daemon has no controlling terminal, most daemons redirect stdin, + # stdout, and stderr to /dev/null. This is done to prevent side-effects + # from reads and writes to the standard I/O file descriptors. + + # This call to open is guaranteed to return the lowest file descriptor, + # which will be 0 (stdin), since it was closed above. + os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) + + # Duplicate standard input to standard output and standard error. + os.dup2(0, 1) # standard output (1) + os.dup2(0, 2) # standard error (2) + + return 0 -if __name__ == "__main__": - retCode = createDaemon() +if __name__ == "__main__": + retCode = createDaemon() - # The code, as is, will create a new file in the root directory, when - # executed with superuser privileges. The file will contain the following - # daemon related process parameters: return code, process ID, parent - # process group ID, session ID, user ID, effective user ID, real group ID, - # and the effective group ID. Notice the relationship between the daemon's - # process ID, process group ID, and its parent's process ID. 
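+    # For reference, the fork/setsid/fork sequence above condenses to the
+    # following minimal sketch (illustrative only -- createDaemon() additionally
+    # takes care of file descriptors and I/O redirection):
+    #
+    #     import os, sys
+    #
+    #     def daemonize():
+    #         if os.fork() > 0:
+    #             sys.exit(0)   # parent returns to the shell
+    #         os.setsid()       # new session, no controlling terminal
+    #         if os.fork() > 0:
+    #             os._exit(0)   # session leader exits; grandchild is the daemon
+    #         os.chdir(WORKDIR)
+    #         os.umask(UMASK)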
+ # The code, as is, will create a new file in the root directory, when + # executed with superuser privileges. The file will contain the following + # daemon related process parameters: return code, process ID, parent + # process group ID, session ID, user ID, effective user ID, real group ID, + # and the effective group ID. Notice the relationship between the daemon's + # process ID, process group ID, and its parent's process ID. - procParams = """ + procParams = """ return code = %s process ID = %s parent process ID = %s @@ -201,9 +203,8 @@ def createDaemon(): real group ID = %s effective group ID = %s """ % (retCode, os.getpid(), os.getppid(), os.getpgrp(), os.getsid(0), - os.getuid(), os.geteuid(), os.getgid(), os.getegid()) - - open("createDaemon.log", "w").write(procParams + "\n") + os.getuid(), os.geteuid(), os.getgid(), os.getegid()) - sys.exit(retCode) + open("createDaemon.log", "w").write(procParams + "\n") + sys.exit(retCode) diff --git a/nipap/nipap/errors.py b/nipap/nipap/errors.py index 6cecf4475..b91514f8b 100644 --- a/nipap/nipap/errors.py +++ b/nipap/nipap/errors.py @@ -1,4 +1,3 @@ - class NipapError(Exception): """ NIPAP base error class. """ diff --git a/nipap/nipap/nipapconfig.py b/nipap/nipap/nipapconfig.py index ca89c7349..00591bf6f 100644 --- a/nipap/nipap/nipapconfig.py +++ b/nipap/nipap/nipapconfig.py @@ -1,12 +1,12 @@ -import ConfigParser +import configparser -class NipapConfig(ConfigParser.SafeConfigParser): +class NipapConfig(configparser.ConfigParser): """ Makes configuration data available. Implemented as a class with a shared state; once an instance has been created, new instances with the same state can be obtained by calling - the custructor again. + the constructor again. """ __shared_state = {} @@ -26,12 +26,10 @@ def __init__(self, cfg_path=None, default=None): # First time - create new instance! 
self._cfg_path = cfg_path - ConfigParser.SafeConfigParser.__init__(self, default) + configparser.ConfigParser.__init__(self, default, inline_comment_prefixes=";#") self.read_file() - - def read_file(self): """ Read the configuration file """ @@ -41,12 +39,10 @@ def read_file(self): return try: - cfg_fp = open(self._cfg_path, 'r') - self.readfp(cfg_fp) + self.read([self._cfg_path]) except IOError as exc: raise NipapConfigError(str(exc)) - class NipapConfigError(Exception): pass diff --git a/nipap/nipap/smart_parsing.py b/nipap/nipap/smart_parsing.py index 01468c744..1bd91e09c 100644 --- a/nipap/nipap/smart_parsing.py +++ b/nipap/nipap/smart_parsing.py @@ -1,15 +1,16 @@ -#!/usr/bin/python +#!/usr/bin/python3 # -*- coding: utf-8 -*- -from itertools import izip_longest +from itertools import zip_longest import logging import re import IPy -from pyparsing import Combine, Forward, Group, Literal, nestedExpr, OneOrMore, ParseResults, quotedString, Regex, QuotedString, Word, ZeroOrMore, alphanums, nums, oneOf +from pyparsing import Combine, Forward, Group, Literal, nestedExpr, OneOrMore, ParseResults, quotedString, Regex, \ + QuotedString, Word, ZeroOrMore, alphanums, nums, oneOf -from errors import * +from .errors import * class SmartParser: @@ -21,7 +22,6 @@ class SmartParser: def __init__(self): self._logger = logging.getLogger(self.__class__.__name__) - def _is_ipv4(self, ip): """ Return true if given arg is a valid IPv4 address """ @@ -34,7 +34,6 @@ def _is_ipv4(self, ip): return True return False - def _is_ipv6(self, ip): """ Return true if given arg is a valid IPv6 address """ @@ -47,12 +46,11 @@ def _is_ipv6(self, ip): return True return False - def _get_afi(self, ip): """ Return address-family (4 or 6) for IP or None if invalid address """ - parts = unicode(ip).split("/") + parts = ip.split("/") if len(parts) == 1: # just an address if self._is_ipv4(ip): @@ -66,17 +64,17 @@ def _get_afi(self, ip): try: pl = int(parts[1]) except ValueError: - # if casting parts[1] to int failes, this is not a prefix.. + # if casting parts[1] to int fails, this is not a prefix.. return None if self._is_ipv4(parts[0]): - if pl >= 0 and pl <= 32: + if 0 <= pl <= 32: # prefix mask must be between 0 and 32 return 4 # otherwise error return None elif self._is_ipv6(parts[0]): - if pl >= 0 and pl <= 128: + if 0 <= pl <= 128: # prefix mask must be between 0 and 128 return 6 # otherwise error @@ -87,7 +85,6 @@ def _get_afi(self, ip): # more than two parts.. this is neither an address or a prefix return None - def _string_to_ast(self, input_string): """ Parse a smart search string and return it in an AST like form """ @@ -98,26 +95,44 @@ def _string_to_ast(self, input_string): # we try to be Unicode / internationally friendly we need to match much # much more. Trying to expand a word class to catch it all seems futile # so we match on everything *except* a few things, like our operators - comp_word = Regex("[^*\s=><~!]+") - word = Regex("[^*\s=><~!]+").setResultsName('word') + comp_word = Regex(r"[^*\s=><~!]+") + word = Regex(r"[^*\s=><~!]+").setResultsName('word') # numbers comp_number = Word(nums) number = Word(nums).setResultsName('number') # IPv4 address ipv4_oct = Regex("((2(5[0-5]|[0-4][0-9])|[01]?[0-9][0-9]?))") - comp_ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct*3)) - ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct*3)).setResultsName('ipv4_address') + comp_ipv4_address = Combine(ipv4_oct + ('.' + ipv4_oct * 3)) + ipv4_address = Combine(ipv4_oct + ('.' 
+ ipv4_oct * 3)).setResultsName('ipv4_address') # IPv6 address - ipv6_address = Regex("((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:)))(%.+)?").setResultsName('ipv6_address') - ipv6_prefix = Combine(ipv6_address + Regex("/(12[0-8]|1[01][0-9]|[0-9][0-9]?)")).setResultsName('ipv6_prefix') + ipv6_address = Regex( + r"((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|" + r"(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)" + r"(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|" + r":((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|" + r"(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3})|" + r"((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|" + r":))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|" + r"((:[0-9A-Fa-f]{1,4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|" + r":))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|" + r"((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|" + r":))|(([0-9A-Fa-f]{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|" + r"((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|" + r":))|(:(((:[0-9A-Fa-f]{1,4}){1,7})|" + r"((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|" + r":)))(%.+)?" 
+ ).setResultsName('ipv6_address') + ipv6_prefix = Combine( + ipv6_address + Regex("/(12[0-8]|1[01][0-9]|[0-9][0-9]?)") + ).setResultsName('ipv6_prefix') # VRF RTs of the form number:number vrf_rt = Combine((comp_ipv4_address | comp_number) + Literal(':') + comp_number).setResultsName('vrf_rt') # tags - tags = Combine( Literal('#') + comp_word).setResultsName('tag') + tags = Combine(Literal('#') + comp_word).setResultsName('tag') # operators for matching match_op = oneOf(' '.join(self.match_operators)).setResultsName('operator') @@ -135,26 +150,21 @@ def _string_to_ast(self, input_string): enclosed = Forward() parens = nestedExpr('(', ')', content=enclosed) - enclosed << ( - parens | atom - ).setResultsName('nested') + enclosed << (parens | atom).setResultsName('nested') content = Forward() - content << ( - ZeroOrMore(enclosed) - ) + content << (ZeroOrMore(enclosed)) res = content.parseString(input_string) return res - def _ast_to_dictsql(self, input_ast): """ """ # Add implicit AND operator between expressions if there is no explicit # operator specified. ast = [] - for token, lookahead in izip_longest(input_ast, input_ast[1:]): + for token, lookahead in zip_longest(input_ast, input_ast[1:]): if token.getName() == "boolean": # only add boolean operator if it is NOT the last token if lookahead is not None: @@ -169,27 +179,22 @@ def _ast_to_dictsql(self, input_ast): # if next token is NOT a boolean, add implicit AND ast.append(ParseResults('and', 'boolean')) - # dictSql stack - dss = { - 'operator': None, - 'val1': None, - 'val2': None - } + dss = {'operator': None, 'val1': None, 'val2': None} success = True dse = None - for part, lookahead in izip_longest(ast, ast[1:]): - self._logger.debug("part: %s %s" % (part, part.getName())) + for part, lookahead in zip_longest(ast, ast[1:]): + self._logger.debug("part: {} {}".format(part, part.getName())) # handle operators joining together expressions if part.getName() == 'boolean': op = part[0].lower() dss['operator'] = op dss['interpretation'] = { - 'interpretation': op, - 'operator': op, - 'error': False - } + 'interpretation': op, + 'operator': op, + 'error': False, + } continue # string expr that we expand to dictsql expression @@ -206,25 +211,20 @@ def _ast_to_dictsql(self, input_ast): elif part.getName() in ('ipv6_prefix', 'ipv6_address', 'word', 'tag', 'vrf_rt', 'quoted_string'): # dict sql expression dse = self._string_to_dictsql(part) - self._logger.debug('string part: %s => %s' % (part, dse)) + self._logger.debug('string part: %s => %s', part, dse) else: - raise ParserError("Unhandled part in AST: %s %s" % (part, - part.getName())) + raise ParserError("Unhandled part in AST: {} {}".format(part, part.getName())) if dss['val1'] is None: - self._logger.debug('val1 not set, using dse: %s' % unicode(dse)) + self._logger.debug('val1 not set, using dse: %s', dse) dss['val1'] = dse else: - self._logger.debug("val1 is set, operator is '%s', val2 = dst: %s" % (dss['operator'], unicode(dse))) + self._logger.debug("val1 is set, operator is '%s', val2 = dst: %s", dss['operator'], dse) dss['val2'] = dse if lookahead is not None: if dss['val1'] is not None and dss['val2'] is not None: - dss = { - 'operator': None, - 'val1': dss, - 'val2': None - } + dss = {'operator': None, 'val1': dss, 'val2': None} # special handling when AST is only one expression, then we overwrite # the dss with dse @@ -236,13 +236,11 @@ def _ast_to_dictsql(self, input_ast): # return the final composed stack of dictsql expressions return success, dss - def _string_to_dictsql(self, 
string): """ Do magic matching of single words or quoted string """ raise NotImplementedError() - def _parse_expr(self, part): """ Parse matching expression in form key value @@ -250,25 +248,25 @@ def _parse_expr(self, part): vlan > 1 node = FOO-BAR """ - self._logger.debug("parsing expression: " + unicode(part)) + self._logger.debug("parsing expression: %s", part) key, op, val = part success = True dictsql = { + 'operator': op, + 'val1': key, + 'val2': val, + 'interpretation': { + 'string': key + op + val, + 'interpretation': 'expression', + 'attribute': key, 'operator': op, - 'val1': key, - 'val2': unicode(val), - 'interpretation': { - 'string': key + op + val, - 'interpretation': 'expression', - 'attribute': key, - 'operator': op, - 'error': False - } - } + 'error': False, + }, + } if key in self.attributes: - if type(self.attributes[key]) is list: + if isinstance(self.attributes[key], list): if val not in self.attributes[key]: dictsql['interpretation']['error'] = True dictsql['interpretation']['error_message'] = 'invalid value' @@ -281,8 +279,6 @@ def _parse_expr(self, part): return success, dictsql - - def parse(self, input_string): # check for unclosed quotes/parentheses paired_exprs = nestedExpr('(', ')') | quotedString @@ -298,8 +294,8 @@ def parse(self, input_string): 'attribute': 'text', 'operator': None, 'error': True, - 'error_message': None - } + 'error_message': None, + }, } if '"' in stripped_line or "'" in stripped_line: @@ -313,7 +309,6 @@ def parse(self, input_string): return self._ast_to_dictsql(ast) - class PoolSmartParser(SmartParser): attributes = { 'default_type': True, @@ -336,94 +331,88 @@ class PoolSmartParser(SmartParser): 'used_prefixes_v4': True, 'used_prefixes_v6': True, 'vrf': True, - } - + } def _string_to_dictsql(self, part): """ Do magic matching of single words or quoted string """ - self._logger.debug("parsing string: " + unicode(part[0]) + " of type: " + part.getName()) + self._logger.debug("parsing string: %s of type: %s", part[0], part.getName()) if part.getName() == 'tag': - self._logger.debug("Query part '" + part[0] + "' interpreted as tag") + self._logger.debug("Query part '%s' interpreted as tag", part[0]) dictsql = { - 'interpretation': { - 'string': part[0], - 'interpretation': 'tag', - 'attribute': 'tag', - 'operator': 'equals_any', - 'error': False - }, + 'interpretation': { + 'string': part[0], + 'interpretation': 'tag', + 'attribute': 'tag', 'operator': 'equals_any', - 'val1': 'tags', - 'val2': part[0][1:] - } + 'error': False, + }, + 'operator': 'equals_any', + 'val1': 'tags', + 'val2': part[0][1:], + } elif part.getName() == 'vrf_rt': - self._logger.debug("Query part '" + part.vrf_rt + "' interpreted as VRF RT") + self._logger.debug("Query part '%s' interpreted as VRF RT", part.vrf_rt) # TODO: enable this, our fancy new interpretation - dictsql = { - 'interpretation': { - 'attribute': 'VRF RT', - 'interpretation': 'vrf_rt', - 'operator': 'equals', - 'string': part.vrf_rt, - 'error': False - }, - 'operator': 'equals', - 'val1': 'vrf_rt', - 'val2': part.vrf_rt - } + # dictsql = { + # 'interpretation': { + # 'attribute': 'VRF RT', + # 'interpretation': 'vrf_rt', + # 'operator': 'equals', + # 'string': part.vrf_rt, + # 'error': False, + # }, + # 'operator': 'equals', + # 'val1': 'vrf_rt', + # 'val2': part.vrf_rt, + # } # using old interpretation for the time being to make sure we align # with old smart search interpreter dictsql = { - 'interpretation': { - 'attribute': 'name or description', - 'interpretation': 'text', - 'operator': 'regex', 
- 'string': part.vrf_rt, - 'error': False - }, - 'operator': 'or', - 'val1': { - 'operator': 'regex_match', - 'val1': 'name', - 'val2': part.vrf_rt - }, - 'val2': { - 'operator': 'regex_match', - 'val1': 'description', - 'val2': part.vrf_rt - } - } + 'interpretation': { + 'attribute': 'name or description', + 'interpretation': 'text', + 'operator': 'regex', + 'string': part.vrf_rt, + 'error': False, + }, + 'operator': 'or', + 'val1': { + 'operator': 'regex_match', + 'val1': 'name', + 'val2': part.vrf_rt, + }, + 'val2': { + 'operator': 'regex_match', + 'val1': 'description', + 'val2': part.vrf_rt, + }, + } else: - self._logger.debug("Query part '" + part[0] + "' interpreted as text") + self._logger.debug("Query part '%s' interpreted as text", part[0]) dictsql = { - 'interpretation': { - 'attribute': 'name or description', - 'interpretation': 'text', - 'operator': 'regex', - 'string': part[0], - 'error': False - }, - 'operator': 'or', - 'val1': { - 'operator': 'regex_match', - 'val1': 'name', - 'val2': part[0] - }, - 'val2': { - 'operator': 'regex_match', - 'val1': 'description', - 'val2': part[0] - } - } + 'interpretation': { + 'attribute': 'name or description', + 'interpretation': 'text', + 'operator': 'regex', + 'string': part[0], + 'error': False, + }, + 'operator': 'or', + 'val1': {'operator': 'regex_match', 'val1': 'name', 'val2': part[0]}, + 'val2': { + 'operator': 'regex_match', + 'val1': 'description', + 'val2': part[0], + }, + } return dictsql - class PrefixSmartParser(SmartParser): attributes = { 'added': True, @@ -455,77 +444,73 @@ class PrefixSmartParser(SmartParser): 'used_addreses': True, 'vlan': True, 'vrf': True, - } + } def _string_to_dictsql(self, part): """ Do magic matching of single words or quoted string """ - self._logger.debug("parsing string: " + unicode(part[0]) + " of type: " + part.getName()) + self._logger.debug("parsing string: %s of type: %s", part[0], part.getName()) if part.getName() == 'tag': - self._logger.debug("Query part '" + part[0] + "' interpreted as tag") + self._logger.debug("Query part '%s' interpreted as tag", part[0]) dictsql = { - 'interpretation': { - 'string': part[0], - 'interpretation': '(inherited) tag', - 'attribute': 'tag', - 'operator': 'equals_any', - 'error': False - }, - 'operator': 'or', - 'val1': { - 'operator': 'equals_any', - 'val1': 'tags', - 'val2': part[0][1:] - }, - 'val2': { - 'operator': 'equals_any', - 'val1': 'inherited_tags', - 'val2': part[0][1:] - } + 'interpretation': { + 'string': part[0], + 'interpretation': '(inherited) tag', + 'attribute': 'tag', + 'operator': 'equals_any', + 'error': False, + }, + 'operator': 'or', + 'val1': {'operator': 'equals_any', 'val1': 'tags', 'val2': part[0][1:]}, + 'val2': { + 'operator': 'equals_any', + 'val1': 'inherited_tags', + 'val2': part[0][1:], + }, } elif part.getName() == 'vrf_rt': - self._logger.debug("Query part '" + part.vrf_rt + "' interpreted as VRF RT") + self._logger.debug("Query part '%s' interpreted as VRF RT", part.vrf_rt) dictsql = { - 'interpretation': { - 'attribute': 'VRF RT', - 'interpretation': 'vrf_rt', - 'operator': 'equals', - 'string': part.vrf_rt, - 'error': False - }, + 'interpretation': { + 'attribute': 'VRF RT', + 'interpretation': 'vrf_rt', 'operator': 'equals', - 'val1': 'vrf_rt', - 'val2': part.vrf_rt - } + 'string': part.vrf_rt, + 'error': False, + }, + 'operator': 'equals', + 'val1': 'vrf_rt', + 'val2': part.vrf_rt, + } elif part.getName() == 'ipv6_address': - self._logger.debug("Query part '" + part.ipv6_address + "' interpreted as IPv6 address") 
+ self._logger.debug("Query part '%s' interpreted as IPv6 address", part.ipv6_address) dictsql = { 'interpretation': { 'string': part.ipv6_address, 'interpretation': 'IPv6 address', 'attribute': 'prefix', 'operator': 'contains_equals', - 'error': False + 'error': False, }, 'operator': 'contains_equals', 'val1': 'prefix', - 'val2': part.ipv6_address + 'val2': part.ipv6_address, } elif part.getName() == 'ipv6_prefix': - self._logger.debug("Query part '" + part.ipv6_prefix[0] + "' interpreted as IPv6 prefix") + self._logger.debug("Query part '%' interpreted as IPv6 prefix", part.ipv6_prefix[0]) - strict_prefix = unicode(IPy.IP(part.ipv6_prefix[0], make_net=True)) + strict_prefix = str(IPy.IP(part.ipv6_prefix[0], make_net=True)) interp = { - 'string': part.ipv6_prefix[0], - 'interpretation': 'IPv6 prefix', - 'attribute': 'prefix', - 'operator': 'contained_within_equals', - 'error': False - } + 'string': part.ipv6_prefix[0], + 'interpretation': 'IPv6 prefix', + 'attribute': 'prefix', + 'operator': 'contained_within_equals', + 'error': False, + } if part.ipv6_prefix[0] != strict_prefix: interp['strict_prefix'] = strict_prefix @@ -533,7 +518,7 @@ def _string_to_dictsql(self, part): 'interpretation': interp, 'operator': 'contained_within_equals', 'val1': 'prefix', - 'val2': strict_prefix + 'val2': strict_prefix, } else: @@ -541,7 +526,7 @@ def _string_to_dictsql(self, part): # using pyparsing we do a bit of good ol parsing here if self._get_afi(part[0]) == 4 and len(part[0].split('/')) == 2: - self._logger.debug("Query part '" + part[0] + "' interpreted as prefix") + self._logger.debug("Query part '%s' interpreted as prefix", part[0]) address, prefix_length = part[0].split('/') # complete a prefix to it's fully expanded form @@ -551,15 +536,15 @@ def _string_to_dictsql(self, part): address += '.0' prefix = address + '/' + prefix_length - strict_prefix = unicode(IPy.IP(part[0], make_net=True)) + strict_prefix = str(IPy.IP(part[0], make_net=True)) interp = { - 'string': part[0], - 'interpretation': 'IPv4 prefix', - 'attribute': 'prefix', - 'operator': 'contained_within_equals', - 'error': False - } + 'string': part[0], + 'interpretation': 'IPv4 prefix', + 'attribute': 'prefix', + 'operator': 'contained_within_equals', + 'error': False, + } if prefix != part[0]: interp['expanded'] = prefix @@ -571,80 +556,79 @@ def _string_to_dictsql(self, part): 'interpretation': interp, 'operator': 'contained_within_equals', 'val1': 'prefix', - 'val2': strict_prefix + 'val2': strict_prefix, } # IPv4 address # split on dot to make sure we have all four octets before we do a # search elif self._get_afi(part[0]) == 4 and len(part[0].split('.')) == 4: - self._logger.debug("Query part '" + part[0] + "' interpreted as prefix") - address = unicode(IPy.IP(part[0])) + self._logger.debug("Query part '%s' interpreted as prefix", part[0]) + address = str(IPy.IP(part[0])) dictsql = { 'interpretation': { 'string': address, 'interpretation': 'IPv4 address', 'attribute': 'prefix', 'operator': 'contains_equals', - 'error': False + 'error': False, }, 'operator': 'contains_equals', 'val1': 'prefix', - 'val2': address + 'val2': address, } else: # Description or comment - self._logger.debug("Query part '" + part[0] + "' interpreted as text") + self._logger.debug("Query part '%s' interpreted as text", part[0]) dictsql = { - 'interpretation': { - 'string': part[0], - 'interpretation': 'text', - 'attribute': 'description or comment or node or order_id or customer_id', - 'operator': 'regex', - 'error': False - }, + 'interpretation': { + 
'string': part[0], + 'interpretation': 'text', + 'attribute': 'description or comment or node or order_id or customer_id', + 'operator': 'regex', + 'error': False, + }, + 'operator': 'or', + 'val1': { 'operator': 'or', 'val1': { 'operator': 'or', 'val1': { 'operator': 'or', 'val1': { - 'operator': 'or', - 'val1': { - 'operator': 'regex_match', - 'val1': 'comment', - 'val2': part[0] - }, - 'val2': { - 'operator': 'regex_match', - 'val1': 'description', - 'val2': part[0] - } - }, + 'operator': 'regex_match', + 'val1': 'comment', + 'val2': part[0], + }, 'val2': { 'operator': 'regex_match', - 'val1': 'node', - 'val2': part[0] - } + 'val1': 'description', + 'val2': part[0], }, + }, 'val2': { 'operator': 'regex_match', - 'val1': 'order_id', - 'val2': part[0] - }, + 'val1': 'node', + 'val2': part[0], }, + }, 'val2': { 'operator': 'regex_match', - 'val1': 'customer_id', - 'val2': part[0] - } - } + 'val1': 'order_id', + 'val2': part[0], + }, + }, + 'val2': { + 'operator': 'regex_match', + 'val1': 'customer_id', + 'val2': part[0], + }, + } return dictsql - class VrfSmartParser(SmartParser): attributes = { 'description': True, @@ -663,110 +647,100 @@ class VrfSmartParser(SmartParser): def _string_to_dictsql(self, part): """ Do magic matching of single words or quoted string """ - self._logger.debug("parsing string: " + unicode(part[0]) + " of type: " + part.getName()) + self._logger.debug("parsing string: %s of type: %s", part[0], part.getName()) if part.getName() == 'tag': - self._logger.debug("Query part '" + part[0] + "' interpreted as tag") + self._logger.debug("Query part '%s' interpreted as tag", part[0]) dictsql = { - 'interpretation': { - 'string': part[0], - 'interpretation': 'tag', - 'attribute': 'tag', - 'operator': 'equals_any', - 'error': False - }, + 'interpretation': { + 'string': part[0], + 'interpretation': 'tag', + 'attribute': 'tag', 'operator': 'equals_any', - 'val1': 'tags', - 'val2': part[0][1:] - } + 'error': False, + }, + 'operator': 'equals_any', + 'val1': 'tags', + 'val2': part[0][1:], + } elif part.getName() == 'vrf_rt': - self._logger.debug("Query part '" + part.vrf_rt + "' interpreted as VRF RT") + self._logger.debug("Query part '%s' interpreted as VRF RT", part.vrf_rt) # TODO: enable this, our fancy new interpretation - dictsql = { - 'interpretation': { - 'attribute': 'VRF RT', - 'interpretation': 'vrf_rt', - 'operator': 'equals', - 'string': part.vrf_rt, - 'error': False - }, - 'operator': 'equals', - 'val1': 'vrf_rt', - 'val2': part.vrf_rt - } + # dictsql = { + # 'interpretation': { + # 'attribute': 'VRF RT', + # 'interpretation': 'vrf_rt', + # 'operator': 'equals', + # 'string': part.vrf_rt, + # 'error': False, + # }, + # 'operator': 'equals', + # 'val1': 'vrf_rt', + # 'val2': part.vrf_rt, + # s} # using old interpretation for the time being to make sure we align # with old smart search interpreter dictsql = { - 'interpretation': { - 'string': part.vrf_rt, - 'interpretation': 'text', - 'attribute': 'vrf or name or description', - 'operator': 'regex', - 'error': False - }, + 'interpretation': { + 'string': part.vrf_rt, + 'interpretation': 'text', + 'attribute': 'vrf or name or description', + 'operator': 'regex', + 'error': False, + }, + 'operator': 'or', + 'val1': { 'operator': 'or', 'val1': { - 'operator': 'or', - 'val1': { - 'operator': 'regex_match', - 'val1': 'name', - 'val2': part.vrf_rt - }, - 'val2': { - 'operator': 'regex_match', - 'val1': 'description', - 'val2': part.vrf_rt - } + 'operator': 'regex_match', + 'val1': 'name', + 'val2': part.vrf_rt, }, 'val2': 
{ 'operator': 'regex_match', - 'val1': 'rt', - 'val2': part.vrf_rt - } - } + 'val1': 'description', + 'val2': part.vrf_rt, + }, + }, + 'val2': {'operator': 'regex_match', 'val1': 'rt', 'val2': part.vrf_rt}, + } else: - self._logger.debug("Query part '" + part[0] + "' interpreted as text") + self._logger.debug("Query part '%s' interpreted as text", part[0]) dictsql = { - 'interpretation': { - 'string': part[0], - 'interpretation': 'text', - 'attribute': 'vrf or name or description', - 'operator': 'regex', - 'error': False - }, + 'interpretation': { + 'string': part[0], + 'interpretation': 'text', + 'attribute': 'vrf or name or description', + 'operator': 'regex', + 'error': False, + }, + 'operator': 'or', + 'val1': { 'operator': 'or', 'val1': { - 'operator': 'or', - 'val1': { - 'operator': 'regex_match', - 'val1': 'name', - 'val2': part[0] - }, - 'val2': { - 'operator': 'regex_match', - 'val1': 'description', - 'val2': part[0] - } + 'operator': 'regex_match', + 'val1': 'name', + 'val2': part[0], }, 'val2': { 'operator': 'regex_match', - 'val1': 'rt', - 'val2': part[0] - } - } + 'val1': 'description', + 'val2': part[0], + }, + }, + 'val2': {'operator': 'regex_match', 'val1': 'rt', 'val2': part[0]}, + } return dictsql - class ParserError(Exception): """ General parser error """ - if __name__ == '__main__': # set logging format LOG_FORMAT = "%(asctime)s: %(module)-10s %(levelname)-8s %(message)s" @@ -776,12 +750,14 @@ class ParserError(Exception): logger.setLevel(logging.DEBUG) p = VrfSmartParser() - #dictsql, interpretation = p.parse('core (country=SE or country = NL OR (damp AND "foo bar")') - #dictsql, interpretation = p.parse('core (country=SE or country = NL OR (damp AND "foo bar"))') + # dictsql, interpretation = p.parse('core (country=SE or country = NL OR (damp AND "foo bar")') + # dictsql, interpretation = p.parse('core (country=SE or country = NL OR (damp AND "foo bar"))') import sys + dictsql = p.parse(' '.join(sys.argv[1:])) import pprint - print "----------" - pp = pprint.PrettyPrinter(indent = 4) + + print("----------") + pp = pprint.PrettyPrinter(indent=4) pp.pprint(dictsql) - print "----------" + print("----------") diff --git a/nipap/nipap/xmlrpc.py b/nipap/nipap/xmlrpc.py index 86ff65f25..b4c11fcd8 100755 --- a/nipap/nipap/xmlrpc.py +++ b/nipap/nipap/xmlrpc.py @@ -10,14 +10,15 @@ import time import pytz from functools import wraps -from flask import Flask +from flask import Flask, current_app from flask import request, Response -from flaskext.xmlrpc import XMLRPCHandler, Fault +from flask_xmlrpcre.xmlrpcre import XMLRPCHandler, Fault +from flask_compress import Compress -from nipapconfig import NipapConfig -from backend import Nipap, NipapError +from .nipapconfig import NipapConfig +from .backend import Nipap, NipapError import nipap -from authlib import AuthFactory, AuthError +from .authlib import AuthFactory, AuthError def setup(app): @@ -29,14 +30,13 @@ def setup(app): return app - def _mangle_prefix(res): """ Mangle prefix result """ # fugly cast from large numbers to string to deal with XML-RPC - res['total_addresses'] = unicode(res['total_addresses']) - res['used_addresses'] = unicode(res['used_addresses']) - res['free_addresses'] = unicode(res['free_addresses']) + res['total_addresses'] = str(res['total_addresses']) + res['used_addresses'] = str(res['used_addresses']) + res['free_addresses'] = str(res['free_addresses']) # postgres has notion of infinite while datetime hasn't, if expires # is equal to the max datetime we assume it is infinity and instead @@ -62,11 
+62,11 @@ def requires_auth(f):
     """ Class decorator for XML-RPC functions that requires auth
     """
     @wraps(f)
-
     def decorated(self, *args, **kwargs):
         """
         """
+        self.logger.debug("authenticating call with args %s and kwargs %s", args, kwargs)
         # Fetch auth options from args
         auth_options = {}
         nipap_args = {}

         # validate function arguments
         if len(args) == 1:
             nipap_args = args[0]
         else:
-            self.logger.debug("Malformed request: got %d parameters" % len(args))
-            raise Fault(1000, ("NIPAP API functions take exactly 1 argument (%d given)") % len(args))
+            self.logger.debug("Malformed request: got %s parameters", len(args))
+            raise Fault(1000, "NIPAP API functions take exactly 1 argument ({} given)".format(len(args)))

-        if type(nipap_args) != dict:
+        if not isinstance(nipap_args, dict):
             self.logger.debug("Function argument is not struct")
-            raise Fault(1000, ("Function argument must be XML-RPC struct/Python dict (Python %s given)." %
-                type(nipap_args).__name__ ))
+            raise Fault(1000, ("Function argument must be XML-RPC struct/Python dict (Python {} given).".format(
+                type(nipap_args).__name__)))

         # fetch auth options
         try:
             auth_options = nipap_args['auth']
-            if type(auth_options) is not dict:
+            if not isinstance(auth_options, dict):
                 raise ValueError()
         except (KeyError, ValueError):
             self.logger.debug("Missing/invalid authentication options in request.")
@@ -134,11 +134,48 @@ def decorated(self, *args, **kwargs):
         new_args = dict(args[0])
         new_args['auth'] = auth

+        self.logger.debug('Call authenticated - calling.. with new_args: %s', new_args)
         return f(self, *(new_args,), **kwargs)

     return decorated


+def xmlrpc_bignum2str(res, keys=['num_prefixes', 'total_addresses', 'used_addresses', 'free_addresses']):
+    """
+    Cast large numbers to strings to deal with XML-RPC's integer limits.
+    Performance is on par with the per-function casting blocks this helper replaces, while readability improves.
+    Since the targeted keys all start with ['num_', 'total_', 'used_', 'free_'], a variant based on
+    .startswith() was also tried, but it performed worse.
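+
+    Example (illustrative values; each counter key is expanded with '_v4'/'_v6')::
+
+        res = {'id': 1,
+               'num_prefixes_v4': 1, 'num_prefixes_v6': 2 ** 64,
+               'total_addresses_v4': 0, 'total_addresses_v6': 0,
+               'used_addresses_v4': 0, 'used_addresses_v6': 0,
+               'free_addresses_v4': 0, 'free_addresses_v6': 0}
+        xmlrpc_bignum2str(res)
+        res['num_prefixes_v6']  # -> '18446744073709551616'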
+ + :param dict[str, dict] res: psql result to cast + :param list[str] keys: list of keys to cast to string if required + :rtype: dict[str, dict] + """ + if isinstance(res, dict): + if 'result' in res: + for entry in res['result']: + for v in ['_v4', '_v6']: + for key in [k+v for k in keys]: + if entry[key] is not None and not isinstance(entry[key], str): + entry[key] = str(entry[key]) + elif 'id' in res: + for v in ['_v4', '_v6']: + for key in [k + v for k in keys]: + if res[key] is not None and not isinstance(res[key], str): + res[key] = str(res[key]) + else: + raise ValueError('Illegal result: {}'.format(res)) + + elif isinstance(res, list): + for entry in res: + for v in ['_v4', '_v6']: + for key in [k+v for k in keys]: + if entry[key] is not None and not isinstance(entry[key], str): + entry[key] = str(entry[key]) + else: + raise ValueError('Illegal result: {}'.format(res)) + return res + class NipapXMLRPC: """ NIPAP XML-RPC API @@ -146,8 +183,7 @@ class NipapXMLRPC: def __init__(self): self.nip = Nipap() self.logger = logging.getLogger() - - + self.logger.setLevel(logging.DEBUG) @requires_auth def echo(self, args): @@ -172,8 +208,6 @@ def echo(self, args): if args.get('message') is not None: return args.get('message') - - @requires_auth def version(self, args): """ Returns nipapd version @@ -182,8 +216,6 @@ def version(self, args): """ return nipap.__version__ - - @requires_auth def db_version(self, args): """ Returns schema version of nipap psql db @@ -192,8 +224,6 @@ def db_version(self, args): """ return self.nip._get_db_version() - - # # VRF FUNCTIONS # @@ -212,20 +242,12 @@ def add_vrf(self, args): """ try: res = self.nip.add_vrf(args.get('auth'), args.get('attr')) - # fugly cast from large numbers to string to deal with XML-RPC - for val in ( 'num_prefixes_v4', 'num_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - res[val] = unicode(res[val]) - + res = xmlrpc_bignum2str(res, ['num_prefixes', 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def remove_vrf(self, args): @@ -241,14 +263,12 @@ def remove_vrf(self, args): try: self.nip.remove_vrf(args.get('auth'), args.get('vrf')) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def list_vrf(self, args): - """ List VRFs. + """List VRFs. 
Valid keys in the `args`-struct: @@ -263,19 +283,12 @@ def list_vrf(self, args): res = self.nip.list_vrf(args.get('auth'), args.get('vrf')) # fugly cast from large numbers to string to deal with XML-RPC - for vrf in res: - for val in ( 'num_prefixes_v4', 'num_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - vrf[val] = unicode(vrf[val]) + res = xmlrpc_bignum2str(res, ['num_prefixes', 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def edit_vrf(self, args): @@ -294,19 +307,12 @@ def edit_vrf(self, args): res = self.nip.edit_vrf(args.get('auth'), args.get('vrf'), args.get('attr')) # fugly cast from large numbers to string to deal with XML-RPC - for vrf in res: - for val in ( 'num_prefixes_v4', 'num_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - vrf[val] = unicode(vrf[val]) + res = xmlrpc_bignum2str(res, ['num_prefixes', 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def search_vrf(self, args): @@ -329,19 +335,12 @@ def search_vrf(self, args): res = self.nip.search_vrf(args.get('auth'), args.get('query'), args.get('search_options') or {}) # fugly cast from large numbers to string to deal with XML-RPC - for vrf in res['result']: - for val in ( 'num_prefixes_v4', 'num_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - vrf[val] = unicode(vrf[val]) + res = xmlrpc_bignum2str(res, ['num_prefixes', 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def smart_search_vrf(self, args): @@ -361,24 +360,20 @@ def smart_search_vrf(self, args): search string and the search options used. 
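+
+        Illustrative client-side call (endpoint, credentials and query string
+        are assumed example values, not defined by this patch)::
+
+            import xmlrpc.client
+
+            srv = xmlrpc.client.ServerProxy('http://user:pass@localhost:1337/XMLRPC')
+            res = srv.smart_search_vrf({'auth': {'authoritative_source': 'example'},
+                                        'query_string': 'core',
+                                        'search_options': {'max_result': 10}})
+            print(res['result'])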
""" try: - res = self.nip.smart_search_vrf(args.get('auth'), - args.get('query_string'), args.get('search_options', {}), - args.get('extra_query')) + res = self.nip.smart_search_vrf( + args.get('auth'), + args.get('query_string'), + args.get('search_options', {}), + args.get('extra_query'), + ) # fugly cast from large numbers to string to deal with XML-RPC - for vrf in res['result']: - for val in ( 'num_prefixes_v4', 'num_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - vrf[val] = unicode(vrf[val]) + res = xmlrpc_bignum2str(res, ['num_prefixes', 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) # # POOL FUNCTIONS @@ -400,22 +395,13 @@ def add_pool(self, args): res = self.nip.add_pool(args.get('auth'), args.get('attr')) # fugly cast from large numbers to string to deal with XML-RPC - for val in ( 'member_prefixes_v4', 'member_prefixes_v6', - 'used_prefixes_v4', 'used_prefixes_v6', 'free_prefixes_v4', - 'free_prefixes_v6', 'total_prefixes_v4', - 'total_prefixes_v6', 'total_addresses_v4', - 'total_addresses_v6', 'used_addresses_v4', - 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - if res[val] is not None: - res[val] = unicode(res[val]) + res = xmlrpc_bignum2str(res, ['member_prefixes', 'used_prefixes', 'free_prefixes', 'total_prefixes', + 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def remove_pool(self, args): @@ -431,10 +417,8 @@ def remove_pool(self, args): try: self.nip.remove_pool(args.get('auth'), args.get('pool')) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def list_pool(self, args): @@ -453,23 +437,13 @@ def list_pool(self, args): res = self.nip.list_pool(args.get('auth'), args.get('pool')) # fugly cast from large numbers to string to deal with XML-RPC - for pool in res: - for val in ( 'member_prefixes_v4', 'member_prefixes_v6', - 'used_prefixes_v4', 'used_prefixes_v6', - 'free_prefixes_v4', 'free_prefixes_v6', - 'total_prefixes_v4', 'total_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', - 'free_addresses_v4', 'free_addresses_v6'): - if pool[val] is not None: - pool[val] = unicode(pool[val]) + res = xmlrpc_bignum2str(res, ['member_prefixes', 'used_prefixes', 'free_prefixes', 'total_prefixes', + 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def edit_pool(self, args): @@ -488,23 +462,13 @@ def edit_pool(self, args): res = self.nip.edit_pool(args.get('auth'), args.get('pool'), args.get('attr')) # fugly cast from large numbers to string to deal with XML-RPC - for pool in res: - for val in ( 'member_prefixes_v4', 'member_prefixes_v6', - 'used_prefixes_v4', 'used_prefixes_v6', 'free_prefixes_v4', - 
'free_prefixes_v6', 'total_prefixes_v4', - 'total_prefixes_v6', 'total_addresses_v4', - 'total_addresses_v6', 'used_addresses_v4', - 'used_addresses_v6', 'free_addresses_v4', - 'free_addresses_v6'): - if pool[val] is not None: - pool[val] = unicode(pool[val]) + res = xmlrpc_bignum2str(res, ['member_prefixes', 'used_prefixes', 'free_prefixes', 'total_prefixes', + 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def search_pool(self, args): @@ -527,23 +491,13 @@ def search_pool(self, args): res = self.nip.search_pool(args.get('auth'), args.get('query'), args.get('search_options') or {}) # fugly cast from large numbers to string to deal with XML-RPC - for pool in res['result']: - for val in ( 'member_prefixes_v4', 'member_prefixes_v6', - 'used_prefixes_v4', 'used_prefixes_v6', - 'free_prefixes_v4', 'free_prefixes_v6', - 'total_prefixes_v4', 'total_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', - 'free_addresses_v4', 'free_addresses_v6'): - if pool[val] is not None: - pool[val] = unicode(pool[val]) + res = xmlrpc_bignum2str(res, ['member_prefixes', 'used_prefixes', 'free_prefixes', 'total_prefixes', + 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def smart_search_pool(self, args): @@ -563,28 +517,21 @@ def smart_search_pool(self, args): query string and the search options used. 
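+
+        Background for the string-casting in the body below: XML-RPC's integer
+        type is 32-bit, so IPv6-sized counters cannot be marshalled as ints and
+        are returned as strings instead. A quick standard-library illustration::
+
+            import xmlrpc.client
+
+            xmlrpc.client.dumps((2 ** 128,))       # OverflowError: int exceeds XML-RPC limits
+            xmlrpc.client.dumps((str(2 ** 128),))  # fine, travels as a string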
""" try: - res = self.nip.smart_search_pool(args.get('auth'), - args.get('query_string'), args.get('search_options') or {}, - args.get('extra_query')) + res = self.nip.smart_search_pool( + args.get('auth'), + args.get('query_string'), + args.get('search_options') or {}, + args.get('extra_query'), + ) # fugly cast from large numbers to string to deal with XML-RPC - for pool in res['result']: - for val in ( 'member_prefixes_v4', 'member_prefixes_v6', - 'used_prefixes_v4', 'used_prefixes_v6', - 'free_prefixes_v4', 'free_prefixes_v6', - 'total_prefixes_v4', 'total_prefixes_v6', - 'total_addresses_v4', 'total_addresses_v6', - 'used_addresses_v4', 'used_addresses_v6', - 'free_addresses_v4', 'free_addresses_v6'): - if pool[val] is not None: - pool[val] = unicode(pool[val]) + res = xmlrpc_bignum2str(res, ['member_prefixes', 'used_prefixes', 'free_prefixes', 'total_prefixes', + 'total_addresses', 'used_addresses', 'free_addresses']) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) # # PREFIX FUNCTIONS @@ -611,10 +558,8 @@ def add_prefix(self, args): res = _mangle_prefix(res) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def list_prefix(self, args): @@ -639,14 +584,12 @@ def list_prefix(self, args): prefix = _mangle_prefix(prefix) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def edit_prefix(self, args): - """ Edit prefix. + """Edit prefix. Valid keys in the `args`-struct: @@ -655,7 +598,7 @@ def edit_prefix(self, args): * `prefix` [struct] Prefix attributes which describes what prefix(es) to edit. * `attr` [struct] - Attribuets to set on the new prefix. + Attributes to set on the new prefix. 
""" try: res = self.nip.edit_prefix(args.get('auth'), args.get('prefix'), args.get('attr')) @@ -664,10 +607,8 @@ def edit_prefix(self, args): prefix = _mangle_prefix(prefix) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def remove_prefix(self, args): @@ -685,10 +626,8 @@ def remove_prefix(self, args): try: return self.nip.remove_prefix(args.get('auth'), args.get('prefix'), args.get('recursive')) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def search_prefix(self, args): @@ -717,10 +656,8 @@ def search_prefix(self, args): prefix = _mangle_prefix(prefix) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def smart_search_prefix(self, args): @@ -747,17 +684,23 @@ def smart_search_prefix(self, args): """ try: - res = self.nip.smart_search_prefix(args.get('auth'), - args.get('query_string'), args.get('search_options') or {}, - args.get('extra_query')) + self.logger.debug('Entering ssp') + res = self.nip.smart_search_prefix( + args.get('auth'), + args.get('query_string'), + args.get('search_options') or {}, + args.get('extra_query'), + ) # mangle result for prefix in res['result']: prefix = _mangle_prefix(prefix) return res except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - + self.logger.debug(str(exc)) + self.logger.exception('unhandled..', exc) + raise Fault(exc.error_code, str(exc)) + except Exception as e: + self.logger.exception('unhandled..', e) @requires_auth @@ -776,10 +719,8 @@ def find_free_prefix(self, args): try: return self.nip.find_free_prefix(args.get('auth'), args.get('vrf'), args.get('args')) except NipapError as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) # # ASN FUNCTIONS @@ -801,10 +742,8 @@ def add_asn(self, args): try: return self.nip.add_asn(args.get('auth'), args.get('attr')) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def remove_asn(self, args): @@ -821,10 +760,8 @@ def remove_asn(self, args): try: self.nip.remove_asn(args.get('auth'), args.get('asn')) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def list_asn(self, args): @@ -843,10 +780,8 @@ def list_asn(self, args): try: return self.nip.list_asn(args.get('auth'), args.get('asn') or {}) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def edit_asn(self, args): @@ -865,10 +800,8 @@ def edit_asn(self, args): try: return self.nip.edit_asn(args.get('auth'), args.get('asn'), args.get('attr')) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise 
Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def search_asn(self, args): @@ -891,10 +824,8 @@ def search_asn(self, args): try: return self.nip.search_asn(args.get('auth'), args.get('query'), args.get('search_options') or {}) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) @requires_auth def smart_search_asn(self, args): @@ -915,14 +846,18 @@ def smart_search_asn(self, args): """ try: - return self.nip.smart_search_asn(args.get('auth'), - args.get('query_string'), args.get('search_options') or {}, - args.get('extra_query')) + return self.nip.smart_search_asn( + args.get('auth'), + args.get('query_string'), + args.get('search_options') or {}, + args.get('extra_query'), + ) except (AuthError, NipapError) as exc: - self.logger.debug(unicode(exc)) - raise Fault(exc.error_code, unicode(exc)) - + self.logger.debug(str(exc)) + raise Fault(exc.error_code, str(exc)) if __name__ == '__main__': + if 'app' not in locals() and 'app' not in globals(): + app = current_app() app.run() diff --git a/nipap/nipapd b/nipap/nipapd index b0a1c6ae4..d52b26b22 100755 --- a/nipap/nipapd +++ b/nipap/nipapd @@ -1,13 +1,13 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # vim: et sw=4 sts=4 : import fcntl -import logging import logging.handlers +import logging import argparse import os import sys -import ConfigParser +import configparser import ssl from tornado.netutil import bind_sockets @@ -41,11 +41,7 @@ def exit_cleanup(): except psutil.NoSuchProcess: return - # Handle API change in psutil 2.0.0 - if int(psutil.__version__[0]) <= 2: - children = p.get_children - else: - children = p.children + children = p.children for pid in children(recursive=True): os.kill(pid.pid, signal.SIGTERM) @@ -61,18 +57,19 @@ def handle_sigterm(sig, frame): """ exit_cleanup() # and make a clean exit ourselves - #sys.exit(0) + # sys.exit(0) + # register signal handler for SIGTERM signal.signal(signal.SIGTERM, handle_sigterm) - def drop_privileges(uid_name='nobody', gid_name='nogroup'): if os.getuid() != 0: raise NipapError("non-root user cannot drop privileges") - import pwd, grp + import pwd + import grp # Get the uid/gid from the name uid = pwd.getpwnam(uid_name).pw_uid gid = grp.getgrnam(gid_name).gr_gid @@ -93,23 +90,29 @@ if __name__ == '__main__': parser.add_argument('--auto-install-db', action='store_true', help='automatically install db schema') parser.add_argument('--auto-upgrade-db', action='store_true', help='automatically upgrade db schema') parser.add_argument('-d', '--debug', action='store_true', default=None, dest='debug', help='enable debugging') - parser.add_argument('-f', '--foreground', action='store_true', default=None, dest='foreground', help='run in foreground and log to stdout') + parser.add_argument('-f', '--foreground', action='store_true', default=None, dest='foreground', + help='run in foreground and log to stdout') parser.add_argument('-l', '--listen', type=str, metavar='ADDRESS', help='listen to IPv4/6 ADDRESS') parser.add_argument('-p', '--port', dest='port', type=int, help='listen on TCP port PORT') - parser.add_argument('-s', '--ssl-port', dest='ssl_port', type=int, help='listen with SSL on TCP port PORT') - parser.add_argument('-c', '--config', dest='config_file', type=str, default='/etc/nipap/nipap.conf', help='read configuration from file CONFIG_FILE') + parser.add_argument('-s', 
'--ssl-port', dest='ssl_port', type=int, + help='listen with SSL on TCP port PORT') + parser.add_argument('-c', '--config', dest='config_file', type=str, default='/etc/nipap/nipap.conf', + help='read configuration from file CONFIG_FILE') parser.add_argument('-P', '--pid-file', type=str, help='write a PID file to PID_FILE') - parser.add_argument('--no-pid-file', action='store_true', default=False, help='turn off writing PID file (overrides config file)') + parser.add_argument('--no-pid-file', action='store_true', default=False, + help='turn off writing PID file (overrides config file)') parser.add_argument('--version', action='store_true', help='display version information and exit') - parser.add_argument("--db-version", dest="dbversion", action="store_true", help="display database schema version information and exit") + parser.add_argument("--db-version", dest="dbversion", action="store_true", + help="display database schema version information and exit") # Arguments overwriting config settings - cfg_args = [ 'debug', 'foreground', 'port', 'config_file' ] + cfg_args = ['debug', 'foreground', 'port', 'config_file'] args = parser.parse_args() if args.version: import nipap - print "nipapd version:", nipap.__version__ + + print("nipapd version:", nipap.__version__) sys.exit(0) # set logging format @@ -142,9 +145,9 @@ if __name__ == '__main__': try: cfg = NipapConfig(args.config_file, default) - except NipapConfigError, exc: + except NipapConfigError as exc: if args.config_file: - print >> sys.stderr, "The specified configuration file ('" + args.config_file + "') does not exist" + print("The specified configuration file ('" + args.config_file + "') does not exist", file=sys.stderr) sys.exit(1) # if no config file is specified, we'll live with our defaults @@ -154,9 +157,9 @@ if __name__ == '__main__': for arg_dest in cfg_args: if arg_dest in args_dict and args_dict[arg_dest] is not None: try: - cfg.set('nipapd', arg_dest, unicode(args_dict[arg_dest])) - except ConfigParser.NoSectionError as exc: - print >> sys.stderr, "The configuration file contains errors:", exc + cfg.set('nipapd', arg_dest, str(args_dict[arg_dest])) + except configparser.NoSectionError as exc: + print("The configuration file contains errors:", exc, file=sys.stderr) sys.exit(1) # Validate configuration before forking, to be able to print error message to user @@ -179,52 +182,57 @@ if __name__ == '__main__': try: drop_privileges(run_user, run_group) except NipapError: - print >> sys.stderr, ("nipapd is configured to drop privileges and run as user '%s' and group '%s', \n" - "but was not started as root and can therefore not drop privileges") % (run_user, run_group) + print(("nipapd is configured to drop privileges and run as user '%s' and group '%s', \n" + "but was not started as root and can therefore not drop privileges") % (run_user, run_group), + file=sys.stderr) sys.exit(1) except KeyError: - print >> sys.stderr, "Could not drop privileges to user '%s' and group '%s'" % (run_user, run_group) + print("Could not drop privileges to user '%s' and group '%s'" % (run_user, run_group), file=sys.stderr) sys.exit(1) from nipap.backend import Nipap + try: nip = Nipap(args.auto_install_db, args.auto_upgrade_db) except NipapDatabaseSchemaError as exc: - print >> sys.stderr, "ERROR:", str(exc) - print >> sys.stderr, "HINT: You can automatically install required extensions and the nipap schema with --auto-install-db" + print("ERROR:", str(exc), file=sys.stderr) + print("HINT: You can automatically install required extensions and the 
diff --git a/nipap/requirements.txt b/nipap/requirements.txt
index 5ab4e0b23..dfba709b9 100644
--- a/nipap/requirements.txt
+++ b/nipap/requirements.txt
@@ -2,7 +2,7 @@ zipp==1.2.0
 importlib_metadata==2.1.3
 Flask==1.1.2
 Flask-Compress==1.9.0
-Flask-XML-RPC==0.1.2
+flask-xml-rpc-re==0.1.4
 Flask-RESTful==0.3.8
 requests==2.25.1
 IPy==1.01
@@ -13,14 +13,15 @@ backports.ssl-match-hostname==3.7.0.1
 certifi==2020.12.5
 itsdangerous==1.1.0
 parsedatetime==2.6
-psutil==1.2.1 # rq.filter: >= 1.0,<2.0
+psutil==5.7.2
 psycopg2==2.8.6 --no-binary psycopg2
 pyparsing==2.4.7
 python-dateutil==2.8.1
 # optional dependency on ldap
-#python-ldap==2.4.19
+python-ldap==3.3.1
 pytz==2021.1
 pysqlite==2.8.3
-tornado==5.1
+tornado==6.0.4
 wsgiref==0.1.2
-pyjwt==1.5.3
\ No newline at end of file
+pyjwt==1.5.3
+docutils==0.16
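
The --no-binary marker on the psycopg2 line asks pip to build it from source,
which needs the libpq headers and a C toolchain on the build host. An
illustrative install sequence on a Debian/Ubuntu host::

    apt-get install -y gcc python3-dev libpq-dev
    pip3 --no-input install -r requirements.txt
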
subprocess.call(["rst2man", "nipap-passwd.man.rst", "nipap-passwd.1"]) + publish_cmdline(writer=manpage.Writer(), argv=["nipapd.man.rst", "nipapd.8"]) + publish_cmdline(writer=manpage.Writer(), argv=["nipap-passwd.man.rst", "nipap-passwd.1"]) except OSError as exc: - print >> sys.stderr, "rst2man failed to run:", str(exc) + print("rst2man failed to run: %s" % str(exc), file=sys.stderr) sys.exit(1) files = [ - ('/etc/nipap/', ['nipap.conf.dist']), - ('/usr/sbin/', ['nipapd', 'nipap-passwd']), - ('/usr/share/nipap/sql/', [ - 'sql/upgrade-1-2.plsql', - 'sql/upgrade-2-3.plsql', - 'sql/upgrade-3-4.plsql', - 'sql/upgrade-4-5.plsql', - 'sql/upgrade-5-6.plsql', - 'sql/upgrade-6-7.plsql', - 'sql/functions.plsql', - 'sql/triggers.plsql', - 'sql/ip_net.plsql' - ]), - ('/usr/share/man/man8/', ['nipapd.8']), - ('/usr/share/man/man1/', ['nipap-passwd.1']) - ] + ('/etc/nipap/', ['nipap.conf.dist']), + ('/usr/sbin/', ['nipapd', 'nipap-passwd']), + ('/usr/share/nipap/sql/', [ + 'sql/upgrade-1-2.plsql', + 'sql/upgrade-2-3.plsql', + 'sql/upgrade-3-4.plsql', + 'sql/upgrade-4-5.plsql', + 'sql/upgrade-5-6.plsql', + 'sql/upgrade-6-7.plsql', + 'sql/functions.plsql', + 'sql/triggers.plsql', + 'sql/ip_net.plsql', + ], + ), + ('/usr/share/man/man8/', ['nipapd.8']), + ('/usr/share/man/man1/', ['nipap-passwd.1']), + ] return files long_desc = open('README.rst').read() short_desc = long_desc.split('\n')[0].split(' - ')[1].strip() +with open('requirements.txt', 'r') as f: + + requires = [re.sub(r'\s*([\w_\-\.\d]+([<>=]+\S+|)).*', r'\1', x.strip()) for x in f if + x.strip() and re.match(r'^\s*\w+', x.strip())] setup( - name = 'nipap', - version = nipap.__version__, - description = short_desc, - long_description = long_desc, - author = nipap.__author__, - author_email = nipap.__author_email__, - license = nipap.__license__, - url = nipap.__url__, - packages = ['nipap'], - keywords = ['nipap'], - requires = ['ldap', 'sqlite3', 'IPy', 'psycopg2', 'parsedatetime'], - data_files = get_data_files(), - classifiers = [ + name='nipap', + version=nipap.__version__, + description=short_desc, + long_description=long_desc, + author=nipap.__author__, + author_email=nipap.__author_email__, + license=nipap.__license__, + url=nipap.__url__, + packages=['nipap'], + keywords=['nipap'], + install_requires=requires, + data_files=get_data_files(), + classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Intended Audience :: System Administrators', @@ -62,7 +68,7 @@ def get_data_files(): 'License :: OSI Approved :: MIT License', 'Natural Language :: English', 'Operating System :: POSIX :: Linux', - 'Programming Language :: Python :: 2.6', - 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware' - ] + 'Programming Language :: Python :: 3.6', + 'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware', + ], ) diff --git a/nipap/wait-for-it.sh b/nipap/wait-for-it.sh new file mode 100755 index 000000000..071c2bee3 --- /dev/null +++ b/nipap/wait-for-it.sh @@ -0,0 +1,178 @@ +#!/usr/bin/env bash +# Use this script to test if a given TCP host/port are available + +WAITFORIT_cmdname=${0##*/} + +echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi } + +usage() +{ + cat << USAGE >&2 +Usage: + $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args] + -h HOST | --host=HOST Host or IP under test + -p PORT | --port=PORT TCP port under test + Alternatively, you specify the host and port as host:port + -s | --strict Only execute subcommand if the test succeeds + -q | --quiet Don't output any status messages + -t 
diff --git a/nipap/wait-for-it.sh b/nipap/wait-for-it.sh
new file mode 100755
index 000000000..071c2bee3
--- /dev/null
+++ b/nipap/wait-for-it.sh
@@ -0,0 +1,178 @@
+#!/usr/bin/env bash
+# Use this script to test if a given TCP host/port are available
+
+WAITFORIT_cmdname=${0##*/}
+
+echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
+
+usage()
+{
+    cat << USAGE >&2
+Usage:
+    $WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
+    -h HOST | --host=HOST       Host or IP under test
+    -p PORT | --port=PORT       TCP port under test
+                                Alternatively, you specify the host and port as host:port
+    -s | --strict               Only execute subcommand if the test succeeds
+    -q | --quiet                Don't output any status messages
+    -t TIMEOUT | --timeout=TIMEOUT
+                                Timeout in seconds, zero for no timeout
+    -- COMMAND ARGS             Execute command with args after the test finishes
+USAGE
+    exit 1
+}
+
+wait_for()
+{
+    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
+        echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
+    else
+        echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
+    fi
+    WAITFORIT_start_ts=$(date +%s)
+    while :
+    do
+        if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
+            nc -z $WAITFORIT_HOST $WAITFORIT_PORT
+            WAITFORIT_result=$?
+        else
+            (echo > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
+            WAITFORIT_result=$?
+        fi
+        if [[ $WAITFORIT_result -eq 0 ]]; then
+            WAITFORIT_end_ts=$(date +%s)
+            echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
+            break
+        fi
+        sleep 1
+    done
+    return $WAITFORIT_result
+}
+
+wait_for_wrapper()
+{
+    # In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
+    if [[ $WAITFORIT_QUIET -eq 1 ]]; then
+        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
+    else
+        timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
+    fi
+    WAITFORIT_PID=$!
+    trap "kill -INT -$WAITFORIT_PID" INT
+    wait $WAITFORIT_PID
+    WAITFORIT_RESULT=$?
+    if [[ $WAITFORIT_RESULT -ne 0 ]]; then
+        echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
+    fi
+    return $WAITFORIT_RESULT
+}
+
+# process arguments
+while [[ $# -gt 0 ]]
+do
+    case "$1" in
+        *:* )
+        WAITFORIT_hostport=(${1//:/ })
+        WAITFORIT_HOST=${WAITFORIT_hostport[0]}
+        WAITFORIT_PORT=${WAITFORIT_hostport[1]}
+        shift 1
+        ;;
+        --child)
+        WAITFORIT_CHILD=1
+        shift 1
+        ;;
+        -q | --quiet)
+        WAITFORIT_QUIET=1
+        shift 1
+        ;;
+        -s | --strict)
+        WAITFORIT_STRICT=1
+        shift 1
+        ;;
+        -h)
+        WAITFORIT_HOST="$2"
+        if [[ $WAITFORIT_HOST == "" ]]; then break; fi
+        shift 2
+        ;;
+        --host=*)
+        WAITFORIT_HOST="${1#*=}"
+        shift 1
+        ;;
+        -p)
+        WAITFORIT_PORT="$2"
+        if [[ $WAITFORIT_PORT == "" ]]; then break; fi
+        shift 2
+        ;;
+        --port=*)
+        WAITFORIT_PORT="${1#*=}"
+        shift 1
+        ;;
+        -t)
+        WAITFORIT_TIMEOUT="$2"
+        if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
+        shift 2
+        ;;
+        --timeout=*)
+        WAITFORIT_TIMEOUT="${1#*=}"
+        shift 1
+        ;;
+        --)
+        shift
+        WAITFORIT_CLI=("$@")
+        break
+        ;;
+        --help)
+        usage
+        ;;
+        *)
+        echoerr "Unknown argument: $1"
+        usage
+        ;;
+    esac
+done
+
+if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
+    echoerr "Error: you need to provide a host and port to test."
+    usage
+fi
+
+WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
+WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
+WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
+WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}
+
+# check to see if timeout is from busybox?
+WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
+WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)
+if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
+    WAITFORIT_ISBUSY=1
+    WAITFORIT_BUSYTIMEFLAG="-t"
+
+else
+    WAITFORIT_ISBUSY=0
+    WAITFORIT_BUSYTIMEFLAG=""
+fi
+
+if [[ $WAITFORIT_CHILD -gt 0 ]]; then
+    wait_for
+    WAITFORIT_RESULT=$?
+    exit $WAITFORIT_RESULT
+else
+    if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
+        wait_for_wrapper
+        WAITFORIT_RESULT=$?
+    else
+        wait_for
+        WAITFORIT_RESULT=$?
+    fi
+fi
+
+if [[ $WAITFORIT_CLI != "" ]]; then
+    if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
+        echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
+        exit $WAITFORIT_RESULT
+    fi
+    exec "${WAITFORIT_CLI[@]}"
+else
+    exit $WAITFORIT_RESULT
+fi
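
wait-for-it.sh lets the container block until PostgreSQL accepts TCP
connections before nipapd starts. Illustrative invocations (host and port are
examples)::

    ./wait-for-it.sh db:5432 -t 60 -- echo "database is reachable"
    ./wait-for-it.sh --host=db --port=5432 --strict -- /usr/sbin/nipapd -f
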
diff --git a/nipap/xml-test.py b/nipap/xml-test.py
index a3078e7f6..249049eb6 100755
--- a/nipap/xml-test.py
+++ b/nipap/xml-test.py
@@ -1,14 +1,14 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # coding: utf-8
 
-import xmlrpclib
+import xmlrpc.client
 
 import argparse
 import time
 import sys
 
 parser = argparse.ArgumentParser()
-parser.add_argument('-p', '--port', dest='port', type='int', default='1337', help="TCP port")
+parser.add_argument('-p', '--port', dest='port', type=int, default='1337', help="TCP port")
 parser.add_argument('-U', '--user')
 parser.add_argument('-P', '--password')
 
@@ -16,52 +16,44 @@
 cred = ''
 if args.user and args.password:
-    cred = args.user + ':' + args.password + '@'
-
-server_url = 'http://%(cred)s127.0.0.1:%(port)d/XMLRPC' % { 'port': args.port, 'cred': cred }
-server = xmlrpclib.Server(server_url, allow_none=1);
-
-ad = { 'authoritative_source': 'nipap' }
-query = {
-    'val1': 'name',
-    'operator': 'regex_match',
-    'val2': '(foo|b.*)'
-    }
-
-res = server.list_vrf({ 'auth': ad, 'spec': {} })
-print res
-#res = server.smart_search_prefix({ 'auth': ad, 'query_string': '', 'search_options': { 'include_all_parents': True } })
-#res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'foo', 'search_options': { 'include_all_parents': True } })
-#res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'foo', 'search_options': { 'include_all_parents': True } })
-#res = server.add_prefix({ 'spec': { 'prefix': '2.0.0.0/8' } })
-#print res
-#res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'test1', 'search_options': { 'include_all_parents': True, 'root_prefix': '1.0.4.0/24' } })
-#res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'THISWILLNEVERMATCH', 'search_options': { 'include_all_parents': True, 'parent_prefix': 11963 } })
-#res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'test1', 'search_options': { 'include_all_parents': True, 'parent_prefix': 'bajs' } })
+    cred = args.user + ':' + args.password + '@'
+
+server_url = 'http://%(cred)s127.0.0.1:%(port)d/XMLRPC' % {
+    'port': args.port,
+    'cred': cred,
+}
+server = xmlrpc.client.Server(server_url, allow_none=1)
+
+ad = {'authoritative_source': 'nipap'}
+query = {'val1': 'name', 'operator': 'regex_match', 'val2': '(foo|b.*)'}
+
+res = server.list_vrf({'auth': ad, 'spec': {}})
+print(res)
+# res = server.smart_search_prefix({ 'auth': ad, 'query_string': '', 'search_options': { 'include_all_parents': True } })
+# res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'foo', 'search_options': { 'include_all_parents': True } })
+# res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'foo', 'search_options': { 'include_all_parents': True } })
+# res = server.add_prefix({ 'spec': { 'prefix': '2.0.0.0/8' } })
+# print res
+# res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'test1', 'search_options': { 'include_all_parents': True, 'root_prefix': '1.0.4.0/24' } })
+# res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'THISWILLNEVERMATCH', 'search_options': { 'include_all_parents': True, 'parent_prefix': 11963 } })
+# res = server.smart_search_prefix({ 'auth': ad, 'query_string': 'test1', 'search_options': { 'include_all_parents': True, 'parent_prefix': 'bajs' } })
 
 for p in res['result']:
-    print p
-#for p in res:
+    print(p)
+# for p in res:
 #    print res[p]
-    #print "".join(" " for i in xrange(p['indent'])), p['prefix'], p['match']
+#    print "".join(" " for i in xrange(p['indent'])), p['prefix'], p['match']
 
-#res = server.list_pool({ 'auth': ad, 'pool': { 'id': 1003 } })
-#res = server.version()
+# res = server.list_pool({ 'auth': ad, 'pool': { 'id': 1003 } })
+# res = server.version()
 
 sys.exit(0)
 
-remove_query = {
-    'auth': {
-        'authoritative_source': 'kll'
-    },
-    'schema': {
-        'id': 1
-    }
-    }
-#server.remove_schema(remove_query)
-#print server.list_vrf({ 'auth': ad })
-#sys.exit(0)
-#print server.add_vrf({ 'auth': { 'authoritative_source': 'kll' },
+remove_query = {'auth': {'authoritative_source': 'kll'}, 'schema': {'id': 1}}
+# server.remove_schema(remove_query)
+# print server.list_vrf({ 'auth': ad })
+# sys.exit(0)
+# print server.add_vrf({ 'auth': { 'authoritative_source': 'kll' },
 #        'attr': {
 #            'vrf': '1257:124',
 #            'name': 'test2',
@@ -69,77 +61,69 @@
 #        }
 #    }
 # )
-#print server.list_vrf({ 'auth': ad, 'vrf': {} })
-#print server.add_prefix({ 'auth': ad, 'attr': {
+# print server.list_vrf({ 'auth': ad, 'vrf': {} })
+# print server.add_prefix({ 'auth': ad, 'attr': {
 #        'prefix': '1.0.0.0/24',
 #        'type': 'assignment',
 #        'description': 'test'
 #        }
 #    })
 #
-#print "All VRFs:"
-#res = server.list_prefix({ 'auth': ad })
-#for p in res:
+# print "All VRFs:"
+# res = server.list_prefix({ 'auth': ad })
+# for p in res:
 #    print "%10s %s" % (p['vrf_name'], p['prefix'])
 #
-#print "VRF: test2"
-#res = server.list_prefix({ 'auth': ad,
+# print "VRF: test2"
+# res = server.list_prefix({ 'auth': ad,
 #        'prefix': {
 #            'vrf': '1257:124'
 #        }
 #    })
-#for p in res:
+# for p in res:
 #    print "%10s %s" % (p['vrf_name'], p['prefix'])
 
-#t0 = time.time()
-#import sys
-#ss = u'ballong'
-#print "Type of search string:", type(ss)
-#print ss
-#res = server.search_schema({ 'operator': 'regex_match', 'val1': 'name', 'val2': 'test' }, { 'max_result': 500 })
+# t0 = time.time()
+# import sys
+# ss = u'ballong'
+# print "Type of search string:", type(ss)
+# print ss
+# res = server.search_schema({ 'operator': 'regex_match', 'val1': 'name', 'val2': 'test' }, { 'max_result': 500 })
 
 a = {
-    'auth': {
-        'authoritative_source': 'kll'
-    },
-    'query_string': 'test',
-    'search_options': {
-        'include_all_parents': True,
-        'root_prefix': '1.3.0.0/16'
-    }
-    }
+    'auth': {'authoritative_source': 'kll'},
+    'query_string': 'test',
+    'search_options': {'include_all_parents': True, 'root_prefix': '1.3.0.0/16'},
+}
 res = server.smart_search_prefix(a)
 for p in res['result']:
-    print p['vrf_rt'], p['display_prefix'], p['description'], p['match']
-#res = server.smart_search_prefix('test', { 'root_prefix': '1.3.0.0/8', 'max_result': 500 })
-#t1 = time.time()
-#d1 = t1-t0
-#print "Timing:", d1
-#print res
+    print(p['vrf_rt'], p['display_prefix'], p['description'], p['match'])
+# res = server.smart_search_prefix('test', { 'root_prefix': '1.3.0.0/8', 'max_result': 500 })
+# t1 = time.time()
+# d1 = t1-t0
+# print "Timing:", d1
+# print res
 
 #
 # echo test
 #
-#print "try the echo function without args"
-#args = {}
-#print "ARGS:", args
-#print "RESULT:", server.echo()
-#print ""
+# print "try the echo function without args"
+# args = {}
+# print "ARGS:", args
+# print "RESULT:", server.echo()
+# print ""
 #
-#print "try the echo function with a message argument"
-#args = { 'message': 'Please reply to me, Obi-Wan Kenobi, you are my only hope!' }
-#print "ARGS:", args
-#print "RESULT:", server.echo( args )
-#print ""
+# print "try the echo function with a message argument"
+# args = { 'message': 'Please reply to me, Obi-Wan Kenobi, you are my only hope!' }
+# print "ARGS:", args
+# print "RESULT:", server.echo( args )
+# print ""
 
 #
 # try list function
 #
-#print "try the list prefix function with a node argument"
-#args = { 'node': 'kst5-core-3' }
-#print "ARGS:", args
-#print "RESULT:", server.list_prefix( args )
-#print ""
-
-
-
+# print "try the list prefix function with a node argument"
+# args = { 'node': 'kst5-core-3' }
+# print "ARGS:", args
+# print "RESULT:", server.list_prefix( args )
+# print ""
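
In xmlrpc.client, Server is kept only as a backwards-compatible alias for
ServerProxy; new code would normally use the explicit name::

    import xmlrpc.client

    server = xmlrpc.client.ServerProxy('http://127.0.0.1:1337/XMLRPC',
                                       allow_none=True)
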
diff --git a/nipap/xmlbench.py b/nipap/xmlbench.py
index 274ce883f..1b9fd9983 100755
--- a/nipap/xmlbench.py
+++ b/nipap/xmlbench.py
@@ -1,65 +1,68 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 
 from twisted.web.xmlrpc import Proxy
 from twisted.internet import reactor
 
 import sys
 import datetime
 
-class Request():
+
+
+class Request:
     def __init__(self, url, method, params):
         self.url = url
         self.method = method
         self.params = params
         self.start_time = 0
         self.end_time = 0
-        self.value = "" 
+        self.value = ""
         self.error = ""
         self.finished = False
-        self.error_file = open('errors.csv','w+')
-
+        self.error_file = open('errors.csv', 'w+')
 
     def addCallback(self, callback):
         self.callback = callback
 
-
     def addErrback(self, errback):
         self.errback = errback
 
-
     def makeRequest(self):
         proxy = Proxy(self.url)
-        proxy.callRemote(self.method,*self.params).addCallbacks(self.retSuccess, self.retFail)
+        proxy.callRemote(self.method, *self.params).addCallbacks(
+            self.retSuccess, self.retFail
+        )
         self.start_time = datetime.datetime.now()
 
-
     def __returned(self):
         self.end_time = datetime.datetime.now()
 
-
     def retSuccess(self, value):
         self.__returned()
         self.finished = True
         self.value = value
-        self.callback(self,value)
-
+        self.callback(self, value)
 
     def retFail(self, error):
         self.__returned()
         self.finished = True
         self.error = error
         self.error_file.write("Error: %s" % error)
-        self.callback(self,error)
-
+        self.callback(self, error)
 
     def isFinished(self):
         return self.finished
 
-
     def getTime(self):
-        return (self.end_time - self.start_time) # this should be a timedelta
-
-
-class Benchmark():
-    def __init__(self, concurrent = 10, total = 100, url = 'http://localhost:7080/XMLRPC', method = 'date', params=None):
+        return self.end_time - self.start_time  # this should be a timedelta
+
+
+class Benchmark:
+    def __init__(
+        self,
+        concurrent=10,
+        total=100,
+        url='http://localhost:7080/XMLRPC',
+        method='date',
+        params=None,
+    ):
         if params is None:
             params = {}
         self.url = url
@@ -69,13 +72,11 @@ def __init__(self, concurrent = 10, total = 100, url = 'http://localhost:7080/XM
         self.total_reqs = total
         self.open_reqs = 0
         self.current_reqs = 0
-        self.error_file = open('errors.csv','w+')
-        self.req_times_file = open('times.csv','w+')
-
+        self.error_file = open('errors.csv', 'w+')
+        self.req_times_file = open('times.csv', 'w+')
 
     def makeLog(self, filename):
-        self.log_file = open(filename,'w+')
-
+        self.log_file = open(filename, 'w+')
 
     def makeRequest(self):
         req = Request(self.url, self.method, self.params)
@@ -84,47 +85,49 @@ def makeRequest(self):
         req.makeRequest()
         self.open_reqs = self.open_reqs + 1
 
-
     def printReqDetail(self, req):
-        #print "Request time: %d ms" % req.getTime().microseconds
+        # print "Request time: %d ms" % req.getTime().microseconds
         delta = req.getTime()
-        print delta
-
+        print(delta)
 
     def reqFinished(self, req):
         self.printReqDetail(req)
         self.open_reqs = self.open_reqs - 1
-        self.current_reqs = self.current_reqs + 1 # completed requests
-        if ((self.current_reqs + self.open_reqs) < self.total_reqs):
+        self.current_reqs = self.current_reqs + 1  # completed requests
+        if (self.current_reqs + self.open_reqs) < self.total_reqs:
             self.makeRequest()
         else:
             if self.open_reqs == 0:
-                reactor.stop() # made as many requests as we wanted to
+                reactor.stop()  # made as many requests as we wanted to
 
-
-    def reqSuccess(self,req,value):
+    def reqSuccess(self, req, value):
         self.reqFinished(req)
-        print repr(value)
-
+        print(repr(value))
 
-    def reqError(self,req, error):
+    def reqError(self, req, error):
         self.reqFinished(req)
-        #print 'error', error
-
+        # print 'error', error
 
     def setupReqs(self):
-        for i in range(0,self.concurrent_reqs): # make the initial pool of requests
+        for i in range(0, self.concurrent_reqs):  # make the initial pool of requests
            self.makeRequest()
 
 
 if __name__ == '__main__':
     import argparse
+
     parser = argparse.ArgumentParser()
-    parser.add_argument('-p', '--port', dest='port', type='int', default='1337', help="TCP port")
+    parser.add_argument(
+        '-p', '--port', dest='port', type=int, default=1337, help="TCP port"
+    )
     parser.add_argument('-U', '--user')
     parser.add_argument('-P', '--password')
-    parser.add_argument('--concurrent', type='int', default=10, help="Concurrent requests")
-    parser.add_argument('--total', type='int', default=100, help="Total number of requests")
+    parser.add_argument(
+        '--concurrent', type=int, default=10, help="Concurrent requests"
+    )
+    parser.add_argument(
+        '--total', type=int, default=100, help="Total number of requests"
+    )
     parser.add_argument('--method', help="XML-RPC method to benchmark")
     parser.add_argument('--args', help="Args to XML-RPC method")
 
@@ -133,12 +136,22 @@ def setupReqs(self):
     cred = ''
     if args.user and args.password:
         cred = args.user + ':' + args.password + '@'
-    server_url = 'http://%(cred)s127.0.0.1:%(port)d/XMLRPC' % { 'port': args.port, 'cred': cred }
-
-    ad = { 'authoritative_source': 'nipap' }
-    args = [{ 'auth': ad, 'message': 'test', 'sleep': 0.1 }]
-    args = [{ 'auth': ad, 'query_string': 'foo' }]
-    b = Benchmark(concurrent = args.concurrent, total = args.total, url =
-            server_url, method = args.method, params = args)
+    server_url = 'http://%(cred)s127.0.0.1:%(port)d/XMLRPC' % {
+        'port': args.port,
+        'cred': cred,
+    }
+
+    ad = {'authoritative_source': 'nipap'}
+    # keep the request payload in its own variable so the parsed CLI
+    # options in `args` are not shadowed before Benchmark() reads them
+    params = [{'auth': ad, 'message': 'test', 'sleep': 0.1}]
+    params = [{'auth': ad, 'query_string': 'foo'}]
+    b = Benchmark(
+        concurrent=args.concurrent,
+        total=args.total,
+        url=server_url,
+        method=args.method,
+        params=params,
+    )
     b.setupReqs()
     reactor.run()
diff --git a/tests/nipapbase.py b/tests/nipapbase.py
index 1cdfb917e..363d4eaf7 100755
--- a/tests/nipapbase.py
+++ b/tests/nipapbase.py
@@ -10,6 +10,7 @@
 from nipap.authlib import SqliteAuth
 from nipap.nipapconfig import NipapConfig
 
+
 class NipapTest(unittest.TestCase):
     """ Tests the NIPAP class """
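
With the argparse types in xmlbench.py corrected, the benchmark can be driven
from the command line; an illustrative run against a local nipapd (method
name, credentials and counts are examples)::

    python3 xmlbench.py -U admin -P secret -p 1337 \
        --method echo --concurrent 10 --total 100
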