diff --git a/.coveragerc b/.coveragerc
index 3039eccf6b..dd95d415a0 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -4,8 +4,6 @@ omit =
     insights/contrib/*
     insights/*/__main__.py
     # Exclude test files from coverage.
     insights/tests/*
-    insights/parsers/tests/*
-    insights/combiners/tests/*
 branch = True
 [report]
diff --git a/.flake8 b/.flake8
index 54a83cb32f..170a6256c1 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
 [flake8]
-ignore = E501,E126,E127,E128,E722,E741
-exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py
+ignore = E501,E126,E127,E128,E722,E741,W605,W504
+exclude = insights/contrib,bin,docs,include,lib,lib64,.git,.collections.py,insights/parsers/tests/lvm_test_data.py,insights/client/apps/ansible/playbook_verifier/contrib
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 0000000000..8ff08196cc
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,20 @@
+### All Pull Requests:
+
+Check all that apply:
+
+* [ ] Have you followed the guidelines in our Contributing document, including the instructions about commit messages?
+* [ ] Is this PR to correct an issue?
+* [ ] Is this PR an enhancement?
+
+### Complete Description of Additions/Changes:
+
+
+*Add your description here*
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000000..dad85eebae
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,138 @@
+name: Insights Core Test
+
+on:
+  push:
+    branches: [ master, '3.0']
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  code-test:
+
+    runs-on: ubuntu-20.04
+    strategy:
+      matrix:
+        python-versions: [3.6, 3.9]
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python ${{ matrix.python-versions }}
+      uses: actions/setup-python@v4
+      with:
+        python-version: ${{ matrix.python-versions }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+    - name: flake8
+      run: |
+        pip install -e .[linting]
+        flake8 .
+    - name: pytest
+      run: |
+        pip install 'urllib3<2'
+        pip install -e .[testing]
+        pytest
+
+  python27-test:
+
+    runs-on: ubuntu-latest
+    container:
+      image: python:2.7.18-buster
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python 2.7
+      uses: actions/setup-python@v4
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+    - name: flake8
+      run: |
+        pip install -e .[linting]
+        flake8 .
+    - name: pytest
+      run: |
+        pip install 'urllib3<2'
+        pip install -e .[testing]
+        pytest
+
+  python26-test:
+
+    runs-on: ubuntu-latest
+    container: ubuntu:18.04
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Install sudo package
+      run: apt update && apt install sudo
+    - name: install dependencies
+      run: |
+        sudo apt-get install -y --no-install-recommends software-properties-common
+        sudo apt-get install -y --no-install-recommends libssl1.0-dev zlib1g-dev
+        sudo apt-get install -y build-essential git curl unzip file wget tar
+    - name: build python26
+      run: |
+        CUR_DIR=$(pwd)
+        cd ../
+        wget https://www.python.org/ftp/python/2.6.9/Python-2.6.9.tgz
+        tar zxf ./Python-2.6.9.tgz
+        sed -e "s:'\/usr\/lib64',:'\/usr\/lib64', '\/usr\/lib/x86_64-linux-gnu',:" ./Python-2.6.9/setup.py -i
+        mkdir ./Python-2.6.9/build && cd ./Python-2.6.9/build
+        ../configure --prefix=$HOME/python26 && make && make install
+        sudo update-alternatives --install /usr/bin/python python $HOME/python26/bin/python2.6 1
+        sudo update-alternatives --set python $HOME/python26/bin/python2.6
+        cd ${CUR_DIR}
+    - name: build setuptools and pip
+      run: |
+        export PATH=$PATH:/github/home/.local/bin
+        CUR_DIR=$(pwd)
+        mkdir ../tools && cd ../tools
+        curl -L -O https://files.pythonhosted.org/packages/b8/04/be569e393006fa9a2c10ef72ea33133c2902baa115dd1d4279dae55c3b3b/setuptools-36.8.0.zip
+        unzip setuptools-36.8.0.zip && cd setuptools-36.8.0
+        python setup.py install --user && cd ..
+        curl -L -O https://github.com/pypa/pip/archive/refs/tags/9.0.3.tar.gz
+        tar -xvzf 9.0.3.tar.gz && cd pip-9.0.3
+        python setup.py install --user
+        cd ${CUR_DIR}
+    - name: get dependencies
+      run: |
+        export PATH=$PATH:/github/home/.local/bin
+        CUR_DIR=$(pwd)
+        cd ../
+        git clone https://github.com/SteveHNH/jenkins-s2i-example.git pips
+        (cd ./pips/slave26/pip_packages && test -f coverage-4.3.4-cp26-cp26mu-manylinux1_x86_64.whl && mv coverage-4.3.4-cp26-cp26mu-manylinux1_x86_64.whl coverage-4.3.4-py2.py3-none-any.whl)
+        pip install --user --no-index -f ./pips/slave26/pip_packages -r ./pips/slave26/ci_requirements.txt
+        cd ${CUR_DIR}
+        mkdir ../collections_module
+        sudo curl -L -o ./../collections_module/collections.py https://raw.githubusercontent.com/RedHatInsights/insights-core/5c8ca0f2fb3de45908e8d931d40758af34a7997a/.collections.py
+    - name: flake8
+      run: |
+        export PATH=$PATH:/github/home/.local/bin
+        pip install --user -e .[linting] -f ./pips/slave26/pip_packages
+        flake8 .
+    - name: pytest
+      run: |
+        export PATH=$PATH:/github/home/.local/bin
+        pip install --user -e .[testing] -f ./pips/slave26/pip_packages
+        export PYTHONPATH=${PYTHONPATH}:./../collections_module
+        pytest
+
+  docs-test:
+
+    runs-on: ubuntu-22.04
+
+    steps:
+    - uses: actions/checkout@v3
+    - name: Set up Python 3.8
+      uses: actions/setup-python@v4
+      with:
+        python-version: 3.8
+    - name: install dependencies
+      run: |
+        sudo apt-get install pandoc
+        python -m pip install --upgrade pip
+    - name: docs Test
+      run: |
+        pip install docutils==0.17
+        pip install -e .[docs]
+        sphinx-build -W -b html -qa -E docs docs/_build/html
diff --git a/.gitignore b/.gitignore
index eca490f494..38cf8d8ef4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@ pylint.out
 cover/
 docs/plugin_catalog/*.rst
 docs/components.rst
+docs/specs_catalog.rst
 build/
 .idea*
 dist/
@@ -48,4 +49,8 @@ share/
 .Python
 .python*
 .pytest_cache
-.vscode
\ No newline at end of file
+.vscode
+insights/filters.yaml
+.nox/
+.venv/
+
diff --git a/.tito/packages/.readme b/.tito/packages/.readme
new file mode 100644
index 0000000000..b9411e2d11
--- /dev/null
+++ b/.tito/packages/.readme
@@ -0,0 +1,3 @@
+the .tito/packages directory contains metadata files
+named after their packages. Each file has the latest tagged
+version and the project's relative directory.
diff --git a/.tito/tito.props b/.tito/tito.props
new file mode 100644
index 0000000000..e89e1c7015
--- /dev/null
+++ b/.tito/tito.props
@@ -0,0 +1,9 @@
+[buildconfig]
+builder = tito.builder.Builder
+tagger = tito.tagger.VersionTagger
+changelog_do_not_remove_cherrypick = 0
+changelog_format = %s (%ae)
+tag_format = {component}-{version}
+[version_template]
+template_file = ./.tito/version_template_file
+destination_file = ./insights/VERSION
diff --git a/.tito/version_template_file b/.tito/version_template_file
new file mode 100644
index 0000000000..f43b9c6674
--- /dev/null
+++ b/.tito/version_template_file
@@ -0,0 +1 @@
+$version
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 073c0e4acc..0000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: python
-cache:
-  directories:
-  - "/home/travis/virtualenv/python2.7.9/"
-python:
-  - "2.6"
-  - "2.7"
-install: pip install -e .[develop]
-script: ./build.sh
-services:
-  - docker
-sudo: required
-env:
-  global:
-  - COMMIT=${TRAVIS_COMMIT::8}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 81e2fcc461..927e34ac6c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,24 +16,24 @@ phase).
 
 ## Rule Development Setup
 
-Clone the project::
+Clone the project:
 
     git clone git@github.com:RedHatInsights/insights-core.git
 
-Or, alternatively, using HTTPS::
+Or, alternatively, using HTTPS:
 
     git clone https://github.com/RedHatInsights/insights-core.git
 
-Initialize a virtualenv::
+Initialize a virtualenv:
 
    cd insights-core
   virtualenv .
 
-Install the project and its dependencies::
+Install the project and its dependencies:
 
     bin/pip install -e .
 
-Install a rule repository::
+Install a rule repository:
 
     bin/pip install -e path/to/rule/repo
 
@@ -42,16 +42,16 @@ Install a rule repository::
 
 If you wish to contribute to the insights-core project you'll need to create a
 fork in github.
 
-1. Clone your fork::
+1. Clone your fork:
 
     git clone git@github.com:your-user/insights-core.git
 
-2. Reference the original project as "upstream"::
+2. Reference the original project as "upstream":
 
    git remote add upstream git@github.com:RedHatInsights/insights-core.git
 
 At this point, you would synchronize your fork with the upstream project
-using the following commands::
+using the following commands:
 
     git pull upstream master
     git push origin master
@@ -133,7 +133,11 @@ from the current master branch of the upstream project.
    of the topic branch. Again, such manipulations change history and
    require a `--force` push.
 
-6. When ready, use the github UI to submit a pull request.
+6. When ready, use the github UI to submit a pull request. Fill out
+   the information requested in the PR template. If your PR fixes an
+   issue make sure to reference the issue using a
+   [keyword](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword)
+   so that it will be closed once your PR is merged.
 
 7. Repeat steps 4 and 5 as necessary. Note that a forced push to the
    topic branch will work as expected. The pull request will be
diff --git a/Dockerfile b/Dockerfile
index 20e693d49e..f60ca762f3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,5 @@
 FROM centos:7
 RUN yum install -y python-devel file zip gcc libffi-devel && yum clean all
-RUN curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py && python get-pip.py && rm get-pip.py
+RUN curl https://bootstrap.pypa.io/pip/2.7/get-pip.py -o get-pip.py && python get-pip.py && rm get-pip.py
 COPY . /src
 RUN pip install /src
diff --git a/Jenkinsfile b/Jenkinsfile
index 5508ab3565..ea42da6d48 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -21,18 +21,18 @@ pipeline {
                 sh """
                     virtualenv .testenv
                     source .testenv/bin/activate
-                    pip install "idna<=2.7"
-                    pip install "pycparser<=2.18"
-                    pip install -e .[testing]
+                    pip install /pip_packages/pip-9.0.3-py2.py3-none-any.whl
+                    pip install -r /var/lib/jenkins/ci_requirements.txt -f /pip_packages
+                    pip install -e .[testing] -f /pip_packages
                     pytest
                 """
                 echo "Testing with Linter..."
                 sh """
                     virtualenv .lintenv
                     source .lintenv/bin/activate
-                    pip install "idna<=2.7"
-                    pip install "pycparser<=2.18"
-                    pip install -e .[linting]
+                    pip install /pip_packages/pip-9.0.3-py2.py3-none-any.whl
+                    pip install -r /var/lib/jenkins/ci_requirements.txt -f /pip_packages
+                    pip install -e .[linting] -f /pip_packages
                     flake8
                 """
             }
@@ -69,14 +69,14 @@ pipeline {
             steps {
                 echo "Testing with Pytest..."
                 sh """
-                    /bin/python36 -m venv .testenv
+                    /bin/python3 -m venv .testenv
                     source .testenv/bin/activate
                     pip install -e .[testing]
                     pytest
                 """
                 echo "Testing with Linter..."
                 sh """
-                    /bin/python36 -m venv .lintenv
+                    /bin/python3 -m venv .lintenv
                     source .lintenv/bin/activate
                     pip install -e .[linting]
                     flake8
@@ -99,7 +99,7 @@ pipeline {
             steps {
                 echo "Building Docs..."
sh """ - /bin/python36 -m venv .docenv + /bin/python3 -m venv .docenv source .docenv/bin/activate pip install -e .[docs] sphinx-build -W -b html -qa -E docs docs/_build/html diff --git a/MANIFEST.in.client b/MANIFEST.in.client index 98720a49be..7acbb045f1 100644 --- a/MANIFEST.in.client +++ b/MANIFEST.in.client @@ -1,4 +1,6 @@ include insights/defaults.yaml +include insights/revoked_playbooks.yaml +include insights/compliance_obfuscations.yaml include insights/NAME include insights/VERSION include insights/COMMIT diff --git a/MANIFEST.in.core b/MANIFEST.in.core new file mode 100644 index 0000000000..bd9dcfee7e --- /dev/null +++ b/MANIFEST.in.core @@ -0,0 +1,17 @@ +include insights/defaults.yaml +include insights/NAME +include insights/VERSION +include insights/COMMIT +include insights/RELEASE +prune examples +prune insights/client +prune insights/combiners/tests +prune insights/components/tests +prune insights/parsers/tests +prune insights/parsr/tests +prune insights/plugins +prune insights/parsr/examples/tests +prune insights/parsr/query/tests +prune insights/tests +include insights/parsers/__init__.py +include insights/combiners/__init__.py diff --git a/build_core_rpm.sh b/build_core_rpm.sh new file mode 100755 index 0000000000..d83536c75b --- /dev/null +++ b/build_core_rpm.sh @@ -0,0 +1,10 @@ +#!/bin/bash +PYTHON=${1:-python} + +rm -rf BUILD BUILDROOT RPMS SRPMS +rm -rf insights_core.egg-info +cp MANIFEST.in.core MANIFEST.in +$PYTHON setup.py sdist +rpmbuild -ba -D "_topdir $PWD" -D "_sourcedir $PWD/dist" insights-core.spec +rm -rf dist BUILD BUILDROOT +git checkout MANIFEST.in diff --git a/conftest.py b/conftest.py index c59a44df00..0fbe320384 100644 --- a/conftest.py +++ b/conftest.py @@ -28,7 +28,7 @@ def run_rule(): internal support rules that will not be used in the customer facing Insights product. """ - def _run_rule(rule, input_data): + def _run_rule(rule, input_data, return_make_none=False): """ Fixture for rule integration testing @@ -47,11 +47,15 @@ def test_myrule(run_rule): rule (object): Your rule function object. data (InputData): InputData obj containing all of the necessary data for the test. + return_make_none (bool): Set to true if you are testing for ``make_none()`` + results in your CI tests instead of ``None``. """ - result = run_test(rule, input_data) + result = run_test(rule, input_data, return_make_none=return_make_none) # Check result for skip to be compatible with archive_provider decorator # Return None instead of result indicating missing component(s) - if result is not None and 'type' in result and result['type'] == 'skip': + if (result is not None and 'type' in result and + (result['type'] == 'skip' or + (result['type'] == 'none' and not return_make_none))): return None else: return result diff --git a/docs/api.rst b/docs/api.rst index 6a0d8a9542..d83d48cd09 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -14,7 +14,7 @@ SOSReports A SOSReport_ is a command-line tool for Red Hat Enterprise Linux (and other systems) to collect configuration and diagnostic information from the system. -.. _SOSReport: https://github.com/sos/sosreport +.. _SOSReport: https://github.com/sosreport/sos Insights Archives ----------------- @@ -93,6 +93,8 @@ each unique context, and also provide a default set of data sources that are common among one or more contexts. All available contexts are defined in the module :py:mod:`insights.core.context`. +.. 
+.. _datasources-ref:
+
 Data Sources
 ============
@@ -462,7 +464,7 @@ Functions
     from insights.util import deprecated
 
     def old_feature(arguments):
-        deprecated(old_feature, "Use the new_feature() function instead")
+        deprecated(old_feature, "Use the new_feature() function instead", "3.1.25")
         ...
 
 Class methods
@@ -476,7 +478,7 @@ Class methods
     ...
 
         def old_method(self, *args, **kwargs):
-            deprecated(self.old_method, "Use the new_method() method instead")
+            deprecated(self.old_method, "Use the new_method() method instead", "3.1.25")
             self.new_method(*args, **kwargs)
     ...
 
@@ -489,7 +491,7 @@ Class
     class ThingParser(Parser):
 
         def __init__(self, *args, **kwargs):
-            deprecated(ThingParser, "Use the new_feature() function instead")
+            deprecated(ThingParser, "Use the new_feature() function instead", "3.1.25")
             super(ThingParser, self).__init__(*args, **kwargs)
     ...
 
@@ -506,3 +508,30 @@ The :py:func:`insights.util.deprecated` function takes three arguments:
   ``new_parser`` module."
 - For a specific method being replaced by a general mechanism: "Please use the
   ``search`` method with the arguments ``state="LISTEN"``."
+- The last ``version`` of insights-core in which the function will be available
+  before it is removed. For example:
+
+  - For version 3.1.0 the last revision will be 3.1.25. If the deprecation
+    message indicates that the last version is 3.1.25, the function will be
+    removed in 3.2.0.
+
+
+Insights-core release timeline
+------------------------------
+
+.. table::
+   :widths: auto
+
+   ======= =====================
+   Version Expected release date
+   ======= =====================
+   3.0.300 December 2022 (Initial release)
+   3.1.0   December 2022
+   3.2.0   June 2023
+   3.3.0   December 2023
+   3.4.0   June 2024
+   ======= =====================
+
+.. note::
+   - We bump the insights-core revision every week. Please refer to the `CHANGELOG.md file `_ for more info.
+   - The minor version will be bumped after every 25 revisions. For example, after 3.1.25, we would move to 3.2.0, except for 3.0.300, which marks the first planned release. After 3.0.300, we bump the minor version to 3.1.0.
diff --git a/docs/api_index.rst b/docs/api_index.rst
index 1eecb8acb6..da0a097d89 100644
--- a/docs/api_index.rst
+++ b/docs/api_index.rst
@@ -30,6 +30,14 @@ insights.core.dr
     :members:
     :exclude-members: requires, optional, metadata, group, tags
 
+insights.core.exceptions
+------------------------
+
+.. automodule:: insights.core.exceptions
+    :members:
+    :show-inheritance:
+    :undoc-members:
+
 insights.core.filters
 ---------------------
 
@@ -74,8 +82,8 @@ insights.parsers
 ----------------
 
 .. automodule:: insights.parsers
-    :members: ParseException, SkipException, calc_offset, get_active_lines,
-              keyword_search, optlist_to_dict, parse_delimited_table,
+    :members: calc_offset, get_active_lines, keyword_search,
+              optlist_to_dict, parse_delimited_table,
               parse_fixed_table, split_kv_pairs, unsplit_lines
     :show-inheritance:
     :undoc-members:
@@ -183,3 +191,21 @@ insights.util
     :members:
     :show-inheritance:
     :undoc-members:
+
+.. automodule:: insights.util.autology
+    :members:
+    :show-inheritance:
+    :undoc-members:
+
+.. automodule:: insights.util.autology.datasources
+    :members:
+    :show-inheritance:
+    :undoc-members:
+
+insights
+--------
+
+.. automodule:: insights.collect
+    :members: default_manifest, collect
+    :show-inheritance:
+    :undoc-members:
\ No newline at end of file
diff --git a/docs/conf.py b/docs/conf.py
index 7acda2e5a9..5989f6d8e7 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -18,7 +18,7 @@
 import sys
 import os
 import insights
-from insights.util import component_graph
+from insights.util import component_graph, specs_catalog
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -59,7 +59,7 @@
 
 # General information about the project.
 project = u'insights-core'
-copyright = u'2016, 2017, 2018 Red Hat, Inc'
+copyright = u'2016, 2017, 2018, 2019, 2020 Red Hat, Inc'
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -366,3 +366,7 @@ def setup(app):
     # Dynamically generate cross reference for components prior to doc build
     filename = os.path.join(app.confdir, "components.rst")
     component_graph.main(filename)
+
+    # Dynamically generate datasource documentation prior to doc build
+    filename = os.path.join(app.confdir, "specs_catalog.rst")
+    specs_catalog.main(filename)
diff --git a/docs/custom_datasources_index.rst b/docs/custom_datasources_index.rst
new file mode 100644
index 0000000000..c60106c9bd
--- /dev/null
+++ b/docs/custom_datasources_index.rst
@@ -0,0 +1,277 @@
+.. _custom-datasources:
+
+Custom Datasources Catalog
+==========================
+
+insights.specs.datasources
+--------------------------
+
+.. automodule:: insights.specs.datasources
+    :members:
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.aws
+------------------------------
+
+.. automodule:: insights.specs.datasources.aws
+    :members: aws_imdsv2_token, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.awx_manage
+-------------------------------------
+
+.. automodule:: insights.specs.datasources.awx_manage
+    :members: awx_manage_check_license_data_datasource, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.candlepin_broker
+-------------------------------------------
+
+.. automodule:: insights.specs.datasources.candlepin_broker
+    :members: candlepin_broker, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.cloud_init
+-------------------------------------
+
+.. automodule:: insights.specs.datasources.cloud_init
+    :members: cloud_cfg, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.container.containers_inspect
+--------------------------------------------------------
+
+.. automodule:: insights.specs.datasources.container.containers_inspect
+    :members: running_rhel_containers_id, containers_inspect_data_datasource
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.container
+------------------------------------
+
+.. automodule:: insights.specs.datasources.container
+    :members: running_rhel_containers
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.container.nginx_conf
+-----------------------------------------------
+
+.. automodule:: insights.specs.datasources.container.nginx_conf
+    :members: nginx_conf, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.corosync
+-----------------------------------
+
+.. automodule:: insights.specs.datasources.corosync
+    :members: corosync_cmapctl_cmds
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.dir_list
+-----------------------------------
+
+.. automodule:: insights.specs.datasources.dir_list
+    :members: du_dir_list
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.ethernet
+-----------------------------------
+
+.. automodule:: insights.specs.datasources.ethernet
+    :members: interfaces, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.httpd
+--------------------------------
+
+.. automodule:: insights.specs.datasources.httpd
+    :members: httpd_cmds, httpd_on_nfs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.ipcs
+-------------------------------
+
+.. automodule:: insights.specs.datasources.ipcs
+    :members: semid
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.kernel
+---------------------------------
+
+.. automodule:: insights.specs.datasources.kernel
+    :members: current_version, default_version
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.kernel_module_list
+---------------------------------------------
+
+.. automodule:: insights.specs.datasources.kernel_module_list
+    :members: kernel_module_filters
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.leapp
+--------------------------------
+
+.. automodule:: insights.specs.datasources.leapp
+    :members: leapp_report
+    :show-inheritance:
+    :undoc-members:
+
+
+insights.specs.datasources.lpstat
+---------------------------------
+
+.. automodule:: insights.specs.datasources.lpstat
+    :members: lpstat_protocol_printers_info, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+
+insights.specs.datasources.luks_devices
+---------------------------------------
+
+.. automodule:: insights.specs.datasources.luks_devices
+    :members: luks_block_devices, luks_data_sources, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.machine_ids
+--------------------------------------
+
+.. automodule:: insights.specs.datasources.machine_ids
+    :members: dup_machine_id_info
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.malware_detection
+--------------------------------------------
+
+.. automodule:: insights.specs.datasources.malware_detection
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.md5chk
+---------------------------------
+
+.. automodule:: insights.specs.datasources.md5chk
+    :members: files
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.package_provides
+-------------------------------------------
+
+.. automodule:: insights.specs.datasources.package_provides
+    :members: cmd_and_pkg, get_package
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.pcp
+------------------------------
+
+.. automodule:: insights.specs.datasources.pcp
+    :members: pcp_enabled, pmlog_summary_args
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.ps
+-----------------------------
+
+.. automodule:: insights.specs.datasources.ps
+    :members: jboss_runtime_versions, ps_eo_cmd, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.rsyslog_confs
+----------------------------------------
+
+.. automodule:: insights.specs.datasources.rsyslog_confs
+    :members: rsyslog_errorfile
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.sap
+------------------------------
+
+.. automodule:: insights.specs.datasources.sap
+    :members: sap_sid, sap_hana_sid, sap_hana_sid_SID_nr, ld_library_path_of_user, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.satellite_missed_queues
+---------------------------------------------------
+
+.. automodule:: insights.specs.datasources.satellite_missed_queues
+    :members: satellite_missed_pulp_agent_queues, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.semanage
+-----------------------------------
+
+.. automodule:: insights.specs.datasources.semanage
+    :members: users_count_map_selinux_user, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.ssl_certificate
+------------------------------------------
+
+.. automodule:: insights.specs.datasources.ssl_certificate
+    :members: httpd_certificate_info_in_nss, httpd_ssl_certificate_files, nginx_ssl_certificate_files, mssql_tls_cert_file
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.sys_fs_cgroup_memory
+-----------------------------------------------
+
+.. automodule:: insights.specs.datasources.sys_fs_cgroup_memory
+    :members: sys_fs_cgroup_uniq_memory_swappiness
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.sys_fs_cgroup_memory_tasks_number
+------------------------------------------------------------
+
+.. automodule:: insights.specs.datasources.sys_fs_cgroup_memory_tasks_number
+    :members: sys_fs_cgroup_memory_tasks_number_data_datasource, LocalSpecs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.rpm_pkgs
+-----------------------------------
+
+.. automodule:: insights.specs.datasources.rpm_pkgs
+    :members: pkgs_with_writable_dirs
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.user_group
+-------------------------------------
+
+.. automodule:: insights.specs.datasources.user_group
+    :members: group_filters
+    :show-inheritance:
+    :undoc-members:
+
+insights.specs.datasources.yum_updates
+--------------------------------------
+
+.. automodule:: insights.specs.datasources.yum_updates
+    :members: yum_updates
+    :show-inheritance:
+    :undoc-members:
diff --git a/docs/docs_guidelines.rst b/docs/docs_guidelines.rst
index c010d76431..79f9e8aa45 100644
--- a/docs/docs_guidelines.rst
+++ b/docs/docs_guidelines.rst
@@ -126,24 +126,27 @@ Description
 
 .. code-block:: python
     :linenos:
-    :lineno-start: 4
 
-    This module provides plugins access to the PCI device information gathered from
-    the ``/usr/sbin/lspci`` command.
+    """
+    lspci - Command
+    ===============
 
-    Typical output of the ``lspci`` command is::
+    This module provides plugins access to the PCI device information gathered from
+    the ``/usr/sbin/lspci`` command.
 
-        00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)
-        00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)
-        03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)
-        0d:00.0 System peripheral: Ricoh Co Ltd PCIe SDXC/MMC Host Controller (rev 07)
+    Typical output of the ``lspci`` command is::
 
-    The data is exposed via the ``obj.lines`` attribute which is a list containing
-    each line in the output. The data may also be filtered using the
-    ``obj.get("filter string")`` method. This method will return a list of lines
-    containing only "filter string". The ``in`` operator may also be used to test
-    whether a particular string is in the ``lspci`` output. Other methods/operators
-    are also supported, see the :py:class:`insights.core.LogFileOutput` class for more information.
+        00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)
+        00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)
+        03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)
+        0d:00.0 System peripheral: Ricoh Co Ltd PCIe SDXC/MMC Host Controller (rev 07)
+
+    The data is exposed via the ``obj.lines`` attribute which is a list containing
+    each line in the output. The data may also be filtered using the
+    ``obj.get("filter string")`` method. This method will return a list of lines
+    containing only "filter string". The ``in`` operator may also be used to test
+    whether a particular string is in the ``lspci`` output. Other methods/operators
+    are also supported, see the :py:class:`insights.core.LogFileOutput` class for more information.
 
 Next comes the description of the module.
 Since this description is the first thing a developer will see when viewing
@@ -162,13 +165,27 @@ Notes/References
 
 .. code-block:: python
     :linenos:
-    :lineno-start: 22
+    :lineno-start: 20
+    :force:
+
+    """
     Note:
         The examples in this module may be executed with the following command:
         ``python -m insights.parsers.lspci``
 
+    Examples:
+        >>> pci_info.get("Intel Corporation")
+        ['00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)', '00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)', '03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)']
+        >>> len(pci_info.get("Network controller"))
+        1
+        >>> "Centrino Advanced-N 6205" in pci_info
+        True
+        >>> "0d:00.0" in pci_info
+        True
+    """
+
 Module notes and/or references are not necessary unless there is
 information that should be included to aid a developer in understanding
 the parser. In this particular case this information is only provided as
 an aid to the
@@ -181,8 +198,10 @@ Examples
 
 .. code-block:: python
     :linenos:
-    :lineno-start: 27
+    :lineno-start: 25
 
+    """
+
+    Examples:
     >>> pci_info.get("Intel Corporation")
     ['00:00.0 Host bridge: Intel Corporation 2nd Generation Core Processor Family DRAM Controller (rev 09)', '00:02.0 VGA compatible controller: Intel Corporation 2nd Generation Core Processor Family Integrated Graphics Controller (rev 09)', '03:00.0 Network controller: Intel Corporation Centrino Advanced-N 6205 [Taylor Peak] (rev 34)']
diff --git a/docs/exception_model.rst b/docs/exception_model.rst
index d422071bae..95a9358d41 100644
--- a/docs/exception_model.rst
+++ b/docs/exception_model.rst
@@ -41,10 +41,10 @@ indicating the data was not present) and attempt to catch via python
 mechanisms issues that could reasonably be expected (conversion of a character
 to a number, missing values, etc.). When a parser makes the determination
 that the data is not usable, then it should explicitly raise a
-:py:class:`insights.parsers.ParseException` and provide as much
+:py:class:`insights.core.exceptions.ParseException` and provide as much
 useful information as is possible to help the Insights team and parser
 developer understand what happened. If any exception is expected to be raised it should be
-caught, and the :py:class:`insights.parsers.ParseException` raised in its place.
+caught, and the :py:class:`insights.core.exceptions.ParseException` raised in its place.
 No data will be made available to other parsers, combiners or rules in this
 case. It will be as if the data was not present in the input.
@@ -82,7 +82,7 @@ any exceptions in the data (“dirty parser”). This allows rules that don’t
 exceptions to rely on only the first parser, and those rules will not run if valid
 data is not present. If the dirty parser identifies errors in the data then it will
 save information regarding the errors for use by rules. If no errors are found in the data
-then the dirty parser will raise :py:class:`insights.parsers.SkipException`
+then the dirty parser will raise :py:class:`insights.core.exceptions.SkipComponent`
 to indicate to the engine that it should be removed from the dependency hierarchy.
 
 Other Exceptions from Parsers
@@ -99,15 +99,13 @@ types aren’t important and such checks may limit expressiveness and flexibility.
 Parsers should not use the assert statement in place of error handling code.
 Asserts are for debugging purposes only.
 
-SkipComponent and SkipException
-===============================
+SkipComponent
+=============
 
-Any component may raise `insights.SkipComponent` to signal to the engine that
+Any component may raise `SkipComponent` to signal to the engine that
 nothing is wrong but that the component should be taken out of dependency
 resolution. This is useful if a component's dependencies are met but it's still
 unable to produce a meaningful result.
-:py:class:`insights.parsers.SkipException` is a specialization of this for the
-dirty parser use case above, but it's treated the same as `SkipComponent`.
 
 Exception Recognition by the Insights Engine
 ============================================
@@ -115,8 +113,8 @@ Exceptions that are raised by parsers and combiners will be collected by the
 engine in order to determine whether to remove the component from the dependency
 hierarchy, for data metrics, and to help identify issues with the parsing code or with the data.
-Specific use of :py:class:`insights.parsers.ParseException`,
-:py:class:`insights.parsers.SkipException`, and `insights.SkipComponent` will
+Specific use of :py:class:`insights.core.exceptions.ParseException`,
+:py:class:`insights.core.exceptions.SkipException`, and `SkipComponent` will
 make it much easier for the engine to identify and quickly deal with known
 conditions versus unanticipated conditions (i.e., other exceptions being raised)
 which could indicate errors in the parsing code, errors in data collection, or
diff --git a/docs/index.rst b/docs/index.rst
index 43b4748aa0..613380abb7 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -11,10 +11,13 @@ Contents:
    api
    exception_model
    api_index
+   specs_catalog
    parsers_index
    combiners_index
    components_index
+   custom_datasources_index
    ocp
+   shell
    docs_guidelines
    components
    embedded_content
diff --git a/docs/intro.rst b/docs/intro.rst
index 76b0a2ff81..8f3ec4cbd9 100644
--- a/docs/intro.rst
+++ b/docs/intro.rst
@@ -167,7 +167,7 @@ many customization options to optimize each customer's specific needs.
 
 .. Links:
 .. _Red Hat Customer Portal: https://access.redhat.com
-.. _Red Hat Insights Portal: https://access.redhat.com/products/red-hat-insights.
+.. _Red Hat Insights Portal: https://access.redhat.com/products/red-hat-insights
 .. _insights-core Repository: https://github.com/RedHatInsights/insights-core
 .. _Mozilla OpenSSH Security Guidelines: https://wiki.mozilla.org/Security/Guidelines/OpenSSH
 .. _Red Hat Insights Client GitHub Project: http://github.com/redhataccess/insights-client
diff --git a/docs/manpages/insights-run.rst b/docs/manpages/insights-run.rst
index d30cb66955..5f1645412f 100644
--- a/docs/manpages/insights-run.rst
+++ b/docs/manpages/insights-run.rst
@@ -39,6 +39,12 @@ OPTIONS
 -c CONFIG --config CONFIG
     Configure components.
 
+\-\-color [=WHEN]
+    Choose if and how the color encoding is outputted. When can be 'always', 'auto', or
+    'never'. If always the color encoding isn't stripped from the output, so it can be
+    piped. If auto the color is outputted in the terminal but is stripped if piped. If
+    never then no color encoding is outputted.
+
 \-\-context CONTEXT
     Execution Context. Defaults to HostContext if an archive isn't passed. See
     :ref:`context-label` for additional information.
@@ -69,6 +75,9 @@ OPTIONS
 -m --missing
     Show missing requirements.
 
+-n --none
+    Show rules returning ``None``.
+
 -p PLUGINS --plugins PLUGINS
     Comma-separated list without spaces of package(s) or module(s) containing plugins.
diff --git a/docs/quickstart_insights_core.rst b/docs/quickstart_insights_core.rst
index 83b576df94..8a11d4c49a 100644
--- a/docs/quickstart_insights_core.rst
+++ b/docs/quickstart_insights_core.rst
@@ -39,27 +39,12 @@ file associated with the insights-core project.
 ``unzip`` to be able to run `pytest` on the ``insights-core`` repo, and
 ``pandoc`` to build Insights Core documentation.
 
-**********************
-Rule Development Setup
-**********************
-
-In order to develop rules to run in Red Hat Insights you'll need Insights
-Core (http://github.com/RedHatInsights/insights-core) as well as your own rules code.
-The commands below assume the following sample project directory structure
-containing the insights-core project repo and your directory and files
-for rule development::
-
-    project_dir
-    ├── insights-core
-    └── myrules
-        ├── hostname_rel.py
-        └── bash_version.py
-
-
-.. _insights_dev_setup:
+***************************
+Insights Development Setup
+***************************
 
-Insights Core Setup
-===================
+Insights Core Development
+=========================
 
 Clone the project::
 
@@ -113,6 +98,40 @@ command. If you use this method make sure you periodically update insights
 core in your virtualenv with the command `pip install --upgrade insights-core`.
 
+Insights Client Development
+===========================
+
+Clone the project::
+
+    [userone@hostone project_dir]$ git clone git@github.com:RedHatInsights/insights-core.git
+
+Initialize a virtualenv with the ``--system-site-packages`` flag::
+
+    [userone@hostone project_dir/insights-core]$ python3.6 -m venv --system-site-packages .
+
+Next install the insights-core project and its dependencies into your virtualenv::
+
+    (insights-core)[userone@hostone project_dir/insights-core]$ bin/pip install -e .[client-develop]
+
+**********************
+Rule Development Setup
+**********************
+
+In order to develop rules to run in Red Hat Insights you'll need Insights
+Core (http://github.com/RedHatInsights/insights-core) as well as your own rules code.
+The commands below assume the following sample project directory structure
+containing the insights-core project repo and your directory and files
+for rule development::
+
+    project_dir
+    ├── insights-core
+    └── myrules
+        ├── hostname_rel.py
+        └── bash_version.py
+
+
+..
_insights_dev_setup: + Rule Development ================ diff --git a/docs/shared_combiners_catalog/ansible_info.rst b/docs/shared_combiners_catalog/ansible_info.rst new file mode 100644 index 0000000000..061a1b24e4 --- /dev/null +++ b/docs/shared_combiners_catalog/ansible_info.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.ansible_info + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/cloud_instance.rst b/docs/shared_combiners_catalog/cloud_instance.rst new file mode 100644 index 0000000000..9b372a7e20 --- /dev/null +++ b/docs/shared_combiners_catalog/cloud_instance.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.cloud_instance + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/crio_conf.rst b/docs/shared_combiners_catalog/crio_conf.rst new file mode 100644 index 0000000000..cbbd2dc08f --- /dev/null +++ b/docs/shared_combiners_catalog/crio_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.crio_conf + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/cryptsetup.rst b/docs/shared_combiners_catalog/cryptsetup.rst new file mode 100644 index 0000000000..1bf56628c4 --- /dev/null +++ b/docs/shared_combiners_catalog/cryptsetup.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.cryptsetup + :members: LuksDevices + :show-inheritance: diff --git a/docs/shared_combiners_catalog/du.rst b/docs/shared_combiners_catalog/du.rst new file mode 100644 index 0000000000..2e9385cd77 --- /dev/null +++ b/docs/shared_combiners_catalog/du.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.du + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/identity_domain.rst b/docs/shared_combiners_catalog/identity_domain.rst new file mode 100644 index 0000000000..2a6d4c7c4c --- /dev/null +++ b/docs/shared_combiners_catalog/identity_domain.rst @@ -0,0 +1,7 @@ +.. automodule:: insights.combiners.identity_domain + +.. autoclass:: insights.combiners.identity_domain.DomainInfo + +.. autoclass:: insights.combiners.identity_domain.IdentityDomain + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/ipa.rst b/docs/shared_combiners_catalog/ipa.rst new file mode 100644 index 0000000000..4bcdf587f7 --- /dev/null +++ b/docs/shared_combiners_catalog/ipa.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.ipa + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/lspci.rst b/docs/shared_combiners_catalog/lspci.rst new file mode 100644 index 0000000000..a8af585159 --- /dev/null +++ b/docs/shared_combiners_catalog/lspci.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.lspci + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/os_release.rst b/docs/shared_combiners_catalog/os_release.rst new file mode 100644 index 0000000000..e901ecb58a --- /dev/null +++ b/docs/shared_combiners_catalog/os_release.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.os_release + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/package_provides_httpd.rst b/docs/shared_combiners_catalog/package_provides_httpd.rst deleted file mode 100644 index 07fc4f8092..0000000000 --- a/docs/shared_combiners_catalog/package_provides_httpd.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. 
automodule:: insights.combiners.package_provides_httpd - :members: - :show-inheritance: diff --git a/docs/shared_combiners_catalog/package_provides_java.rst b/docs/shared_combiners_catalog/package_provides_java.rst deleted file mode 100644 index ac93d1b53f..0000000000 --- a/docs/shared_combiners_catalog/package_provides_java.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.combiners.package_provides_java - :members: - :show-inheritance: diff --git a/docs/shared_combiners_catalog/rhel_for_edge.rst b/docs/shared_combiners_catalog/rhel_for_edge.rst new file mode 100644 index 0000000000..891f6fdc44 --- /dev/null +++ b/docs/shared_combiners_catalog/rhel_for_edge.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.rhel_for_edge + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/ssl_certificate.rst b/docs/shared_combiners_catalog/ssl_certificate.rst new file mode 100644 index 0000000000..1f1ddefff2 --- /dev/null +++ b/docs/shared_combiners_catalog/ssl_certificate.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.ssl_certificate + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/sudoers.rst b/docs/shared_combiners_catalog/sudoers.rst new file mode 100644 index 0000000000..62b681a0f6 --- /dev/null +++ b/docs/shared_combiners_catalog/sudoers.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.sudoers + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/sys_vmbus_devices.rst b/docs/shared_combiners_catalog/sys_vmbus_devices.rst new file mode 100644 index 0000000000..f40745f455 --- /dev/null +++ b/docs/shared_combiners_catalog/sys_vmbus_devices.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.sys_vmbus_devices + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/sysctl_conf.rst b/docs/shared_combiners_catalog/sysctl_conf.rst new file mode 100644 index 0000000000..71ab93d787 --- /dev/null +++ b/docs/shared_combiners_catalog/sysctl_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.combiners.sysctl_conf + :members: + :show-inheritance: diff --git a/docs/shared_combiners_catalog/uptime.rst b/docs/shared_combiners_catalog/uptime.rst deleted file mode 100644 index d6b624ebbc..0000000000 --- a/docs/shared_combiners_catalog/uptime.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.combiners.uptime - :members: - :show-inheritance: diff --git a/docs/shared_components_catalog/ceph.rst b/docs/shared_components_catalog/ceph.rst new file mode 100644 index 0000000000..59de0ca85b --- /dev/null +++ b/docs/shared_components_catalog/ceph.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.ceph + :members: + :show-inheritance: diff --git a/docs/shared_components_catalog/cloud_provider.rst b/docs/shared_components_catalog/cloud_provider.rst new file mode 100644 index 0000000000..a7a276769f --- /dev/null +++ b/docs/shared_components_catalog/cloud_provider.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.cloud_provider + :members: + :show-inheritance: diff --git a/docs/shared_components_catalog/cryptsetup.rst b/docs/shared_components_catalog/cryptsetup.rst new file mode 100644 index 0000000000..fa9e71fe95 --- /dev/null +++ b/docs/shared_components_catalog/cryptsetup.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.components.cryptsetup + :members: + :show-inheritance: diff --git a/docs/shared_components_catalog/satellite.rst b/docs/shared_components_catalog/satellite.rst new file mode 100644 index 0000000000..661763a23a --- /dev/null +++ b/docs/shared_components_catalog/satellite.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.satellite + :members: + :show-inheritance: diff --git a/docs/shared_components_catalog/virtualization.rst b/docs/shared_components_catalog/virtualization.rst new file mode 100644 index 0000000000..a3929d7df2 --- /dev/null +++ b/docs/shared_components_catalog/virtualization.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.components.virtualization + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/abrt_ccpp.rst b/docs/shared_parsers_catalog/abrt_ccpp.rst new file mode 100644 index 0000000000..fc0dd570ff --- /dev/null +++ b/docs/shared_parsers_catalog/abrt_ccpp.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.abrt_ccpp + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/auditctl.rst b/docs/shared_parsers_catalog/auditctl.rst new file mode 100644 index 0000000000..83572e683c --- /dev/null +++ b/docs/shared_parsers_catalog/auditctl.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.auditctl + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/auditctl_status.rst b/docs/shared_parsers_catalog/auditctl_status.rst deleted file mode 100644 index b136f9730f..0000000000 --- a/docs/shared_parsers_catalog/auditctl_status.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.auditctl_status - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/authselect.rst b/docs/shared_parsers_catalog/authselect.rst new file mode 100644 index 0000000000..e561fb817a --- /dev/null +++ b/docs/shared_parsers_catalog/authselect.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.authselect + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/aws_instance_type.rst b/docs/shared_parsers_catalog/aws_instance_type.rst deleted file mode 100644 index 7665004622..0000000000 --- a/docs/shared_parsers_catalog/aws_instance_type.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.aws_instance_type - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/awx_manage.rst b/docs/shared_parsers_catalog/awx_manage.rst new file mode 100644 index 0000000000..afe92f9974 --- /dev/null +++ b/docs/shared_parsers_catalog/awx_manage.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.awx_manage + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/azure_instance.rst b/docs/shared_parsers_catalog/azure_instance.rst new file mode 100644 index 0000000000..55318b85a5 --- /dev/null +++ b/docs/shared_parsers_catalog/azure_instance.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.azure_instance + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/azure_instance_plan.rst b/docs/shared_parsers_catalog/azure_instance_plan.rst new file mode 100644 index 0000000000..a41a1acb6f --- /dev/null +++ b/docs/shared_parsers_catalog/azure_instance_plan.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.azure_instance_plan + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/bdi_read_ahead_kb.rst b/docs/shared_parsers_catalog/bdi_read_ahead_kb.rst new file mode 100644 index 0000000000..abfb9d313e --- /dev/null +++ b/docs/shared_parsers_catalog/bdi_read_ahead_kb.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.bdi_read_ahead_kb + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/blacklisted.rst b/docs/shared_parsers_catalog/blacklisted.rst new file mode 100644 index 0000000000..1f65ea50c0 --- /dev/null +++ b/docs/shared_parsers_catalog/blacklisted.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.blacklisted + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/candlepin_broker.rst b/docs/shared_parsers_catalog/candlepin_broker.rst new file mode 100644 index 0000000000..1333d3bd38 --- /dev/null +++ b/docs/shared_parsers_catalog/candlepin_broker.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.candlepin_broker + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/client_metadata.rst b/docs/shared_parsers_catalog/client_metadata.rst new file mode 100644 index 0000000000..1dd2afa621 --- /dev/null +++ b/docs/shared_parsers_catalog/client_metadata.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.client_metadata + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cloud_cfg.rst b/docs/shared_parsers_catalog/cloud_cfg.rst new file mode 100644 index 0000000000..31f5f19eba --- /dev/null +++ b/docs/shared_parsers_catalog/cloud_cfg.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cloud_cfg + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cni_podman_bridge_conf.rst b/docs/shared_parsers_catalog/cni_podman_bridge_conf.rst new file mode 100644 index 0000000000..8a06d82366 --- /dev/null +++ b/docs/shared_parsers_catalog/cni_podman_bridge_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cni_podman_bridge_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/config_file_perms.rst b/docs/shared_parsers_catalog/config_file_perms.rst new file mode 100644 index 0000000000..72683b832a --- /dev/null +++ b/docs/shared_parsers_catalog/config_file_perms.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.config_file_perms + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/containers_inspect.rst b/docs/shared_parsers_catalog/containers_inspect.rst new file mode 100644 index 0000000000..9657252744 --- /dev/null +++ b/docs/shared_parsers_catalog/containers_inspect.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.containers_inspect + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/containers_policy.rst b/docs/shared_parsers_catalog/containers_policy.rst new file mode 100644 index 0000000000..3628bd63c5 --- /dev/null +++ b/docs/shared_parsers_catalog/containers_policy.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.containers_policy + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/corosync_cmapctl.rst b/docs/shared_parsers_catalog/corosync_cmapctl.rst new file mode 100644 index 0000000000..8837ad1d5a --- /dev/null +++ b/docs/shared_parsers_catalog/corosync_cmapctl.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.corosync_cmapctl + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cpu_online.rst b/docs/shared_parsers_catalog/cpu_online.rst new file mode 100644 index 0000000000..48c7f462f9 --- /dev/null +++ b/docs/shared_parsers_catalog/cpu_online.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.cpu_online + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/crictl_logs.rst b/docs/shared_parsers_catalog/crictl_logs.rst new file mode 100644 index 0000000000..68a8466194 --- /dev/null +++ b/docs/shared_parsers_catalog/crictl_logs.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.crictl_logs + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/crio_conf.rst b/docs/shared_parsers_catalog/crio_conf.rst new file mode 100644 index 0000000000..3fed8ab920 --- /dev/null +++ b/docs/shared_parsers_catalog/crio_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.crio_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cron_daily_rhsmd.rst b/docs/shared_parsers_catalog/cron_daily_rhsmd.rst new file mode 100644 index 0000000000..b202f9a0b7 --- /dev/null +++ b/docs/shared_parsers_catalog/cron_daily_rhsmd.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cron_daily_rhsmd + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cron_jobs.rst b/docs/shared_parsers_catalog/cron_jobs.rst new file mode 100644 index 0000000000..cbf49e5c97 --- /dev/null +++ b/docs/shared_parsers_catalog/cron_jobs.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cron_jobs + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cryptsetup_luksDump.rst b/docs/shared_parsers_catalog/cryptsetup_luksDump.rst new file mode 100644 index 0000000000..479519780d --- /dev/null +++ b/docs/shared_parsers_catalog/cryptsetup_luksDump.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cryptsetup_luksDump + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cups_confs.rst b/docs/shared_parsers_catalog/cups_confs.rst new file mode 100644 index 0000000000..a4e810fb4b --- /dev/null +++ b/docs/shared_parsers_catalog/cups_confs.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cups_confs + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/cups_ppd.rst b/docs/shared_parsers_catalog/cups_ppd.rst new file mode 100644 index 0000000000..ecb18cc534 --- /dev/null +++ b/docs/shared_parsers_catalog/cups_ppd.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.cups_ppd + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/db2.rst b/docs/shared_parsers_catalog/db2.rst new file mode 100644 index 0000000000..e04a106e3c --- /dev/null +++ b/docs/shared_parsers_catalog/db2.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.db2 + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/db2licm.rst b/docs/shared_parsers_catalog/db2licm.rst deleted file mode 100644 index b5777bfc03..0000000000 --- a/docs/shared_parsers_catalog/db2licm.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.db2licm - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/designate_conf.rst b/docs/shared_parsers_catalog/designate_conf.rst new file mode 100644 index 0000000000..3a7fbd7328 --- /dev/null +++ b/docs/shared_parsers_catalog/designate_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.designate_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/dig.rst b/docs/shared_parsers_catalog/dig.rst new file mode 100644 index 0000000000..a7538f8cd3 --- /dev/null +++ b/docs/shared_parsers_catalog/dig.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.dig + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/dirsrv_sysconfig.rst b/docs/shared_parsers_catalog/dirsrv_sysconfig.rst deleted file mode 100644 index 65cf2c255e..0000000000 --- a/docs/shared_parsers_catalog/dirsrv_sysconfig.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.dirsrv_sysconfig - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/dnf_conf.rst b/docs/shared_parsers_catalog/dnf_conf.rst new file mode 100644 index 0000000000..e58f77f7d5 --- /dev/null +++ b/docs/shared_parsers_catalog/dnf_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dnf_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/docker_storage_setup.rst b/docs/shared_parsers_catalog/docker_storage_setup.rst deleted file mode 100644 index 83d21003fa..0000000000 --- a/docs/shared_parsers_catalog/docker_storage_setup.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.docker_storage_setup - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/dotnet.rst b/docs/shared_parsers_catalog/dotnet.rst new file mode 100644 index 0000000000..775f5a914c --- /dev/null +++ b/docs/shared_parsers_catalog/dotnet.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dotnet + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/doveconf.rst b/docs/shared_parsers_catalog/doveconf.rst new file mode 100644 index 0000000000..2d9db0d77a --- /dev/null +++ b/docs/shared_parsers_catalog/doveconf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.doveconf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/dracut_modules.rst b/docs/shared_parsers_catalog/dracut_modules.rst new file mode 100644 index 0000000000..a161ed3913 --- /dev/null +++ b/docs/shared_parsers_catalog/dracut_modules.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dracut_modules + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/dse_ldif_simple.rst b/docs/shared_parsers_catalog/dse_ldif_simple.rst new file mode 100644 index 0000000000..5251dd929d --- /dev/null +++ b/docs/shared_parsers_catalog/dse_ldif_simple.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.dse_ldif_simple + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/du.rst b/docs/shared_parsers_catalog/du.rst new file mode 100644 index 0000000000..effa0c6967 --- /dev/null +++ b/docs/shared_parsers_catalog/du.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.du + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/eap_json_reports.rst b/docs/shared_parsers_catalog/eap_json_reports.rst new file mode 100644 index 0000000000..35269195f6 --- /dev/null +++ b/docs/shared_parsers_catalog/eap_json_reports.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.eap_json_reports + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/engine_db_query.rst b/docs/shared_parsers_catalog/engine_db_query.rst new file mode 100644 index 0000000000..91b13bbc5e --- /dev/null +++ b/docs/shared_parsers_catalog/engine_db_query.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.engine_db_query + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/etc_machine_id.rst b/docs/shared_parsers_catalog/etc_machine_id.rst new file mode 100644 index 0000000000..af1c5b0409 --- /dev/null +++ b/docs/shared_parsers_catalog/etc_machine_id.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.etc_machine_id + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/fapolicyd_rules.rst b/docs/shared_parsers_catalog/fapolicyd_rules.rst new file mode 100644 index 0000000000..4baff810f0 --- /dev/null +++ b/docs/shared_parsers_catalog/fapolicyd_rules.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.fapolicyd_rules + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/firewall_cmd.rst b/docs/shared_parsers_catalog/firewall_cmd.rst new file mode 100644 index 0000000000..97203eb4d9 --- /dev/null +++ b/docs/shared_parsers_catalog/firewall_cmd.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.firewall_cmd + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/foreman_tasks_config.rst b/docs/shared_parsers_catalog/foreman_tasks_config.rst deleted file mode 100644 index 41c2df7bb9..0000000000 --- a/docs/shared_parsers_catalog/foreman_tasks_config.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.foreman_tasks_config - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/fwupdagent.rst b/docs/shared_parsers_catalog/fwupdagent.rst new file mode 100644 index 0000000000..49501c632f --- /dev/null +++ b/docs/shared_parsers_catalog/fwupdagent.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.fwupdagent + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/gcp_instance_type.rst b/docs/shared_parsers_catalog/gcp_instance_type.rst new file mode 100644 index 0000000000..5ed223c74d --- /dev/null +++ b/docs/shared_parsers_catalog/gcp_instance_type.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.gcp_instance_type + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/gcp_license_codes.rst b/docs/shared_parsers_catalog/gcp_license_codes.rst new file mode 100644 index 0000000000..dec08c3ccf --- /dev/null +++ b/docs/shared_parsers_catalog/gcp_license_codes.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.gcp_license_codes + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/gfs2_file_system_block_size.rst b/docs/shared_parsers_catalog/gfs2_file_system_block_size.rst new file mode 100644 index 0000000000..32b2bff3d1 --- /dev/null +++ b/docs/shared_parsers_catalog/gfs2_file_system_block_size.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.gfs2_file_system_block_size + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/greenboot_status.rst b/docs/shared_parsers_catalog/greenboot_status.rst new file mode 100644 index 0000000000..e90179d366 --- /dev/null +++ b/docs/shared_parsers_catalog/greenboot_status.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.greenboot_status + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/grubenv.rst b/docs/shared_parsers_catalog/grubenv.rst new file mode 100644 index 0000000000..9905f5c4e4 --- /dev/null +++ b/docs/shared_parsers_catalog/grubenv.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.grubenv + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ibm_proc.rst b/docs/shared_parsers_catalog/ibm_proc.rst new file mode 100644 index 0000000000..1f7fc0ce34 --- /dev/null +++ b/docs/shared_parsers_catalog/ibm_proc.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.ibm_proc + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/imagemagick_policy.rst b/docs/shared_parsers_catalog/imagemagick_policy.rst new file mode 100644 index 0000000000..b8dbd247f4 --- /dev/null +++ b/docs/shared_parsers_catalog/imagemagick_policy.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.imagemagick_policy + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/insights_client_conf.rst b/docs/shared_parsers_catalog/insights_client_conf.rst new file mode 100644 index 0000000000..361170c660 --- /dev/null +++ b/docs/shared_parsers_catalog/insights_client_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.insights_client_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ipa_conf.rst b/docs/shared_parsers_catalog/ipa_conf.rst new file mode 100644 index 0000000000..3126d5fbbc --- /dev/null +++ b/docs/shared_parsers_catalog/ipa_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ipa_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ipsec_conf.rst b/docs/shared_parsers_catalog/ipsec_conf.rst new file mode 100644 index 0000000000..d2b6920d87 --- /dev/null +++ b/docs/shared_parsers_catalog/ipsec_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ipsec_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/journal_since_boot.rst b/docs/shared_parsers_catalog/journal_since_boot.rst deleted file mode 100644 index 13351f3a20..0000000000 --- a/docs/shared_parsers_catalog/journal_since_boot.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.journal_since_boot - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/journalctl.rst b/docs/shared_parsers_catalog/journalctl.rst new file mode 100644 index 0000000000..5144215c30 --- /dev/null +++ b/docs/shared_parsers_catalog/journalctl.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.journalctl + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/kpatch_patches.rst b/docs/shared_parsers_catalog/kpatch_patches.rst deleted file mode 100644 index be8ac55335..0000000000 --- a/docs/shared_parsers_catalog/kpatch_patches.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.kpatch_patches - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/ktimer_lockless.rst b/docs/shared_parsers_catalog/ktimer_lockless.rst new file mode 100644 index 0000000000..529b3f77ec --- /dev/null +++ b/docs/shared_parsers_catalog/ktimer_lockless.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ktimer_lockless + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ld_library_path.rst b/docs/shared_parsers_catalog/ld_library_path.rst new file mode 100644 index 0000000000..738401ec92 --- /dev/null +++ b/docs/shared_parsers_catalog/ld_library_path.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ld_library_path + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ldif_config.rst b/docs/shared_parsers_catalog/ldif_config.rst new file mode 100644 index 0000000000..8fe84b92e8 --- /dev/null +++ b/docs/shared_parsers_catalog/ldif_config.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ldif_config + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/leapp.rst b/docs/shared_parsers_catalog/leapp.rst new file mode 100644 index 0000000000..0d660abec9 --- /dev/null +++ b/docs/shared_parsers_catalog/leapp.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.leapp + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/libkeyutils.rst b/docs/shared_parsers_catalog/libkeyutils.rst deleted file mode 100644 index a474ae805b..0000000000 --- a/docs/shared_parsers_catalog/libkeyutils.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.libkeyutils - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/libssh_config.rst b/docs/shared_parsers_catalog/libssh_config.rst new file mode 100644 index 0000000000..e67a462201 --- /dev/null +++ b/docs/shared_parsers_catalog/libssh_config.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.libssh_config + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/losetup.rst b/docs/shared_parsers_catalog/losetup.rst new file mode 100644 index 0000000000..7bcc34088b --- /dev/null +++ b/docs/shared_parsers_catalog/losetup.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.losetup + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst b/docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst new file mode 100644 index 0000000000..c14ae30f66 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_ipa_idoverride_memberof.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_ipa_idoverride_memberof + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_krb5_sssd.rst b/docs/shared_parsers_catalog/ls_krb5_sssd.rst new file mode 100644 index 0000000000..4a6bf97719 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_krb5_sssd.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_krb5_sssd + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_rsyslog_errorfile.rst b/docs/shared_parsers_catalog/ls_rsyslog_errorfile.rst new file mode 100644 index 0000000000..3828c64642 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_rsyslog_errorfile.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_rsyslog_errorfile + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_run_systemd_generator.rst b/docs/shared_parsers_catalog/ls_run_systemd_generator.rst deleted file mode 100644 index bf246301fa..0000000000 --- a/docs/shared_parsers_catalog/ls_run_systemd_generator.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.ls_run_systemd_generator - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_systemd_units.rst b/docs/shared_parsers_catalog/ls_systemd_units.rst new file mode 100644 index 0000000000..0f589714a6 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_systemd_units.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_systemd_units + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_tmp.rst b/docs/shared_parsers_catalog/ls_tmp.rst new file mode 100644 index 0000000000..8f597e2a87 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_tmp.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_tmp + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_usr_bin.rst b/docs/shared_parsers_catalog/ls_usr_bin.rst new file mode 100644 index 0000000000..8f07c71d95 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_usr_bin.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.ls_usr_bin + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_var_cache_pulp.rst b/docs/shared_parsers_catalog/ls_var_cache_pulp.rst new file mode 100644 index 0000000000..921ad97b06 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_var_cache_pulp.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_var_cache_pulp + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_var_lib_pcp.rst b/docs/shared_parsers_catalog/ls_var_lib_pcp.rst new file mode 100644 index 0000000000..642737af3a --- /dev/null +++ b/docs/shared_parsers_catalog/ls_var_lib_pcp.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_var_lib_pcp + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_var_lib_rpm.rst b/docs/shared_parsers_catalog/ls_var_lib_rpm.rst new file mode 100644 index 0000000000..744458be67 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_var_lib_rpm.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_var_lib_rpm + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_var_lib_rsyslog.rst b/docs/shared_parsers_catalog/ls_var_lib_rsyslog.rst new file mode 100644 index 0000000000..abad60b550 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_var_lib_rsyslog.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_var_lib_rsyslog + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ls_var_www_perms.rst b/docs/shared_parsers_catalog/ls_var_www_perms.rst new file mode 100644 index 0000000000..5496130a56 --- /dev/null +++ b/docs/shared_parsers_catalog/ls_var_www_perms.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ls_var_www_perms + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/luksmeta.rst b/docs/shared_parsers_catalog/luksmeta.rst new file mode 100644 index 0000000000..c1b1ddfe3c --- /dev/null +++ b/docs/shared_parsers_catalog/luksmeta.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.luksmeta + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/machine_id.rst b/docs/shared_parsers_catalog/machine_id.rst new file mode 100644 index 0000000000..21812e6a9b --- /dev/null +++ b/docs/shared_parsers_catalog/machine_id.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.machine_id + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/mdadm.rst b/docs/shared_parsers_catalog/mdadm.rst new file mode 100644 index 0000000000..cfc5ef9eaf --- /dev/null +++ b/docs/shared_parsers_catalog/mdadm.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.mdadm + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/mokutil_sbstate.rst b/docs/shared_parsers_catalog/mokutil_sbstate.rst new file mode 100644 index 0000000000..52e8371145 --- /dev/null +++ b/docs/shared_parsers_catalog/mokutil_sbstate.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.mokutil_sbstate + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/mpirun.rst b/docs/shared_parsers_catalog/mpirun.rst new file mode 100644 index 0000000000..761f4f0216 --- /dev/null +++ b/docs/shared_parsers_catalog/mpirun.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.mpirun + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/mssql_api_assessment.rst b/docs/shared_parsers_catalog/mssql_api_assessment.rst new file mode 100644 index 0000000000..f731e0d4e0 --- /dev/null +++ b/docs/shared_parsers_catalog/mssql_api_assessment.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.mssql_api_assessment + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/named_checkconf.rst b/docs/shared_parsers_catalog/named_checkconf.rst new file mode 100644 index 0000000000..9a6710eb86 --- /dev/null +++ b/docs/shared_parsers_catalog/named_checkconf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.named_checkconf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/named_conf.rst b/docs/shared_parsers_catalog/named_conf.rst new file mode 100644 index 0000000000..9db1477365 --- /dev/null +++ b/docs/shared_parsers_catalog/named_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.named_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ndctl_list.rst b/docs/shared_parsers_catalog/ndctl_list.rst new file mode 100644 index 0000000000..0545cbde96 --- /dev/null +++ b/docs/shared_parsers_catalog/ndctl_list.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ndctl_list + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/netconsole.rst b/docs/shared_parsers_catalog/netconsole.rst deleted file mode 100644 index 463cb9016e..0000000000 --- a/docs/shared_parsers_catalog/netconsole.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.netconsole - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/networkmanager_config.rst b/docs/shared_parsers_catalog/networkmanager_config.rst new file mode 100644 index 0000000000..cd85d4908d --- /dev/null +++ b/docs/shared_parsers_catalog/networkmanager_config.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.networkmanager_config + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/networkmanager_dhclient.rst b/docs/shared_parsers_catalog/networkmanager_dhclient.rst new file mode 100644 index 0000000000..37e745b567 --- /dev/null +++ b/docs/shared_parsers_catalog/networkmanager_dhclient.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.networkmanager_dhclient + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/neutron_sriov_agent.rst b/docs/shared_parsers_catalog/neutron_sriov_agent.rst new file mode 100644 index 0000000000..bb2de76e8f --- /dev/null +++ b/docs/shared_parsers_catalog/neutron_sriov_agent.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.neutron_sriov_agent + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/nfs_conf.rst b/docs/shared_parsers_catalog/nfs_conf.rst new file mode 100644 index 0000000000..552f6deff9 --- /dev/null +++ b/docs/shared_parsers_catalog/nfs_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.nfs_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/nginx_log.rst b/docs/shared_parsers_catalog/nginx_log.rst new file mode 100644 index 0000000000..6d56f5f036 --- /dev/null +++ b/docs/shared_parsers_catalog/nginx_log.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.nginx_log + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/nss_rhel7.rst b/docs/shared_parsers_catalog/nss_rhel7.rst new file mode 100644 index 0000000000..04d179942e --- /dev/null +++ b/docs/shared_parsers_catalog/nss_rhel7.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.nss_rhel7 + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/od_cpu_dma_latency.rst b/docs/shared_parsers_catalog/od_cpu_dma_latency.rst new file mode 100644 index 0000000000..1cd6dba7ec --- /dev/null +++ b/docs/shared_parsers_catalog/od_cpu_dma_latency.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.od_cpu_dma_latency + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/shared_parsers_catalog/open_vm_tools.rst b/docs/shared_parsers_catalog/open_vm_tools.rst new file mode 100644 index 0000000000..ff951d21f2 --- /dev/null +++ b/docs/shared_parsers_catalog/open_vm_tools.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.open_vm_tools + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ovs_vsctl.rst b/docs/shared_parsers_catalog/ovs_vsctl.rst new file mode 100644 index 0000000000..2b6cf0ad12 --- /dev/null +++ b/docs/shared_parsers_catalog/ovs_vsctl.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ovs_vsctl + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/package_provides.rst b/docs/shared_parsers_catalog/package_provides.rst new file mode 100644 index 0000000000..f885b46142 --- /dev/null +++ b/docs/shared_parsers_catalog/package_provides.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.package_provides + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/package_provides_httpd.rst b/docs/shared_parsers_catalog/package_provides_httpd.rst deleted file mode 100644 index 0d07709d27..0000000000 --- a/docs/shared_parsers_catalog/package_provides_httpd.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.package_provides_httpd - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/package_provides_java.rst b/docs/shared_parsers_catalog/package_provides_java.rst deleted file mode 100644 index 3be78e3f68..0000000000 --- a/docs/shared_parsers_catalog/package_provides_java.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.package_provides_java - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/pcp_openmetrics_log.rst b/docs/shared_parsers_catalog/pcp_openmetrics_log.rst new file mode 100644 index 0000000000..c3c1453a4a --- /dev/null +++ b/docs/shared_parsers_catalog/pcp_openmetrics_log.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.pcp_openmetrics_log + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/php_ini.rst b/docs/shared_parsers_catalog/php_ini.rst new file mode 100644 index 0000000000..2e8bf4d82f --- /dev/null +++ b/docs/shared_parsers_catalog/php_ini.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.php_ini + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/pmlog_summary.rst b/docs/shared_parsers_catalog/pmlog_summary.rst new file mode 100644 index 0000000000..f2124d8ba0 --- /dev/null +++ b/docs/shared_parsers_catalog/pmlog_summary.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.pmlog_summary + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/pmrep.rst b/docs/shared_parsers_catalog/pmrep.rst new file mode 100644 index 0000000000..df5b40aa10 --- /dev/null +++ b/docs/shared_parsers_catalog/pmrep.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.pmrep + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/postconf.rst b/docs/shared_parsers_catalog/postconf.rst new file mode 100644 index 0000000000..0607ad58db --- /dev/null +++ b/docs/shared_parsers_catalog/postconf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.postconf + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/shared_parsers_catalog/proc_keys.rst b/docs/shared_parsers_catalog/proc_keys.rst new file mode 100644 index 0000000000..570dc0733d --- /dev/null +++ b/docs/shared_parsers_catalog/proc_keys.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.proc_keys + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/proc_keyusers.rst b/docs/shared_parsers_catalog/proc_keyusers.rst new file mode 100644 index 0000000000..c1262ed53e --- /dev/null +++ b/docs/shared_parsers_catalog/proc_keyusers.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.proc_keyusers + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst b/docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst new file mode 100644 index 0000000000..31cb372e03 --- /dev/null +++ b/docs/shared_parsers_catalog/puppet_ca_cert_expire_date.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.puppet_ca_cert_expire_date + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/puppetserver_config.rst b/docs/shared_parsers_catalog/puppetserver_config.rst deleted file mode 100644 index 700a31a966..0000000000 --- a/docs/shared_parsers_catalog/puppetserver_config.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.puppetserver_config - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/readlink_openshift_certs.rst b/docs/shared_parsers_catalog/readlink_openshift_certs.rst new file mode 100644 index 0000000000..9d42ecff56 --- /dev/null +++ b/docs/shared_parsers_catalog/readlink_openshift_certs.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.readlink_openshift_certs + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/repquota.rst b/docs/shared_parsers_catalog/repquota.rst new file mode 100644 index 0000000000..d5a5cdf5bd --- /dev/null +++ b/docs/shared_parsers_catalog/repquota.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.repquota + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/rhsm_conf.rst b/docs/shared_parsers_catalog/rhsm_conf.rst new file mode 100644 index 0000000000..2e344968ea --- /dev/null +++ b/docs/shared_parsers_catalog/rhsm_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.rhsm_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ros_config.rst b/docs/shared_parsers_catalog/ros_config.rst new file mode 100644 index 0000000000..b841e6a26d --- /dev/null +++ b/docs/shared_parsers_catalog/ros_config.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ros_config + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/rpm_ostree_status.rst b/docs/shared_parsers_catalog/rpm_ostree_status.rst new file mode 100644 index 0000000000..def269695d --- /dev/null +++ b/docs/shared_parsers_catalog/rpm_ostree_status.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.rpm_ostree_status + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/shared_parsers_catalog/rpm_pkgs.rst b/docs/shared_parsers_catalog/rpm_pkgs.rst new file mode 100644 index 0000000000..09205b93a4 --- /dev/null +++ b/docs/shared_parsers_catalog/rpm_pkgs.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.rpm_pkgs + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/rpm_v_packages.rst b/docs/shared_parsers_catalog/rpm_v_packages.rst new file mode 100644 index 0000000000..d38a5b31f2 --- /dev/null +++ b/docs/shared_parsers_catalog/rpm_v_packages.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.rpm_v_packages + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sap_dev_trace_files.rst b/docs/shared_parsers_catalog/sap_dev_trace_files.rst new file mode 100644 index 0000000000..cebe9f08cf --- /dev/null +++ b/docs/shared_parsers_catalog/sap_dev_trace_files.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sap_dev_trace_files + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sap_hana_python_script.rst b/docs/shared_parsers_catalog/sap_hana_python_script.rst new file mode 100644 index 0000000000..80d4ba733e --- /dev/null +++ b/docs/shared_parsers_catalog/sap_hana_python_script.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sap_hana_python_script + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/satellite_content_hosts_count.rst b/docs/shared_parsers_catalog/satellite_content_hosts_count.rst new file mode 100644 index 0000000000..fbbd1825d6 --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_content_hosts_count.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.satellite_content_hosts_count + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/satellite_missed_queues.rst b/docs/shared_parsers_catalog/satellite_missed_queues.rst new file mode 100644 index 0000000000..74992bcb41 --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_missed_queues.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.satellite_missed_queues + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/satellite_postgresql_query.rst b/docs/shared_parsers_catalog/satellite_postgresql_query.rst new file mode 100644 index 0000000000..8a8c710a19 --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_postgresql_query.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.satellite_postgresql_query + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/satellite_yaml.rst b/docs/shared_parsers_catalog/satellite_yaml.rst new file mode 100644 index 0000000000..4f631765c5 --- /dev/null +++ b/docs/shared_parsers_catalog/satellite_yaml.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.satellite_yaml + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/scheduler.rst b/docs/shared_parsers_catalog/scheduler.rst new file mode 100644 index 0000000000..406c2d5f86 --- /dev/null +++ b/docs/shared_parsers_catalog/scheduler.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.scheduler + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/semanage.rst b/docs/shared_parsers_catalog/semanage.rst new file mode 100644 index 0000000000..00fed833b5 --- /dev/null +++ b/docs/shared_parsers_catalog/semanage.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.semanage + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst b/docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst new file mode 100644 index 0000000000..a64c33cc09 --- /dev/null +++ b/docs/shared_parsers_catalog/sendq_recvq_socket_buffer.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sendq_recvq_socket_buffer + :members: + :show-inheritance: \ No newline at end of file diff --git a/docs/shared_parsers_catalog/sos_conf.rst b/docs/shared_parsers_catalog/sos_conf.rst new file mode 100644 index 0000000000..360e74b180 --- /dev/null +++ b/docs/shared_parsers_catalog/sos_conf.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sos_conf + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/spamassassin_channels.rst b/docs/shared_parsers_catalog/spamassassin_channels.rst new file mode 100644 index 0000000000..3b579b2af0 --- /dev/null +++ b/docs/shared_parsers_catalog/spamassassin_channels.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.spamassassin_channels + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/ssl_certificate.rst b/docs/shared_parsers_catalog/ssl_certificate.rst new file mode 100644 index 0000000000..aa27fe9574 --- /dev/null +++ b/docs/shared_parsers_catalog/ssl_certificate.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.ssl_certificate + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/subscription_manager.rst b/docs/shared_parsers_catalog/subscription_manager.rst new file mode 100644 index 0000000000..6428343a3e --- /dev/null +++ b/docs/shared_parsers_catalog/subscription_manager.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.subscription_manager + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sudoers.rst b/docs/shared_parsers_catalog/sudoers.rst new file mode 100644 index 0000000000..1f5d13f4f8 --- /dev/null +++ b/docs/shared_parsers_catalog/sudoers.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sudoers + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sys_fs_cgroup_memory.rst b/docs/shared_parsers_catalog/sys_fs_cgroup_memory.rst new file mode 100644 index 0000000000..39cb3c5781 --- /dev/null +++ b/docs/shared_parsers_catalog/sys_fs_cgroup_memory.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sys_fs_cgroup_memory + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sys_fs_cgroup_memory_tasks_number.rst b/docs/shared_parsers_catalog/sys_fs_cgroup_memory_tasks_number.rst new file mode 100644 index 0000000000..b69ebd34a1 --- /dev/null +++ b/docs/shared_parsers_catalog/sys_fs_cgroup_memory_tasks_number.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sys_fs_cgroup_memory_tasks_number + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sys_kernel.rst b/docs/shared_parsers_catalog/sys_kernel.rst new file mode 100644 index 0000000000..09937a841c --- /dev/null +++ b/docs/shared_parsers_catalog/sys_kernel.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sys_kernel + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sys_module.rst b/docs/shared_parsers_catalog/sys_module.rst new file mode 100644 index 0000000000..d24fd608b8 --- /dev/null +++ b/docs/shared_parsers_catalog/sys_module.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.sys_module + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/sys_vmbus.rst b/docs/shared_parsers_catalog/sys_vmbus.rst new file mode 100644 index 0000000000..92daeafce6 --- /dev/null +++ b/docs/shared_parsers_catalog/sys_vmbus.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.sys_vmbus + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/systemctl_status_all.rst b/docs/shared_parsers_catalog/systemctl_status_all.rst new file mode 100644 index 0000000000..e9f62dcf15 --- /dev/null +++ b/docs/shared_parsers_catalog/systemctl_status_all.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.systemctl_status_all + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/systemd_analyze.rst b/docs/shared_parsers_catalog/systemd_analyze.rst new file mode 100644 index 0000000000..5869e1be3e --- /dev/null +++ b/docs/shared_parsers_catalog/systemd_analyze.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.systemd_analyze + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/upstart.rst b/docs/shared_parsers_catalog/upstart.rst new file mode 100644 index 0000000000..07a7de5ae1 --- /dev/null +++ b/docs/shared_parsers_catalog/upstart.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.upstart + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/user_group.rst b/docs/shared_parsers_catalog/user_group.rst new file mode 100644 index 0000000000..a4ea9379b8 --- /dev/null +++ b/docs/shared_parsers_catalog/user_group.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.user_group + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/version_info.rst b/docs/shared_parsers_catalog/version_info.rst new file mode 100644 index 0000000000..2410a98eb7 --- /dev/null +++ b/docs/shared_parsers_catalog/version_info.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.version_info + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/watchdog_logs.rst b/docs/shared_parsers_catalog/watchdog_logs.rst new file mode 100644 index 0000000000..df21036e83 --- /dev/null +++ b/docs/shared_parsers_catalog/watchdog_logs.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.watchdog_logs + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/wc_proc_1_mountinfo.rst b/docs/shared_parsers_catalog/wc_proc_1_mountinfo.rst new file mode 100644 index 0000000000..53b8563b23 --- /dev/null +++ b/docs/shared_parsers_catalog/wc_proc_1_mountinfo.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.wc_proc_1_mountinfo + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/yum_list.rst b/docs/shared_parsers_catalog/yum_list.rst new file mode 100644 index 0000000000..aceecdd6a0 --- /dev/null +++ b/docs/shared_parsers_catalog/yum_list.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.yum_list + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/yum_list_installed.rst b/docs/shared_parsers_catalog/yum_list_installed.rst deleted file mode 100644 index 8d5725ac66..0000000000 --- a/docs/shared_parsers_catalog/yum_list_installed.rst +++ /dev/null @@ -1,3 +0,0 @@ -.. automodule:: insights.parsers.yum_list_installed - :members: - :show-inheritance: diff --git a/docs/shared_parsers_catalog/yum_updateinfo.rst b/docs/shared_parsers_catalog/yum_updateinfo.rst new file mode 100644 index 0000000000..d5357f89e7 --- /dev/null +++ b/docs/shared_parsers_catalog/yum_updateinfo.rst @@ -0,0 +1,3 @@ +.. 
automodule:: insights.parsers.yum_updateinfo + :members: + :show-inheritance: diff --git a/docs/shared_parsers_catalog/yum_updates.rst b/docs/shared_parsers_catalog/yum_updates.rst new file mode 100644 index 0000000000..506382b20b --- /dev/null +++ b/docs/shared_parsers_catalog/yum_updates.rst @@ -0,0 +1,3 @@ +.. automodule:: insights.parsers.yum_updates + :members: + :show-inheritance: diff --git a/docs/shell.rst b/docs/shell.rst new file mode 100644 index 0000000000..065fe0dccf --- /dev/null +++ b/docs/shell.rst @@ -0,0 +1,11 @@ +Insights Shell +============== +The insights shell is an ipython-based interactive environment for exploring +insights components. You can run it against your local system or different +kinds of packaged data like insights archives, sosreports, JBoss Diagnostic +Reports, must-gather archives, and more. See ``insights shell -h`` for +details. + +.. automodule:: insights.shell + :members: + :show-inheritance: diff --git a/examples/rules/skip_component.py b/examples/rules/skip_component.py index 86056d2c8f..2bcd3eb467 100755 --- a/examples/rules/skip_component.py +++ b/examples/rules/skip_component.py @@ -19,13 +19,14 @@ from __future__ import print_function from collections import namedtuple -from insights import get_active_lines, parser, Parser -from insights import make_fail, make_pass, rule, run -from insights.core.spec_factory import SpecSet, simple_file +from insights import run from insights.combiners.redhat_release import RedHatRelease -from insights.core.plugins import component -from insights.core.dr import SkipComponent from insights.components.rhel_version import IsRhel6, IsRhel7 +from insights.core import Parser +from insights.core.exceptions import SkipComponent +from insights.core.plugins import component, make_fail, make_pass, parser, rule +from insights.core.spec_factory import SpecSet, simple_file +from insights.parsers import get_active_lines # Error key used in make_fail ERROR_KEY = "TOO_MANY_HOSTS" diff --git a/insights-completion.bash b/insights-completion.bash new file mode 100644 index 0000000000..9ac6708a17 --- /dev/null +++ b/insights-completion.bash @@ -0,0 +1 @@ +complete -o default -W "cat collect inspect info ocpshell shell run version" insights diff --git a/insights-core.spec b/insights-core.spec new file mode 100644 index 0000000000..35be779a6a --- /dev/null +++ b/insights-core.spec @@ -0,0 +1,53 @@ +Name: insights-core +Version: 3.0.8 +Release: 1%{?dist} +Summary: Insights Core is a data collection and analysis framework. + +License: ASL 2.0 +URL: https://github.com/RedHatInsights/insights-core +Source0: %{name}-%{version}.tar.gz + +BuildArch: noarch +BuildRequires: python3-devel +BuildRequires: python3-setuptools + +Requires: python3 +Requires: python3-redis + +%if 0%{?rhel} == 7 +Requires: python36-CacheControl +Requires: python36-colorama +Requires: python36-defusedxml +Requires: python36-jinja2 +Requires: python36-lockfile +Requires: python36-PyYAML +Requires: python36-requests +Requires: python36-six +%else +Requires: python3-CacheControl +Requires: python3-colorama +Requires: python3-defusedxml +Requires: python3-jinja2 +Requires: python3-lockfile +Requires: python3-pyyaml +Requires: python3-requests +Requires: python3-six +%endif + +%description +Insights Core is a data collection and analysis framework. 
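Before the packaging and core-runtime changes that follow, here is a condensed, illustrative sketch of the standalone-rule layout that the reworked imports in examples/rules/skip_component.py above support. The spec, parser, and rule bodies are placeholders; only the import locations (SkipComponent from insights.core.exceptions, the decorators and result makers from insights.core.plugins, get_active_lines from insights.parsers) and the ERROR_KEY name are taken from the diff.

```python
from insights import run
from insights.core import Parser
from insights.core.exceptions import SkipComponent
from insights.core.plugins import make_fail, make_pass, parser, rule
from insights.core.spec_factory import SpecSet, simple_file
from insights.parsers import get_active_lines

# Error key used in make_fail / make_pass
ERROR_KEY = "TOO_MANY_HOSTS"


class LocalSpecs(SpecSet):
    # Collect /etc/hosts from the host or archive being analyzed.
    hosts = simple_file("/etc/hosts")


@parser(LocalSpecs.hosts)
class HostsParser(Parser):
    def parse_content(self, content):
        # Drop blank lines and comments.
        self.lines = get_active_lines(content)


@rule(HostsParser)
def report(hosts):
    if not hosts.lines:
        # Rules can bail out cleanly when there is nothing to evaluate.
        raise SkipComponent("No active entries in /etc/hosts")
    if len(hosts.lines) > 25:  # arbitrary threshold for illustration
        return make_fail(ERROR_KEY, count=len(hosts.lines))
    return make_pass(ERROR_KEY, count=len(hosts.lines))


if __name__ == "__main__":
    run(report, print_summary=True)
```

Run directly, the script evaluates just this rule and its dependency graph against the local host, or against an archive path given on the command line.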
+ +%prep +%setup -q -n %{name}-%{version} + +%install +rm -rf $RPM_BUILD_ROOT +%{__python3} setup.py install -O1 --root $RPM_BUILD_ROOT +rm -rf $RPM_BUILD_ROOT/usr/bin + +%files +# For noarch packages: sitelib +%{python3_sitelib}/* + +%changelog + diff --git a/insights/__init__.py b/insights/__init__.py index 0d2500ba84..58910f7e8b 100644 --- a/insights/__init__.py +++ b/insights/__init__.py @@ -17,36 +17,32 @@ True """ from __future__ import print_function + import logging -import pkgutil import os +import pkgutil import sys import yaml -from collections import defaultdict -from .core import Scannable, LogFileOutput, Parser, IniConfigFile # noqa: F401 -from .core import FileListing, LegacyItemAccess, SysconfigOptions # noqa: F401 -from .core import YAMLParser, JSONParser, XMLParser, CommandParser # noqa: F401 -from .core import AttributeDict # noqa: F401 -from .core import Syslog # noqa: F401 -from .core import taglang -from .core.archives import COMPRESSION_TYPES, extract, InvalidArchive, InvalidContentType # noqa: F401 -from .core import dr # noqa: F401 -from .core.context import ClusterArchiveContext, HostContext, HostArchiveContext, SerializedArchiveContext, ExecutionContext # noqa: F401 -from .core.dr import SkipComponent # noqa: F401 -from .core.hydration import create_context -from .core.plugins import combiner, fact, metadata, parser, rule # noqa: F401 -from .core.plugins import datasource, condition, incident # noqa: F401 -from .core.plugins import make_response, make_metadata, make_fingerprint # noqa: F401 -from .core.plugins import make_pass, make_fail, make_info # noqa: F401 -from .core.filters import add_filter, apply_filters, get_filters # noqa: F401 -from .core.serde import Hydration -from .formats import get_formatter -from .parsers import get_active_lines # noqa: F401 -from .util import defaults # noqa: F401 -from .formats import Formatter as FormatterClass - -from .core.spec_factory import RawFileProvider, TextFileProvider +from collections import defaultdict +from contextlib import contextmanager + +from insights.core import (CommandParser, ContainerParser, FileListing, IniConfigFile, JSONParser, LegacyItemAccess, # noqa: F401 + LogFileOutput, Parser, Scannable, SysconfigOptions, Syslog, XMLParser, YAMLParser, dr, # noqa: F401 + taglang) +from insights.core.archives import COMPRESSION_TYPES, extract +from insights.core.context import (ClusterArchiveContext, ExecutionContext, HostContext, # noqa: F401 + HostArchiveContext, SerializedArchiveContext) +from insights.core.exceptions import InvalidArchive, InvalidContentType, SkipComponent # noqa: F401 +from insights.core.filters import add_filter, apply_filters, get_filters # noqa: F401 +from insights.core.hydration import create_context, initialize_broker # noqa: F401 +from insights.core.plugins import (combiner, condition, datasource, fact, incident, make_fail, make_fingerprint, # noqa: F401 + make_info, make_metadata, make_none, make_pass, make_response, metadata, + parser, rule) +from insights.core.spec_factory import RawFileProvider, TextFileProvider +from insights.formats import Formatter as FormatterClass, get_formatter +from insights.parsers import get_active_lines # noqa: F401 +from insights.util import defaults # noqa: F401 log = logging.getLogger(__name__) @@ -57,6 +53,8 @@ for name in package_info: package_info[name] = pkgutil.get_data(__name__, name).strip().decode("utf-8") +_COLOR = "auto" + def get_nvr(): return "{0}-{1}-{2}".format(package_info["NAME"], @@ -64,6 +62,24 @@ def get_nvr(): 
package_info["RELEASE"]) +@contextmanager +def get_pool(parallel, prefix, kwargs): + """ + Yields: + a ThreadPoolExecutor if parallel is True and `concurrent.futures` exists. + `None` otherwise. + """ + if parallel: + try: + from concurrent.futures import ThreadPoolExecutor + with ThreadPoolExecutor(thread_name_prefix=prefix, **kwargs) as pool: + yield pool + except ImportError: + yield None + else: + yield None + + RULES_STATUS = {} """ Mapping of dictionaries containing nvr and commitid for each rule repo included @@ -73,7 +89,7 @@ def get_nvr(): """ -def add_status(name, nvr, commit): +def add_status(name, nvr, commit=None): """ Rule repositories should call this method in their package __init__ to register their version information. @@ -81,8 +97,11 @@ def add_status(name, nvr, commit): RULES_STATUS[name] = {"version": nvr, "commit": commit} -def process_dir(broker, root, graph, context, inventory=None): - ctx = create_context(root, context) +add_status(package_info["NAME"], get_nvr(), package_info["COMMIT"]) + + +def process_dir(broker, root, graph, context, inventory=None, parallel=False): + ctx, broker = initialize_broker(root, context=context, broker=broker) log.debug("Processing %s with %s" % (root, ctx)) if isinstance(ctx, ClusterArchiveContext): @@ -90,52 +109,63 @@ def process_dir(broker, root, graph, context, inventory=None): archives = [f for f in ctx.all_files if f.endswith(COMPRESSION_TYPES)] return process_cluster(graph, archives, broker=broker, inventory=inventory) - broker[ctx.__class__] = ctx - if isinstance(ctx, SerializedArchiveContext): - h = Hydration(ctx.root) - broker = h.hydrate(broker=broker) graph = dict((k, v) for k, v in graph.items() if k in dr.COMPONENTS[dr.GROUPS.single]) - broker = dr.run(graph, broker=broker) + if parallel: + with get_pool(parallel, "insights-run-pool", {"max_workers": None}) as pool: + broker = dr.run_all(graph, broker, pool) + else: + broker = dr.run(graph, broker=broker) return broker -def _run(broker, graph=None, root=None, context=None, inventory=None): +def _run(broker, graph=None, root=None, context=None, inventory=None, parallel=False): """ - run is a general interface that is meant for stand alone scripts to use + run is a general interface that is meant for stand-alone scripts to use when executing insights components. Args: - root (str): None will causes a host collection in which command and + broker (Broker): Optionally pass a broker to use for evaluation. One is + created by default, but it's often useful to seed a broker with an + initial dependency. + graph (function or class): The component to execute. Will only execute + the component and its dependency graph. If None, all components with + met dependencies will execute. + root (str): None will cause a host collection in which command and file specs are run. A directory or archive path will cause collection from the directory or archive, and only file type specs or those that depend on `insights.core.context.HostArchiveContext` will execute. - component (function or class): The component to execute. Will only execute - the component and its dependency graph. If None, all components with - met dependencies will execute. + context (obj): The execution context that's set. + inventory (str): Path to inventory file. + parallel (bool): Boolean as to weather to use parallel execution or not. Returns: broker: object containing the result of the evaluation. 
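The new get_pool() helper and the parallel flag threaded through process_dir() and _run() above follow one pattern; here is a minimal sketch of it, using only names introduced in this diff.

```python
# Sketch: optionally parallel evaluation of a dependency graph. get_pool()
# yields a ThreadPoolExecutor when parallel=True and concurrent.futures is
# importable, and None otherwise; the serial dr.run() path is unchanged.
from insights import dr, get_pool


def run_graph(graph, broker, parallel=False):
    if parallel:
        with get_pool(parallel, "insights-run-pool", {"max_workers": None}) as pool:
            return dr.run_all(graph, broker, pool)
    return dr.run(graph, broker=broker)
```

Keeping the fallback inside get_pool() lets call sites stay identical on older Python versions where concurrent.futures is not available, instead of repeating a try/except at every caller.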
""" - if not root: context = context or HostContext broker[context] = context() graph = dict((k, v) for k, v in graph.items() if k in dr.COMPONENTS[dr.GROUPS.single]) - return dr.run(graph, broker=broker) + if parallel: + with get_pool(parallel, "insights-run-pool", {"max_workers": None}) as pool: + dr.run_all(graph, broker, pool) + else: + return dr.run(graph, broker=broker) if os.path.isdir(root): - return process_dir(broker, root, graph, context, inventory=inventory) + return process_dir(broker, root, graph, context, inventory=inventory, parallel=parallel) else: with extract(root) as ex: - return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory) + return process_dir(broker, ex.tmp_dir, graph, context, inventory=inventory, parallel=parallel) def load_default_plugins(): dr.load_components("insights.specs.default") dr.load_components("insights.specs.insights_archive") + dr.load_components("insights.specs.core3_archive") dr.load_components("insights.specs.sos_archive") dr.load_components("insights.specs.jdr_archive") + dr.load_components("insights.specs.must_gather_archive") def load_packages(packages): @@ -236,33 +266,35 @@ def _load_context(path): return dr.get_component(path) -def run(component=None, root=None, print_summary=False, - context=None, inventory=None, print_component=None): - - load_default_plugins() - +def run(component=None, root=None, print_summary=False, context=None, inventory=None, print_component=None, + store_skips=False): args = None - formatter = None formatters = None + if print_summary: import argparse import logging p = argparse.ArgumentParser(add_help=False) p.add_argument("archive", nargs="?", help="Archive or directory to analyze.") - p.add_argument("-p", "--plugins", default="", - help="Comma-separated list without spaces of package(s) or module(s) containing plugins.") p.add_argument("-b", "--bare", - help='Specify "spec=filename[,spec=filename,...]" to use the bare file for the spec', - default="") + help='Specify "spec=filename[,spec=filename,...]" to use the bare file for the spec', default="") p.add_argument("-c", "--config", help="Configure components.") + p.add_argument("-f", "--format", help="Output format.", default="insights.formats.text") p.add_argument("-i", "--inventory", help="Ansible inventory file for cluster analysis.") p.add_argument("-k", "--pkg-query", help="Expression to select rules by package.") - p.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") - p.add_argument("-f", "--format", help="Output format.", default="insights.formats.text") + p.add_argument("-p", "--plugins", default="", + help="Comma-separated list without spaces of package(s) or module(s) containing plugins.") p.add_argument("-s", "--syslog", help="Log results to syslog.", action="store_true") - p.add_argument("--tags", help="Expression to select rules by tag.") + p.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") p.add_argument("-D", "--debug", help="Verbose debug output.", action="store_true") + p.add_argument("--color", default="auto", choices=["always", "auto", "never"], metavar="[=WHEN]", + help="Choose if and how the color encoding is outputted. When is 'always', 'auto', or 'never'.") p.add_argument("--context", help="Execution Context. 
Defaults to HostContext if an archive isn't passed.") + p.add_argument("--no-load-default", help="Don't load the default plugins.", action="store_true") + p.add_argument("--parallel", help="Execute rules in parallel.", action="store_true") + p.add_argument("--show-skips", help="Capture skips in the broker for troubleshooting.", action="store_true", + default=False) + p.add_argument("--tags", help="Expression to select rules by tag.") class Args(object): pass @@ -271,13 +303,22 @@ class Args(object): args = Args() p.parse_known_args(namespace=args) p = argparse.ArgumentParser(parents=[p]) + + if not args.no_load_default: + load_default_plugins() + + global _COLOR + _COLOR = args.color + args.format = "insights.formats._json" if args.format == "json" else args.format args.format = "insights.formats._yaml" if args.format == "yaml" else args.format fmt = args.format if "." in args.format else "insights.formats." + args.format + Formatter = dr.get_component(fmt) if not Formatter or not isinstance(Formatter, FormatterClass): dr.load_components(fmt, continue_on_error=False) Formatter = get_formatter(fmt) + Formatter.configure(p) p.parse_args(namespace=args) formatter = Formatter(args) @@ -345,6 +386,10 @@ class Args(object): graph = dr.COMPONENTS[dr.GROUPS.single] broker = dr.Broker() + if args: + broker.store_skips = args.show_skips + else: + broker.store_skips = store_skips if args and args.bare: ctx = ExecutionContext() # dummy context that no spec depend on. needed for filters to work @@ -360,23 +405,32 @@ class Args(object): for formatter in formatters: formatter.preprocess(broker) - if args and args.bare: - broker = dr.run(graph, broker=broker) + if args: + if args.bare: + broker = dr.run(graph, broker=broker) + else: + broker = _run(broker, graph, root, context=context, inventory=inventory, parallel=args.parallel) else: broker = _run(broker, graph, root, context=context, inventory=inventory) for formatter in formatters: formatter.postprocess(broker) elif print_component: - if args and args.bare: - broker = dr.run(graph, broker=broker) + if args: + if args.bare: + broker = dr.run(graph, broker=broker) + else: + broker = _run(broker, graph, root, context=context, inventory=inventory, parallel=args.parallel) else: broker = _run(broker, graph, root, context=context, inventory=inventory) broker.print_component(print_component) else: - if args and args.bare: - broker = dr.run(graph, broker=broker) + if args: + if args.bare: + broker = dr.run(graph, broker=broker) + else: + broker = _run(broker, graph, root, context=context, inventory=inventory, parallel=args.parallel) else: broker = _run(broker, graph, root, context=context, inventory=inventory) diff --git a/insights/client/README.md b/insights/client/README.md index 1165dd08b7..86a2762ea5 100644 --- a/insights/client/README.md +++ b/insights/client/README.md @@ -1,22 +1,7 @@ -## Developer Setup -Instructions are for RHSM-subscribed machines only. -1. Clone this repo and https://github.com/RedHatInsights/insights-client to the same directory. +## Insights Client (Core) Developer Notes -``` -$ git clone git@github.com:RedHatInsights/insights-client.git -$ git clone git@github.com:RedHatInsights/insights-core.git -``` -2. Build the egg and install the client. +* ### **See https://github.com/RedHatInsights/insights-client for build and usage instructions, and details about configuration and runtime.** -``` -$ cd insights-client -$ sh lay-the-eggs.sh -``` +* To rebuild the egg from source, run `./build_client_egg.sh` from the repo root. 
This will generate a file `insights.zip` that you can pass to `insights-client` with the `EGG` environment variable. -3. Run the client with the following options to disable GPG since this egg is unsigned. - -``` -$ sudo BYPASS_GPG=True EGG=/etc/insights-client/rpm.egg insights-client --no-gpg -``` - -4. Repeat steps 2 & 3 upon making code changes. The majority of the client code lives in this directory, `insights-core/insights/client`. +* The `uploader_json_map.json` file is **NOT** `uploader.json`. Its purpose is to serve as a compatibility layer between denylist configurations for classic collection and core collection. Changes to this file will not affect the commands or files that are collected. It is advised not to make changes to this file as it is copied from the production-ready uploader.json file at release time and not intended to be modified further. diff --git a/insights/client/__init__.py b/insights/client/__init__.py index 27fa277bbd..fd62dd9aa2 100644 --- a/insights/client/__init__.py +++ b/insights/client/__init__.py @@ -6,27 +6,29 @@ import shlex import shutil import sys +import atexit from subprocess import Popen, PIPE +from requests import ConnectionError from .. import package_info from . import client from .constants import InsightsConstants as constants from .config import InsightsConfig from .auto_config import try_auto_configuration -from .utilities import (delete_registered_file, - delete_unregistered_file, +from .utilities import (write_data_to_file, write_to_disk, - generate_machine_id, get_tags, - write_tags) + write_tags, + migrate_tags, + get_parent_process) +NETWORK = constants.custom_network_log_level logger = logging.getLogger(__name__) -net_logger = logging.getLogger("network") class InsightsClient(object): - def __init__(self, config=None, setup_logging=True, **kwargs): + def __init__(self, config=None, from_phase=True, **kwargs): """ The Insights client interface """ @@ -46,32 +48,24 @@ def __init__(self, config=None, setup_logging=True, **kwargs): sys.exit(constants.sig_kill_bad) # END hack. in the future, just set self.config=config - # setup_logging is True when called from phase, but not from wrapper. 
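A side note on the logging change above: the dedicated "network" logger is removed, and HTTP traffic is now logged on the module logger at a custom NETWORK level. A self-contained sketch of that pattern follows; the numeric level is an assumption, since the client reads it from constants.custom_network_log_level.

```python
import logging

# Assumed numeric value for illustration only; insights-client takes it from
# InsightsConstants.custom_network_log_level.
NETWORK = 11
logging.addLevelName(NETWORK, "NETWORK")

logger = logging.getLogger(__name__)
logging.basicConfig(level=NETWORK)

# Mirrors calls such as logger.log(NETWORK, "GET %s", url) in the client code.
logger.log(NETWORK, "GET %s", "https://example.invalid/v1/static/core/insights-core.egg")
```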
- # use this to do any common init (like auto_config) - if setup_logging: + if from_phase: + _init_client_config_dirs() self.set_up_logging() try_auto_configuration(self.config) - else: - # write PID to file in case we need to ping systemd - write_to_disk(constants.pidfile, content=str(os.getpid())) + self.initialize_tags() + else: # from wrapper + _write_pid_files() + # setup insights connection placeholder # used for requests - self.session = None self.connection = None - - if self.config.group: - tags = get_tags() - if tags is None: - tags = {} - tags["group"] = self.config.group - write_tags(tags) + self.tmpdir = None def _net(func): def _init_connection(self, *args, **kwargs): # setup a request session - if not self.config.offline and not self.session: + if not self.config.offline and not self.connection: self.connection = client.get_connection(self.config) - self.session = self.connection.session return func(self, *args, **kwargs) return _init_connection @@ -99,30 +93,59 @@ def branch_info(self): """ return client.get_branch_info(self.config, self.connection) + @_net + def get_egg_url(self): + """ + Get the proper url based on the configured egg release branch + """ + if self.config.legacy_upload: + url = self.connection.base_url + '/platform' + constants.module_router_path + else: + url = self.connection.base_url + constants.module_router_path + try: + response = self.connection.get(url) + if response.status_code == 200: + return response.json()["url"] + else: + raise ConnectionError("%s: %s" % (response.status_code, response.reason)) + except ConnectionError as e: + logger.warning("Unable to fetch egg url %s: %s. Defaulting to /release", url, str(e)) + return '/release' + def fetch(self, force=False): """ returns (dict): {'core': path to new egg, None if no update, 'gpg_sig': path to new sig, None if no update} """ - tmpdir = tempfile.mkdtemp() + self.tmpdir = tempfile.mkdtemp() + atexit.register(self.delete_tmpdir) fetch_results = { - 'core': os.path.join(tmpdir, 'insights-core.egg'), - 'gpg_sig': os.path.join(tmpdir, 'insights-core.egg.asc') + 'core': os.path.join(self.tmpdir, 'insights-core.egg'), + 'gpg_sig': os.path.join(self.tmpdir, 'insights-core.egg.asc') } logger.debug("Beginning core fetch.") # guess the URLs based on what legacy setting is + egg_release = self.get_egg_url() + + try: + # write the release path to temp so we can collect it + # in the archive + write_data_to_file(egg_release, constants.egg_release_file) + except (OSError, IOError) as e: + logger.debug('Could not write egg release file: %s', str(e)) + egg_url = self.config.egg_path egg_gpg_url = self.config.egg_gpg_path if egg_url is None: - egg_url = '/v1/static/core/insights-core.egg' + egg_url = '/v1/static{0}/insights-core.egg'.format(egg_release) # if self.config.legacy_upload: # egg_url = '/v1/static/core/insights-core.egg' # else: # egg_url = '/static/insights-core.egg' if egg_gpg_url is None: - egg_gpg_url = '/v1/static/core/insights-core.egg.asc' + egg_gpg_url = '/v1/static{0}/insights-core.egg.asc'.format(egg_release) # if self.config.legacy_upload: # egg_gpg_url = '/v1/static/core/insights-core.egg.asc' # else: @@ -174,14 +197,19 @@ def _fetch(self, path, etag_file, target_path, force): # If the etag was found and we are not force fetching # Then add it to the request - net_logger.info("GET %s", url) - if current_etag and not force: - logger.debug('Requesting new file with etag %s', current_etag) - etag_headers = {'If-None-Match': current_etag} - response = self.session.get(url, 
headers=etag_headers, timeout=self.config.http_timeout) - else: - logger.debug('Found no etag or forcing fetch') - response = self.session.get(url, timeout=self.config.http_timeout) + logger.log(NETWORK, "GET %s", url) + try: + if current_etag and not force: + logger.debug('Requesting new file with etag %s', current_etag) + etag_headers = {'If-None-Match': current_etag} + response = self.connection.get(url, headers=etag_headers, log_response_text=False) + else: + logger.debug('Found no etag or forcing fetch') + response = self.connection.get(url, log_response_text=False) + except ConnectionError as e: + logger.error(e) + logger.error('The Insights API could not be reached.') + return False # Debug information logger.debug('Status code: %d', response.status_code) @@ -225,6 +253,7 @@ def update(self): return True if self.config.auto_update: + logger.debug("Egg update enabled") # fetch the new eggs and gpg egg_paths = self.fetch() @@ -341,6 +370,11 @@ def install(self, new_egg, new_egg_gpg_sig): logger.debug("The new Insights Core was installed successfully.") return {'success': True} + def delete_tmpdir(self): + if self.tmpdir: + logger.debug("Deleting temp directory %s." % (self.tmpdir)) + shutil.rmtree(self.tmpdir, True) + @_net def update_rules(self): """ @@ -355,7 +389,7 @@ def update_rules(self): @_net def collect(self): # return collection results - tar_file = client.collect(self.config, self.connection) + tar_file = client.collect(self.config) # it is important to note that --to-stdout is utilized via the wrapper RPM # this file is received and then we invoke shutil.copyfileobj @@ -471,6 +505,13 @@ def set_display_name(self, display_name): ''' return self.connection.set_display_name(display_name) + @_net + def set_ansible_host(self, ansible_host): + ''' + returns True on success, False on failure + ''' + return self.connection.set_ansible_host(ansible_host) + @_net def get_diagnosis(self, remediation_id=None): ''' @@ -495,16 +536,6 @@ def delete_cached_branch_info(self): def get_machine_id(self): return client.get_machine_id() - def clear_local_registration(self): - ''' - Deletes dotfiles and machine-id for fresh registration - ''' - delete_registered_file() - delete_unregistered_file() - write_to_disk(constants.machine_id_file, delete=True) - logger.debug('Re-register set, forcing registration.') - logger.debug('New machine-id: %s', generate_machine_id(new=True)) - @_net def check_results(self): content = self.connection.get_advisor_report() @@ -525,6 +556,24 @@ def show_results(self): else: raise e + def show_inventory_deep_link(self): + """ + Show a deep link to this host inventory record + """ + system = self.connection._fetch_system_by_machine_id() + if system: + if len(system) == 1: + try: + id = system[0]["id"] + logger.info("View details about this system on console.redhat.com:") + logger.info( + "https://console.redhat.com/insights/inventory/{0}".format(id) + ) + except Exception as e: + logger.error( + "Error: malformed system record: {0}: {1}".format(system, e) + ) + def _copy_soscleaner_files(self, insights_archive): ''' Helper function to copy the .csv reports generated by SOScleaner @@ -614,6 +663,35 @@ def copy_to_output_file(self, insights_archive): if self.config.obfuscate: self._copy_soscleaner_files(insights_archive) + def initialize_tags(self): + ''' + Initialize the tags file if needed + ''' + # migrate the old file if necessary + migrate_tags() + + # initialize with group if group was specified + if self.config.group: + tags = get_tags() + if tags is None: + tags 
= {} + tags["group"] = self.config.group + write_tags(tags) + + def list_specs(self): + logger.info("For a full list of insights-core datasources, please refer to https://insights-core.readthedocs.io/en/latest/specs_catalog.html") + logger.info("The items in General Datasources can be selected for omission by adding them to the 'components' section of file-redaction.yaml") + logger.info("When specifying these items in file-redaction.yaml, they must be prefixed with 'insights.specs.default.DefaultSpecs.', i.e. 'insights.specs.default.DefaultSpecs.httpd_V'") + logger.info("This information applies only to Insights Core collection. To use Core collection, set core_collect=True in %s", self.config.conf) + + @_net + def checkin(self): + if self.config.offline: + logger.error('Cannot check-in in offline mode.') + return None + + return self.connection.checkin() + def format_config(config): # Log config except the password @@ -624,3 +702,28 @@ def format_config(config): del config_copy["proxy"] finally: return json.dumps(config_copy, indent=4) + + +def _init_client_config_dirs(): + ''' + Initialize log and lib dirs + TODO: init non-root config dirs + ''' + for d in (constants.log_dir, constants.insights_core_lib_dir): + try: + os.makedirs(d) + except OSError as e: + if e.errno == errno.EEXIST: + # dir exists, this is OK + pass + else: + raise e + + +def _write_pid_files(): + for file, content in ( + (constants.pidfile, str(os.getpid())), # PID in case we need to ping systemd + (constants.ppidfile, get_parent_process()) # PPID so that we can grab the client execution method + ): + write_to_disk(file, content=content) + atexit.register(write_to_disk, file, delete=True) diff --git a/insights/combiners/tests/__init__.py b/insights/client/apps/ansible/__init__.py similarity index 100% rename from insights/combiners/tests/__init__.py rename to insights/client/apps/ansible/__init__.py diff --git a/insights/client/apps/ansible/playbook_verifier/__init__.py b/insights/client/apps/ansible/playbook_verifier/__init__.py new file mode 100644 index 0000000000..adedc14c33 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/__init__.py @@ -0,0 +1,238 @@ +import os +import six +import copy +import base64 +import tempfile +import pkgutil +import hashlib +import insights.client.apps.ansible +from logging import getLogger +from insights.client.apps.ansible.playbook_verifier.contrib import gnupg +from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel import yaml +from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.comments import CommentedMap, CommentedSeq +from insights.client.apps.ansible.playbook_verifier.contrib.ruamel_yaml.ruamel.yaml.scalarint import ScalarInt +from insights.client.constants import InsightsConstants as constants + +__all__ = ("loadPlaybookYaml", "verify", "PlaybookVerificationError") + +SIGKEY = 'insights_signature' +PUBLIC_KEY_FOLDER = pkgutil.get_data(insights.client.apps.ansible.__name__, 'playbook_verifier/public.gpg') # Update this when we have the key generated +EXCLUDABLE_VARIABLES = ['hosts', 'vars'] + +logger = getLogger(__name__) + +yaml = yaml.YAML(typ='rt') +yaml.indent(mapping=2, sequence=4, offset=2) +yaml.default_flow_style = False +yaml.preserve_quotes = True +yaml.width = 200 + + +class PlaybookVerificationError(Exception): + """ + Exception raised when playbook verification fails + + Attributes: + playbook -- stringified playbook yaml from stdin + message -- explanation of why verification failed + """ + + 
def __init__(self, message="PLAYBOOK VALIDATION FAILED"): + self.message = message + super(PlaybookVerificationError, self).__init__(self.message) + + def __str__(self): + return self.message + + +def decodeSignature(encodedSignature): + try: + decodedSignature = base64.b64decode(encodedSignature) + return decodedSignature + except: + raise PlaybookVerificationError(message='VERIFICATION FAILED: Error Decoding Signature') + + +def createSnippetHash(snippet): + """ + Function that creates and returns a hash of the snippet given to the function. + output: snippetHash (bytes) + """ + snippetHash = hashlib.sha256() + if six.PY2: + normalizedSnippet = normalizeSnippet(snippet) + serializedSnippet = str(normalizedSnippet).encode("UTF-8") + else: + serializedSnippet = str(snippet).encode("UTF-8") + snippetHash.update(serializedSnippet) + + return snippetHash.digest() + + +def getPublicKey(gpg): + if not PUBLIC_KEY_FOLDER: + raise PlaybookVerificationError(message="PUBLIC KEY IMPORT ERROR: Public key file not found") + + publicKey = PUBLIC_KEY_FOLDER + importResults = gpg.import_keys(publicKey) + if (importResults.count < 1): + raise PlaybookVerificationError(message="PUBLIC KEY NOT IMPORTED: Public key import failed") + + return importResults + + +def excludeDynamicElements(snippet): + if 'insights_signature_exclude' not in snippet['vars']: + raise PlaybookVerificationError(message='EXCLUDE MISSING: the insights_signature_exclude var does not exist.') + + exclusions = snippet['vars']['insights_signature_exclude'].split(',') + + for element in exclusions: + element = element.split('/') + + # remove empty strings + element = [string for string in element if string != ''] + + if (len(element) == 1 and element[0] in EXCLUDABLE_VARIABLES and element[0] in snippet.keys()): + del snippet[element[0]] + elif (len(element) == 2 and element[0] in EXCLUDABLE_VARIABLES): + try: + del snippet[element[0]][element[1]] + except: + raise PlaybookVerificationError(message='INVALID FIELD: the variable {0} defined in insights_signature_exclude does not exist.'.format(element)) + else: + raise PlaybookVerificationError(message='INVALID EXCLUSION: the variable {0} is not a valid exclusion.'.format(element)) + + return snippet + + +def executeVerification(snippet, encodedSignature): + gpg = gnupg.GPG(gnupghome=constants.insights_core_lib_dir) + snippetHash = createSnippetHash(snippet) + decodedSignature = decodeSignature(encodedSignature) + + # load public key + getPublicKey(gpg) + + fd, fn = tempfile.mkstemp() + os.write(fd, decodedSignature) + os.close(fd) + + result = gpg.verify_data(fn, snippetHash) + os.unlink(fn) + + return result, snippetHash + + +def verifyPlaybookSnippet(snippet): + if ('vars' not in snippet.keys()): + raise PlaybookVerificationError(message='VERIFICATION FAILED: Vars field not found') + elif (snippet['vars'] is None): + raise PlaybookVerificationError(message='VERIFICATION FAILED: Empty vars field') + elif (SIGKEY not in snippet['vars']): + raise PlaybookVerificationError(message='VERIFICATION FAILED: Signature not found') + + encodedSignature = snippet['vars'][SIGKEY] + snippetCopy = copy.deepcopy(snippet) + + snippetCopy = excludeDynamicElements(snippetCopy) + + return executeVerification(snippetCopy, encodedSignature) + + +def getRevocationList(): + """ + Load the list of revoked playbook snippet hashes from the egg + + Returns: + dictionary of revocation list entries (name, hash) + """ + try: + # Import revoked list yaml. 
The yaml is structured as a list of lists, so we can reuse the playbook signing and + # verification code. There will only ever be one list, so we just grab the first element... + revoked_playbooks = yaml.load(pkgutil.get_data('insights', 'revoked_playbooks.yaml'))[0] + + except Exception: + raise PlaybookVerificationError(message='VERIFICATION FAILED: Error loading revocation list') + + # verify the list signature! + verified, snippetHash = verifyPlaybookSnippet(revoked_playbooks) + + if not verified: + raise PlaybookVerificationError(message='VERIFICATION FAILED: Revocation list signature invalid') + + revocationList = revoked_playbooks.get('revoked_playbooks', []) + return revocationList + + +def verify(playbook, skipVerify=False): + """ + Verify the signed playbook. + + Input: unverified playbook (dictionary format) + Output: "verified" playbook (dictionary format) + Error: Playbook Verification failure / Playbook Signature not found. + """ + logger.info('Playbook Verification has started') + + if not skipVerify: + if not playbook: + raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Playbook is empty") + + revocationList = getRevocationList() + + for snippet in playbook: + verified, snippetHash = verifyPlaybookSnippet(snippet) + + if not verified: + name = snippet.get('name', 'NAME UNAVAILABLE') + raise PlaybookVerificationError(message="SIGNATURE NOT VALID: Template [name: {0}] has invalid signature".format(name)) + + # check if snippetHash is on the revoked list + for revokedItem in revocationList: + if snippetHash == bytearray.fromhex(revokedItem['hash']): + raise PlaybookVerificationError(message="REVOKED PLAYBOOK: Template is on the revoked list [name: {0}]".format(revokedItem['name'])) + + logger.info('All templates successfully validated') + return playbook + + +def loadPlaybookYaml(playbook): + """ + Load playbook yaml using current yaml library implementation + output: playbook yaml + """ + try: + playbookYaml = yaml.load(playbook) + return playbookYaml + except: + raise PlaybookVerificationError(message="PLAYBOOK VERIFICATION FAILURE: Failed to load playbook yaml because yaml is not valid") + + +def normalizeSnippet(snippet): + """ + Normalize python2 snippet and get rid of any default unicode values + output: normalized snippet + """ + new = CommentedMap() + for key, value in snippet.iteritems(): + if isinstance(value, CommentedMap): + new[key] = CommentedMap(normalizeSnippet(value)) + elif isinstance(value, CommentedSeq): + new_sequence = CommentedSeq() + for item in value: + if isinstance(item, six.text_type): + new_sequence.append(item.encode('ascii', 'ignore')) + elif isinstance(item, CommentedMap): + new_sequence.append(normalizeSnippet(item)) + else: + new_sequence.append(item) + new[key] = new_sequence + elif isinstance(value, six.text_type): + new[key] = value.encode('ascii', 'ignore') + elif isinstance(value, ScalarInt): + new[key] = int(value) + else: + new[key] = value + + return new diff --git a/insights/client/apps/ansible/playbook_verifier/__main__.py b/insights/client/apps/ansible/playbook_verifier/__main__.py new file mode 100644 index 0000000000..e76f19833c --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/__main__.py @@ -0,0 +1,31 @@ +import os +import sys +from insights.client.constants import InsightsConstants as constants +from insights.client.apps.ansible.playbook_verifier import verify, loadPlaybookYaml, PlaybookVerificationError + +skipVerify = False + + +def read_playbook(): + """ + Read in the stringified 
playbook yaml from stdin + """ + unverified_playbook = '' + for line in sys.stdin: + unverified_playbook += line + + return unverified_playbook + + +if (os.environ.get('SKIP_VERIFY')): + skipVerify = True + +try: + playbook = read_playbook() + playbook_yaml = loadPlaybookYaml(playbook) + verified_playbook = verify(playbook_yaml, skipVerify) +except PlaybookVerificationError as err: + sys.stderr.write(err.message) + sys.exit(constants.sig_kill_bad) + +print(playbook) diff --git a/insights/components/tests/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/__init__.py similarity index 100% rename from insights/components/tests/__init__.py rename to insights/client/apps/ansible/playbook_verifier/contrib/__init__.py diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py b/insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py new file mode 100644 index 0000000000..671b46ddc2 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/gnupg.py @@ -0,0 +1,1646 @@ +""" A wrapper for the 'gpg' command:: + +Portions of this module are derived from A.M. Kuchling's well-designed +GPG.py, using Richard Jones' updated version 1.3, which can be found +in the pycrypto CVS repository on Sourceforge: + +http://pycrypto.cvs.sourceforge.net/viewvc/pycrypto/gpg/GPG.py + +This module is *not* forward-compatible with amk's; some of the +old interface has changed. For instance, since I've added decrypt +functionality, I elected to initialize with a 'gnupghome' argument +instead of 'keyring', so that gpg can find both the public and secret +keyrings. I've also altered some of the returned objects in order for +the caller to not have to know as much about the internals of the +result classes. + +While the rest of ISconf is released under the GPL, I am releasing +this single file under the same terms that A.M. Kuchling used for +pycrypto. + +Steve Traugott, stevegt@terraluna.org +Thu Jun 23 21:27:20 PDT 2005 + +This version of the module has been modified from Steve Traugott's version +(see http://trac.t7a.org/isconf/browser/trunk/lib/python/isconf/GPG.py) by +Vinay Sajip to make use of the subprocess module (Steve's version uses os.fork() +and so does not work on Windows). Renamed to gnupg.py to avoid confusion with +the previous versions. + +Modifications Copyright (C) 2008-2019 Vinay Sajip. All rights reserved. + +A unittest harness (test_gnupg.py) has also been added. 
+""" + +__version__ = "0.4.6" +__author__ = "Vinay Sajip" +__date__ = "$17-Apr-2020 09:35:35$" + +try: + from io import StringIO +except ImportError: # pragma: no cover + from cStringIO import StringIO + +import codecs +import locale +import logging +import os +import re +import socket +from subprocess import Popen +from subprocess import PIPE +import sys +import threading + +STARTUPINFO = None +if os.name == 'nt': # pragma: no cover + try: + from subprocess import STARTUPINFO, STARTF_USESHOWWINDOW, SW_HIDE + except ImportError: + STARTUPINFO = None + +try: + import logging.NullHandler as NullHandler +except ImportError: + class NullHandler(logging.Handler): + def handle(self, record): + pass +try: + unicode + _py3k = False + string_types = basestring + text_type = unicode +except NameError: + _py3k = True + string_types = str + text_type = str + +logger = logging.getLogger(__name__) +if not logger.handlers: + logger.addHandler(NullHandler()) + +# We use the test below because it works for Jython as well as CPython +if os.path.__name__ == 'ntpath': # pragma: no cover + # On Windows, we don't need shell quoting, other than worrying about + # paths with spaces in them. + def shell_quote(s): + return '"%s"' % s +else: + # Section copied from sarge + + # This regex determines which shell input needs quoting + # because it may be unsafe + UNSAFE = re.compile(r'[^\w%+,./:=@-]') + + def shell_quote(s): + """ + Quote text so that it is safe for Posix command shells. + + For example, "*.py" would be converted to "'*.py'". If the text is + considered safe it is returned unquoted. + + :param s: The value to quote + :type s: str (or unicode on 2.x) + :return: A safe version of the input, from the point of view of Posix + command shells + :rtype: The passed-in type + """ + if not isinstance(s, string_types): # pragma: no cover + raise TypeError('Expected string type, got %s' % type(s)) + if not s: + result = "''" + elif not UNSAFE.search(s): + result = s + else: + result = "'%s'" % s.replace("'", r"'\''") + return result + + # end of sarge code + +# Now that we use shell=False, we shouldn't need to quote arguments. +# Use no_quote instead of shell_quote to remind us of where quoting +# was needed. However, note that we still need, on 2.x, to encode any +# Unicode argument with the file system encoding - see Issue #41 and +# Python issue #1759845 ("subprocess.call fails with unicode strings in +# command line"). + +# Allows the encoding used to be overridden in special cases by setting +# this module attribute appropriately. +fsencoding = sys.getfilesystemencoding() + +def no_quote(s): + if not _py3k and isinstance(s, text_type): + s = s.encode(fsencoding) + return s + +def _copy_data(instream, outstream): + # Copy one stream to another + sent = 0 + if hasattr(sys.stdin, 'encoding'): + enc = sys.stdin.encoding + else: # pragma: no cover + enc = 'ascii' + while True: + # See issue #39: read can fail when e.g. 
a text stream is provided + # for what is actually a binary file + try: + data = instream.read(1024) + except UnicodeError: + logger.warning('Exception occurred while reading', exc_info=1) + break + if not data: + break + sent += len(data) + # logger.debug("sending chunk (%d): %r", sent, data[:256]) + try: + outstream.write(data) + except UnicodeError: # pragma: no cover + outstream.write(data.encode(enc)) + except: + # Can sometimes get 'broken pipe' errors even when the data has all + # been sent + logger.exception('Error sending data') + break + try: + outstream.close() + except IOError: # pragma: no cover + logger.warning('Exception occurred while closing: ignored', exc_info=1) + logger.debug("closed output, %d bytes sent", sent) + +def _threaded_copy_data(instream, outstream): + wr = threading.Thread(target=_copy_data, args=(instream, outstream)) + wr.setDaemon(True) + logger.debug('data copier: %r, %r, %r', wr, instream, outstream) + wr.start() + return wr + +def _write_passphrase(stream, passphrase, encoding): + passphrase = '%s\n' % passphrase + passphrase = passphrase.encode(encoding) + stream.write(passphrase) + logger.debug('Wrote passphrase') + +def _is_sequence(instance): + return isinstance(instance, (list, tuple, set, frozenset)) + +def _make_memory_stream(s): + try: + from io import BytesIO + rv = BytesIO(s) + except ImportError: # pragma: no cover + rv = StringIO(s) + return rv + +def _make_binary_stream(s, encoding): + if _py3k: + if isinstance(s, str): + s = s.encode(encoding) + else: + if type(s) is not str: + s = s.encode(encoding) + return _make_memory_stream(s) + +class Verify(object): + "Handle status messages for --verify" + + TRUST_UNDEFINED = 0 + TRUST_NEVER = 1 + TRUST_MARGINAL = 2 + TRUST_FULLY = 3 + TRUST_ULTIMATE = 4 + + TRUST_LEVELS = { + "TRUST_UNDEFINED" : TRUST_UNDEFINED, + "TRUST_NEVER" : TRUST_NEVER, + "TRUST_MARGINAL" : TRUST_MARGINAL, + "TRUST_FULLY" : TRUST_FULLY, + "TRUST_ULTIMATE" : TRUST_ULTIMATE, + } + + # for now, just the most common error codes. This can be expanded as and + # when reports come in of other errors. 
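+ # Note: handle_status() below consults these two maps when a FAILURE or
+ # UNEXPECTED status value carries a numeric code; codes with bit 15 set are
+ # treated as system (OS) errors and looked up in GPG_SYSTEM_ERROR_CODES,
+ # otherwise GPG_ERROR_CODES is used.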
+ GPG_SYSTEM_ERROR_CODES = { + 1: 'permission denied', + 35: 'file exists', + 81: 'file not found', + 97: 'not a directory', + } + + GPG_ERROR_CODES = { + 11: 'incorrect passphrase', + } + + def __init__(self, gpg): + self.gpg = gpg + self.valid = False + self.fingerprint = self.creation_date = self.timestamp = None + self.signature_id = self.key_id = None + self.username = None + self.key_id = None + self.key_status = None + self.status = None + self.pubkey_fingerprint = None + self.expire_timestamp = None + self.sig_timestamp = None + self.trust_text = None + self.trust_level = None + self.sig_info = {} + + def __nonzero__(self): + return self.valid + + __bool__ = __nonzero__ + + def handle_status(self, key, value): + + def update_sig_info(**kwargs): + sig_id = self.signature_id + if sig_id: + info = self.sig_info[sig_id] + info.update(kwargs) + + if key in self.TRUST_LEVELS: + self.trust_text = key + self.trust_level = self.TRUST_LEVELS[key] + update_sig_info(trust_level=self.trust_level, + trust_text=self.trust_text) + elif key in ("WARNING", "ERROR"): + logger.warning('potential problem: %s: %s', key, value) + elif key == "BADSIG": # pragma: no cover + self.valid = False + self.status = 'signature bad' + self.key_id, self.username = value.split(None, 1) + update_sig_info(keyid=self.key_id, username=self.username, + status=self.status) + elif key == "ERRSIG": # pragma: no cover + self.valid = False + parts = value.split() + (self.key_id, + algo, hash_algo, + cls, + self.timestamp) = parts[:5] + # Since GnuPG 2.2.7, a fingerprint is tacked on + if len(parts) >= 7: + self.fingerprint = parts[6] + self.status = 'signature error' + update_sig_info(keyid=self.key_id, timestamp=self.timestamp, + fingerprint=self.fingerprint, status=self.status) + elif key == "EXPSIG": # pragma: no cover + self.valid = False + self.status = 'signature expired' + self.key_id, self.username = value.split(None, 1) + update_sig_info(keyid=self.key_id, username=self.username, + status=self.status) + elif key == "GOODSIG": + self.valid = True + self.status = 'signature good' + self.key_id, self.username = value.split(None, 1) + update_sig_info(keyid=self.key_id, username=self.username, + status=self.status) + elif key == "VALIDSIG": + fingerprint, creation_date, sig_ts, expire_ts = value.split()[:4] + (self.fingerprint, + self.creation_date, + self.sig_timestamp, + self.expire_timestamp) = (fingerprint, creation_date, sig_ts, + expire_ts) + # may be different if signature is made with a subkey + self.pubkey_fingerprint = value.split()[-1] + self.status = 'signature valid' + update_sig_info(fingerprint=fingerprint, creation_date=creation_date, + timestamp=sig_ts, expiry=expire_ts, + pubkey_fingerprint=self.pubkey_fingerprint, + status=self.status) + elif key == "SIG_ID": + sig_id, creation_date, timestamp = value.split() + self.sig_info[sig_id] = {'creation_date': creation_date, + 'timestamp': timestamp} + (self.signature_id, + self.creation_date, self.timestamp) = (sig_id, creation_date, + timestamp) + elif key == "DECRYPTION_FAILED": # pragma: no cover + self.valid = False + self.key_id = value + self.status = 'decryption failed' + elif key == "NO_PUBKEY": # pragma: no cover + self.valid = False + self.key_id = value + self.status = 'no public key' + elif key in ("EXPKEYSIG", "REVKEYSIG"): # pragma: no cover + # signed with expired or revoked key + self.valid = False + self.key_id = value.split()[0] + if key == "EXPKEYSIG": + self.key_status = 'signing key has expired' + else: + self.key_status = 'signing key was 
revoked' + self.status = self.key_status + update_sig_info(status=self.status, keyid=self.key_id) + elif key in ("UNEXPECTED", "FAILURE"): # pragma: no cover + self.valid = False + self.key_id = value + if key == "UNEXPECTED": + self.status = 'unexpected data' + else: + # N.B. there might be other reasons. For example, if an output + # file can't be created - /dev/null/foo will lead to a + # "not a directory" error, but which is not sent as a status + # message with the [GNUPG:] prefix. Similarly if you try to + # write to "/etc/foo" as a non-root user, a "permission denied" + # error will be sent as a non-status message. + message = 'error - %s' % value + operation, code = value.rsplit(' ', 1) + if code.isdigit(): + code = int(code) & 0xFFFFFF # lose the error source + if self.gpg.error_map and code in self.gpg.error_map: + message = '%s: %s' % (operation, self.gpg.error_map[code]) + else: + system_error = bool(code & 0x8000) + code = code & 0x7FFF + if system_error: + mapping = self.GPG_SYSTEM_ERROR_CODES + else: + mapping = self.GPG_ERROR_CODES + if code in mapping: + message = '%s: %s' % (operation, mapping[code]) + if not self.status: + self.status = message + elif key in ("DECRYPTION_INFO", "PLAINTEXT", "PLAINTEXT_LENGTH", + "NO_SECKEY", "BEGIN_SIGNING"): + pass + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + +class ImportResult(object): + "Handle status messages for --import" + + counts = '''count no_user_id imported imported_rsa unchanged + n_uids n_subk n_sigs n_revoc sec_read sec_imported + sec_dups not_imported'''.split() + def __init__(self, gpg): + self.gpg = gpg + self.results = [] + self.fingerprints = [] + for result in self.counts: + setattr(self, result, 0) + + def __nonzero__(self): + if self.not_imported: return False + if not self.fingerprints: return False + return True + + __bool__ = __nonzero__ + + ok_reason = { + '0': 'Not actually changed', + '1': 'Entirely new key', + '2': 'New user IDs', + '4': 'New signatures', + '8': 'New subkeys', + '16': 'Contains private key', + } + + problem_reason = { + '0': 'No specific reason given', + '1': 'Invalid Certificate', + '2': 'Issuer Certificate missing', + '3': 'Certificate Chain too long', + '4': 'Error storing certificate', + } + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR"): + logger.warning('potential problem: %s: %s', key, value) + elif key in ("IMPORTED", "KEY_CONSIDERED"): + # this duplicates info we already see in import_ok & import_problem + pass + elif key == "NODATA": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'No valid data found'}) + elif key == "IMPORT_OK": + reason, fingerprint = value.split() + reasons = [] + for code, text in list(self.ok_reason.items()): + if int(reason) | int(code) == int(reason): + reasons.append(text) + reasontext = '\n'.join(reasons) + "\n" + self.results.append({'fingerprint': fingerprint, + 'ok': reason, 'text': reasontext}) + self.fingerprints.append(fingerprint) + elif key == "IMPORT_PROBLEM": # pragma: no cover + try: + reason, fingerprint = value.split() + except: + reason = value + fingerprint = '' + self.results.append({'fingerprint': fingerprint, + 'problem': reason, 'text': self.problem_reason[reason]}) + elif key == "IMPORT_RES": + import_res = value.split() + for i, count in enumerate(self.counts): + setattr(self, count, int(import_res[i])) + elif key == "KEYEXPIRED": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'Key 
expired'}) + elif key == "SIGEXPIRED": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'Signature expired'}) + elif key == "FAILURE": # pragma: no cover + self.results.append({'fingerprint': None, + 'problem': '0', 'text': 'Other failure'}) + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + + def summary(self): + result = [] + result.append('%d imported' % self.imported) + if self.not_imported: # pragma: no cover + result.append('%d not imported' % self.not_imported) + return ', '.join(result) + +ESCAPE_PATTERN = re.compile(r'\\x([0-9a-f][0-9a-f])', re.I) +BASIC_ESCAPES = { + r'\n': '\n', + r'\r': '\r', + r'\f': '\f', + r'\v': '\v', + r'\b': '\b', + r'\0': '\0', +} + +class SendResult(object): + def __init__(self, gpg): + self.gpg = gpg + + def handle_status(self, key, value): + logger.debug('SendResult: %s: %s', key, value) + +def _set_fields(target, fieldnames, args): + for i, var in enumerate(fieldnames): + if i < len(args): + target[var] = args[i] + else: + target[var] = 'unavailable' + +class SearchKeys(list): + ''' Handle status messages for --search-keys. + + Handle pub and uid (relating the latter to the former). + + Don't care about the rest + ''' + + UID_INDEX = 1 + FIELDS = 'type keyid algo length date expires'.split() + + def __init__(self, gpg): + self.gpg = gpg + self.curkey = None + self.fingerprints = [] + self.uids = [] + + def get_fields(self, args): + result = {} + _set_fields(result, self.FIELDS, args) + result['uids'] = [] + result['sigs'] = [] + return result + + def pub(self, args): + self.curkey = curkey = self.get_fields(args) + self.append(curkey) + + def uid(self, args): + uid = args[self.UID_INDEX] + uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid) + for k, v in BASIC_ESCAPES.items(): + uid = uid.replace(k, v) + self.curkey['uids'].append(uid) + self.uids.append(uid) + + def handle_status(self, key, value): # pragma: no cover + pass + +class ListKeys(SearchKeys): + ''' Handle status messages for --list-keys, --list-sigs. + + Handle pub and uid (relating the latter to the former). + + Don't care about (info from src/DETAILS): + + crt = X.509 certificate + crs = X.509 certificate and private key available + uat = user attribute (same as user id except for field 10). 
+ sig = signature + rev = revocation signature + pkd = public key data (special field format, see below) + grp = reserved for gpgsm + rvk = revocation key + ''' + + UID_INDEX = 9 + FIELDS = 'type trust length algo keyid date expires dummy ownertrust uid sig cap issuer flag token hash curve compliance updated origin'.split() + + def __init__(self, gpg): + super(ListKeys, self).__init__(gpg) + self.in_subkey = False + self.key_map = {} + + def key(self, args): + self.curkey = curkey = self.get_fields(args) + if curkey['uid']: + curkey['uids'].append(curkey['uid']) + del curkey['uid'] + curkey['subkeys'] = [] + self.append(curkey) + self.in_subkey = False + + pub = sec = key + + def fpr(self, args): + fp = args[9] + if fp in self.key_map and self.gpg.check_fingerprint_collisions: # pragma: no cover + raise ValueError('Unexpected fingerprint collision: %s' % fp) + if not self.in_subkey: + self.curkey['fingerprint'] = fp + self.fingerprints.append(fp) + self.key_map[fp] = self.curkey + else: + self.curkey['subkeys'][-1].append(fp) + self.key_map[fp] = self.curkey + + def _collect_subkey_info(self, curkey, args): + info_map = curkey.setdefault('subkey_info', {}) + info = {} + _set_fields(info, self.FIELDS, args) + info_map[args[4]] = info + + def sub(self, args): + # See issue #81. We create a dict with more information about + # subkeys, but for backward compatibility reason, have to add it in + # as a separate entry 'subkey_info' + subkey = [args[4], args[11]] # keyid, type + self.curkey['subkeys'].append(subkey) + self._collect_subkey_info(self.curkey, args) + self.in_subkey = True + + def ssb(self, args): + subkey = [args[4], None] # keyid, type + self.curkey['subkeys'].append(subkey) + self._collect_subkey_info(self.curkey, args) + self.in_subkey = True + + def sig(self, args): + # keyid, uid, sigclass + self.curkey['sigs'].append((args[4], args[9], args[10])) + +class ScanKeys(ListKeys): + ''' Handle status messages for --with-fingerprint.''' + + def sub(self, args): + # --with-fingerprint --with-colons somehow outputs fewer colons, + # use the last value args[-1] instead of args[11] + subkey = [args[4], args[-1]] + self.curkey['subkeys'].append(subkey) + self._collect_subkey_info(self.curkey, args) + self.in_subkey = True + +class TextHandler(object): + def _as_text(self): + return self.data.decode(self.gpg.encoding, self.gpg.decode_errors) + + if _py3k: + __str__ = _as_text + else: + __unicode__ = _as_text + + def __str__(self): + return self.data + + +class Crypt(Verify, TextHandler): + "Handle status messages for --encrypt and --decrypt" + def __init__(self, gpg): + Verify.__init__(self, gpg) + self.data = '' + self.ok = False + self.status = '' + self.key_id = None + + def __nonzero__(self): + if self.ok: return True + return False + + __bool__ = __nonzero__ + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR"): + logger.warning('potential problem: %s: %s', key, value) + elif key == "NODATA": + self.status = "no data was provided" + elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE", + "MISSING_PASSPHRASE", "DECRYPTION_FAILED", + "KEY_NOT_CREATED", "NEED_PASSPHRASE_PIN"): + self.status = key.replace("_", " ").lower() + elif key == "NEED_PASSPHRASE_SYM": + self.status = 'need symmetric passphrase' + elif key == "BEGIN_DECRYPTION": + self.status = 'decryption incomplete' + elif key == "BEGIN_ENCRYPTION": + self.status = 'encryption incomplete' + elif key == "DECRYPTION_OKAY": + self.status = 'decryption ok' + self.ok = True + elif key == 
"END_ENCRYPTION": + self.status = 'encryption ok' + self.ok = True + elif key == "INV_RECP": # pragma: no cover + self.status = 'invalid recipient' + elif key == "KEYEXPIRED": # pragma: no cover + self.status = 'key expired' + elif key == "SIG_CREATED": # pragma: no cover + self.status = 'sig created' + elif key == "SIGEXPIRED": # pragma: no cover + self.status = 'sig expired' + elif key == "ENC_TO": # pragma: no cover + # ENC_TO + self.key_id = value.split(' ', 1)[0] + elif key in ("USERID_HINT", "GOODMDC", + "END_DECRYPTION", "CARDCTRL", "BADMDC", + "SC_OP_FAILURE", "SC_OP_SUCCESS", + "PINENTRY_LAUNCHED", "KEY_CONSIDERED"): + pass + else: + Verify.handle_status(self, key, value) + +class GenKey(object): + "Handle status messages for --gen-key" + def __init__(self, gpg): + self.gpg = gpg + self.type = None + self.fingerprint = None + + def __nonzero__(self): + if self.fingerprint: return True + return False + + __bool__ = __nonzero__ + + def __str__(self): + return self.fingerprint or '' + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR"): # pragma: no cover + logger.warning('potential problem: %s: %s', key, value) + elif key == "KEY_CREATED": + (self.type,self.fingerprint) = value.split() + elif key in ("PROGRESS", "GOOD_PASSPHRASE", "KEY_NOT_CREATED"): + pass + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + +class ExportResult(GenKey): + """Handle status messages for --export[-secret-key]. + + For now, just use an existing class to base it on - if needed, we + can override handle_status for more specific message handling. + """ + def handle_status(self, key, value): + if key in ("EXPORTED", "EXPORT_RES"): + pass + else: + super(ExportResult, self).handle_status(key, value) + +class DeleteResult(object): + "Handle status messages for --delete-key and --delete-secret-key" + def __init__(self, gpg): + self.gpg = gpg + self.status = 'ok' + + def __str__(self): + return self.status + + problem_reason = { + '1': 'No such key', + '2': 'Must delete secret key first', + '3': 'Ambiguous specification', + } + + def handle_status(self, key, value): + if key == "DELETE_PROBLEM": # pragma: no cover + self.status = self.problem_reason.get(value, + "Unknown error: %r" % value) + else: # pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + + def __nonzero__(self): + return self.status == 'ok' + + __bool__ = __nonzero__ + + +class TrustResult(DeleteResult): + pass + + +class Sign(TextHandler): + "Handle status messages for --sign" + def __init__(self, gpg): + self.gpg = gpg + self.type = None + self.hash_algo = None + self.fingerprint = None + self.status = None + self.key_id = None + self.username = None + + def __nonzero__(self): + return self.fingerprint is not None + + __bool__ = __nonzero__ + + def handle_status(self, key, value): + if key in ("WARNING", "ERROR", "FAILURE"): # pragma: no cover + logger.warning('potential problem: %s: %s', key, value) + elif key in ("KEYEXPIRED", "SIGEXPIRED"): # pragma: no cover + self.status = 'key expired' + elif key == "KEYREVOKED": # pragma: no cover + self.status = 'key revoked' + elif key == "SIG_CREATED": + (self.type, + algo, self.hash_algo, cls, self.timestamp, self.fingerprint + ) = value.split() + self.status = 'signature created' + elif key == "USERID_HINT": # pragma: no cover + self.key_id, self.username = value.split(' ', 1) + elif key == "BAD_PASSPHRASE": + self.status = 'bad passphrase' + elif key in ("NEED_PASSPHRASE", "GOOD_PASSPHRASE", "BEGIN_SIGNING"): + pass + else: # 
pragma: no cover + logger.debug('message ignored: %s, %s', key, value) + +VERSION_RE = re.compile(r'gpg \(GnuPG(?:/MacGPG2)?\) (\d+(\.\d+)*)'.encode('ascii'), re.I) +HEX_DIGITS_RE = re.compile(r'[0-9a-f]+$', re.I) + +class GPG(object): + + error_map = None + + decode_errors = 'strict' + + result_map = { + 'crypt': Crypt, + 'delete': DeleteResult, + 'generate': GenKey, + 'import': ImportResult, + 'send': SendResult, + 'list': ListKeys, + 'scan': ScanKeys, + 'search': SearchKeys, + 'sign': Sign, + 'trust': TrustResult, + 'verify': Verify, + 'export': ExportResult, + } + + "Encapsulate access to the gpg executable" + def __init__(self, gpgbinary='gpg', gnupghome=None, verbose=False, + use_agent=False, keyring=None, options=None, + secret_keyring=None): + """Initialize a GPG process wrapper. Options are: + + gpgbinary -- full pathname for GPG binary. + + gnupghome -- full pathname to where we can find the public and + private keyrings. Default is whatever gpg defaults to. + keyring -- name of alternative keyring file to use, or list of such + keyrings. If specified, the default keyring is not used. + options =-- a list of additional options to pass to the GPG binary. + secret_keyring -- name of alternative secret keyring file to use, or + list of such keyrings. + """ + self.gpgbinary = gpgbinary + self.gnupghome = gnupghome + # issue 112: fail if the specified value isn't a directory + if gnupghome and not os.path.isdir(gnupghome): + raise ValueError('gnupghome should be a directory (it isn\'t): %s' % gnupghome) + if keyring: + # Allow passing a string or another iterable. Make it uniformly + # a list of keyring filenames + if isinstance(keyring, string_types): + keyring = [keyring] + self.keyring = keyring + if secret_keyring: + # Allow passing a string or another iterable. Make it uniformly + # a list of keyring filenames + if isinstance(secret_keyring, string_types): + secret_keyring = [secret_keyring] + self.secret_keyring = secret_keyring + self.verbose = verbose + self.use_agent = use_agent + if isinstance(options, str): # pragma: no cover + options = [options] + self.options = options + self.on_data = None # or a callable - will be called with data chunks + # Changed in 0.3.7 to use Latin-1 encoding rather than + # locale.getpreferredencoding falling back to sys.stdin.encoding + # falling back to utf-8, because gpg itself uses latin-1 as the default + # encoding. + self.encoding = 'latin-1' + if gnupghome and not os.path.isdir(self.gnupghome): + os.makedirs(self.gnupghome,0x1C0) + try: + p = self._open_subprocess(["--version"]) + except OSError: + msg = 'Unable to run gpg (%s) - it may not be available.' % self.gpgbinary + logger.exception(msg) + raise OSError(msg) + result = self.result_map['verify'](self) # any result will do for this + self._collect_output(p, result, stdin=p.stdin) + if p.returncode != 0: # pragma: no cover + raise ValueError("Error invoking gpg: %s: %s" % (p.returncode, + result.stderr)) + m = VERSION_RE.match(result.data) + if not m: # pragma: no cover + self.version = None + else: + dot = '.'.encode('ascii') + self.version = tuple([int(s) for s in m.groups()[0].split(dot)]) + + # See issue #97. It seems gpg allow duplicate keys in keyrings, so we + # can't be too strict. + self.check_fingerprint_collisions = False + + def make_args(self, args, passphrase): + """ + Make a list of command line elements for GPG. The value of ``args`` + will be appended. The ``passphrase`` argument needs to be True if + a passphrase will be sent to GPG, else False. 
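+ The result is returned as a list of command-line arguments suitable for
+ passing to Popen (see _open_subprocess).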
+ """ + cmd = [self.gpgbinary, '--status-fd', '2', '--no-tty', '--no-verbose'] + if 'DEBUG_IPC' in os.environ: + cmd.extend(['--debug', 'ipc']) + if passphrase and hasattr(self, 'version'): + if self.version >= (2, 1): + cmd[1:1] = ['--pinentry-mode', 'loopback'] + cmd.extend(['--fixed-list-mode', '--batch', '--with-colons']) + if self.gnupghome: + cmd.extend(['--homedir', no_quote(self.gnupghome)]) + if self.keyring: + cmd.append('--no-default-keyring') + for fn in self.keyring: + cmd.extend(['--keyring', no_quote(fn)]) + if self.secret_keyring: + for fn in self.secret_keyring: + cmd.extend(['--secret-keyring', no_quote(fn)]) + if passphrase: + cmd.extend(['--passphrase-fd', '0']) + if self.use_agent: # pragma: no cover + cmd.append('--use-agent') + if self.options: + cmd.extend(self.options) + cmd.extend(args) + return cmd + + def _open_subprocess(self, args, passphrase=False): + # Internal method: open a pipe to a GPG subprocess and return + # the file objects for communicating with it. + + # def debug_print(cmd): + # result = [] + # for c in cmd: + # if ' ' not in c: + # result.append(c) + # else: + # if '"' not in c: + # result.append('"%s"' % c) + # elif "'" not in c: + # result.append("'%s'" % c) + # else: + # result.append(c) # give up + # return ' '.join(cmd) + from subprocess import list2cmdline as debug_print + + cmd = self.make_args(args, passphrase) + if self.verbose: # pragma: no cover + print(debug_print(cmd)) + if not STARTUPINFO: + si = None + else: # pragma: no cover + si = STARTUPINFO() + si.dwFlags = STARTF_USESHOWWINDOW + si.wShowWindow = SW_HIDE + result = Popen(cmd, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE, + startupinfo=si) + logger.debug("%s: %s", result.pid, debug_print(cmd)) + return result + + def _read_response(self, stream, result): + # Internal method: reads all the stderr output from GPG, taking notice + # only of lines that begin with the magic [GNUPG:] prefix. + # + # Calls methods on the response object for each valid token found, + # with the arg being the remainder of the status line. + lines = [] + while True: + line = stream.readline() + if len(line) == 0: + break + lines.append(line) + line = line.rstrip() + if self.verbose: # pragma: no cover + print(line) + logger.debug("%s", line) + if line[0:9] == '[GNUPG:] ': + # Chop off the prefix + line = line[9:] + L = line.split(None, 1) + keyword = L[0] + if len(L) > 1: + value = L[1] + else: + value = "" + result.handle_status(keyword, value) + result.stderr = ''.join(lines) + + def _read_data(self, stream, result, on_data=None): + # Read the contents of the file from GPG's stdout + chunks = [] + while True: + data = stream.read(1024) + if len(data) == 0: + if on_data: + on_data(data) + break + logger.debug("chunk: %r" % data[:256]) + append = True + if on_data: + append = on_data(data) != False + if append: + chunks.append(data) + if _py3k: + # Join using b'' or '', as appropriate + result.data = type(data)().join(chunks) + else: + result.data = ''.join(chunks) + + def _collect_output(self, process, result, writer=None, stdin=None): + """ + Drain the subprocesses output streams, writing the collected output + to the result. If a writer thread (writing to the subprocess) is given, + make sure it's joined before returning. If a stdin stream is given, + close it before returning. 
+ """ + stderr = codecs.getreader(self.encoding)(process.stderr) + rr = threading.Thread(target=self._read_response, args=(stderr, result)) + rr.setDaemon(True) + logger.debug('stderr reader: %r', rr) + rr.start() + + stdout = process.stdout + dr = threading.Thread(target=self._read_data, args=(stdout, result, + self.on_data)) + dr.setDaemon(True) + logger.debug('stdout reader: %r', dr) + dr.start() + + dr.join() + rr.join() + if writer is not None: + writer.join() + process.wait() + rc = process.returncode + if rc != 0: + logger.warning('gpg returned a non-zero error code: %d', rc) + if stdin is not None: + try: + stdin.close() + except IOError: # pragma: no cover + pass + stderr.close() + stdout.close() + + def _handle_io(self, args, fileobj, result, passphrase=None, binary=False): + "Handle a call to GPG - pass input data, collect output data" + # Handle a basic data call - pass data to GPG, handle the output + # including status information. Garbage In, Garbage Out :) + p = self._open_subprocess(args, passphrase is not None) + if not binary: # pragma: no cover + stdin = codecs.getwriter(self.encoding)(p.stdin) + else: + stdin = p.stdin + if passphrase: + _write_passphrase(stdin, passphrase, self.encoding) + writer = _threaded_copy_data(fileobj, stdin) + self._collect_output(p, result, writer, stdin) + return result + + # + # SIGNATURE METHODS + # + def sign(self, message, **kwargs): + """sign message""" + f = _make_binary_stream(message, self.encoding) + result = self.sign_file(f, **kwargs) + f.close() + return result + + def set_output_without_confirmation(self, args, output): + "If writing to a file which exists, avoid a confirmation message." + if os.path.exists(output): + # We need to avoid an overwrite confirmation message + args.extend(['--yes']) + args.extend(['--output', no_quote(output)]) + + def is_valid_passphrase(self, passphrase): + """ + Confirm that the passphrase doesn't contain newline-type characters - + it is passed in a pipe to gpg, and so not checking could lead to + spoofing attacks by passing arbitrary text after passphrase and newline. + """ + return ('\n' not in passphrase and '\r' not in passphrase and + '\x00' not in passphrase) + + def sign_file(self, file, keyid=None, passphrase=None, clearsign=True, + detach=False, binary=False, output=None, extra_args=None): + """sign file""" + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + logger.debug("sign_file: %s", file) + if binary: # pragma: no cover + args = ['-s'] + else: + args = ['-sa'] + # You can't specify detach-sign and clearsign together: gpg ignores + # the detach-sign in that case. + if detach: + args.append("--detach-sign") + elif clearsign: + args.append("--clearsign") + if keyid: + args.extend(['--default-key', no_quote(keyid)]) + if output: # write the output to a file with the specified name + self.set_output_without_confirmation(args, output) + + if extra_args: + args.extend(extra_args) + result = self.result_map['sign'](self) + #We could use _handle_io here except for the fact that if the + #passphrase is bad, gpg bails and you can't write the message. 
+ p = self._open_subprocess(args, passphrase is not None) + try: + stdin = p.stdin + if passphrase: + _write_passphrase(stdin, passphrase, self.encoding) + writer = _threaded_copy_data(file, stdin) + except IOError: # pragma: no cover + logging.exception("error writing message") + writer = None + self._collect_output(p, result, writer, stdin) + return result + + def verify(self, data, **kwargs): + """Verify the signature on the contents of the string 'data' + + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(passphrase='foo') + >>> key = gpg.gen_key(input) + >>> assert key + >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar') + >>> assert not sig + >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo') + >>> assert sig + >>> verify = gpg.verify(sig.data) + >>> assert verify + + """ + f = _make_binary_stream(data, self.encoding) + result = self.verify_file(f, **kwargs) + f.close() + return result + + def verify_file(self, file, data_filename=None, close_file=True, extra_args=None): + "Verify the signature on the contents of the file-like object 'file'" + logger.debug('verify_file: %r, %r', file, data_filename) + result = self.result_map['verify'](self) + args = ['--verify'] + if extra_args: + args.extend(extra_args) + if data_filename is None: + self._handle_io(args, file, result, binary=True) + else: + logger.debug('Handling detached verification') + import tempfile + fd, fn = tempfile.mkstemp(prefix='pygpg') + s = file.read() + if close_file: + file.close() + logger.debug('Wrote to temp file: %r', s) + os.write(fd, s) + os.close(fd) + args.append(no_quote(fn)) + args.append(no_quote(data_filename)) + try: + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + finally: + os.unlink(fn) + return result + + def verify_data(self, sig_filename, data, extra_args=None): + "Verify the signature in sig_filename against data in memory" + logger.debug('verify_data: %r, %r ...', sig_filename, data[:16]) + result = self.result_map['verify'](self) + args = ['--verify'] + if extra_args: + args.extend(extra_args) + args.extend([no_quote(sig_filename), '-']) + stream = _make_memory_stream(data) + self._handle_io(args, stream, result, binary=True) + return result + + # + # KEY MANAGEMENT + # + + def import_keys(self, key_data, extra_args=None): + """ + Import the key_data into our keyring. 
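+ The key material is passed to ``gpg --import`` on standard input.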
+ """ + result = self.result_map['import'](self) + logger.debug('import_keys: %r', key_data[:256]) + data = _make_binary_stream(key_data, self.encoding) + args = ['--import'] + if extra_args: + args.extend(extra_args) + self._handle_io(args, data, result, binary=True) + logger.debug('import_keys result: %r', result.__dict__) + data.close() + return result + + def recv_keys(self, keyserver, *keyids): + """Import a key from a keyserver + + >>> import shutil + >>> shutil.rmtree("keys", ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> os.chmod('keys', 0x1C0) + >>> result = gpg.recv_keys('pgp.mit.edu', '92905378') + >>> if 'NO_EXTERNAL_TESTS' not in os.environ: assert result + + """ + result = self.result_map['import'](self) + logger.debug('recv_keys: %r', keyids) + data = _make_binary_stream("", self.encoding) + #data = "" + args = ['--keyserver', no_quote(keyserver), '--recv-keys'] + args.extend([no_quote(k) for k in keyids]) + self._handle_io(args, data, result, binary=True) + logger.debug('recv_keys result: %r', result.__dict__) + data.close() + return result + + def send_keys(self, keyserver, *keyids): + """Send a key to a keyserver. + + Note: it's not practical to test this function without sending + arbitrary data to live keyservers. + """ + result = self.result_map['send'](self) + logger.debug('send_keys: %r', keyids) + data = _make_binary_stream('', self.encoding) + #data = "" + args = ['--keyserver', no_quote(keyserver), '--send-keys'] + args.extend([no_quote(k) for k in keyids]) + self._handle_io(args, data, result, binary=True) + logger.debug('send_keys result: %r', result.__dict__) + data.close() + return result + + def delete_keys(self, fingerprints, secret=False, passphrase=None, + expect_passphrase=True): + """ + Delete the indicated keys. + + Since GnuPG 2.1, you can't delete secret keys without providing a + passphrase. However, if you're expecting the passphrase to go to gpg + via pinentry, you should specify expect_passphrase=False. (It's only + checked for GnuPG >= 2.1). + """ + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + which='key' + if secret: # pragma: no cover + if (self.version >= (2, 1) and passphrase is None and + expect_passphrase): + raise ValueError('For GnuPG >= 2.1, deleting secret keys ' + 'needs a passphrase to be provided') + which='secret-key' + if _is_sequence(fingerprints): # pragma: no cover + fingerprints = [no_quote(s) for s in fingerprints] + else: + fingerprints = [no_quote(fingerprints)] + args = ['--delete-%s' % which] + if secret and self.version >= (2, 1): + args.insert(0, '--yes') + args.extend(fingerprints) + result = self.result_map['delete'](self) + if not secret or self.version < (2, 1): + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + else: + # Need to send in a passphrase. + f = _make_binary_stream('', self.encoding) + try: + self._handle_io(args, f, result, passphrase=passphrase, + binary=True) + finally: + f.close() + return result + + def export_keys(self, keyids, secret=False, armor=True, minimal=False, + passphrase=None, expect_passphrase=True): + """ + Export the indicated keys. A 'keyid' is anything gpg accepts. + + Since GnuPG 2.1, you can't export secret keys without providing a + passphrase. 
However, if you're expecting the passphrase to go to gpg + via pinentry, you should specify expect_passphrase=False. (It's only + checked for GnuPG >= 2.1). + """ + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + which='' + if secret: + which='-secret-key' + if (self.version >= (2, 1) and passphrase is None and + expect_passphrase): + raise ValueError('For GnuPG >= 2.1, exporting secret keys ' + 'needs a passphrase to be provided') + if _is_sequence(keyids): + keyids = [no_quote(k) for k in keyids] + else: + keyids = [no_quote(keyids)] + args = ['--export%s' % which] + if armor: + args.append('--armor') + if minimal: # pragma: no cover + args.extend(['--export-options','export-minimal']) + args.extend(keyids) + # gpg --export produces no status-fd output; stdout will be + # empty in case of failure + #stdout, stderr = p.communicate() + result = self.result_map['export'](self) + if not secret or self.version < (2, 1): + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + else: + # Need to send in a passphrase. + f = _make_binary_stream('', self.encoding) + try: + self._handle_io(args, f, result, passphrase=passphrase, + binary=True) + finally: + f.close() + logger.debug('export_keys result: %r', result.data) + # Issue #49: Return bytes if armor not specified, else text + result = result.data + if armor: + result = result.decode(self.encoding, self.decode_errors) + return result + + def _get_list_output(self, p, kind): + # Get the response information + result = self.result_map[kind](self) + self._collect_output(p, result, stdin=p.stdin) + lines = result.data.decode(self.encoding, + self.decode_errors).splitlines() + valid_keywords = 'pub uid sec fpr sub ssb sig'.split() + for line in lines: + if self.verbose: # pragma: no cover + print(line) + logger.debug("line: %r", line.rstrip()) + if not line: # pragma: no cover + break + L = line.strip().split(':') + if not L: # pragma: no cover + continue + keyword = L[0] + if keyword in valid_keywords: + getattr(result, keyword)(L) + return result + + def list_keys(self, secret=False, keys=None, sigs=False): + """ list the keys currently in the keyring + + >>> import shutil + >>> shutil.rmtree("keys", ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(passphrase='foo') + >>> result = gpg.gen_key(input) + >>> fp1 = result.fingerprint + >>> result = gpg.gen_key(input) + >>> fp2 = result.fingerprint + >>> pubkeys = gpg.list_keys() + >>> assert fp1 in pubkeys.fingerprints + >>> assert fp2 in pubkeys.fingerprints + + """ + + if sigs: + which = 'sigs' + else: + which = 'keys' + if secret: + which='secret-keys' + args = ['--list-%s' % which, + '--fingerprint', '--fingerprint'] # get subkey FPs, too + if keys: + if isinstance(keys, string_types): + keys = [keys] + args.extend(keys) + p = self._open_subprocess(args) + return self._get_list_output(p, 'list') + + def scan_keys(self, filename): + """ + List details of an ascii armored or binary key file + without first importing it to the local keyring. 
+ + The function achieves this on modern GnuPG by running: + + $ gpg --dry-run --import-options import-show --import + + On older versions, it does the *much* riskier: + + $ gpg --with-fingerprint --with-colons filename + """ + if self.version >= (2, 1): + args = ['--dry-run', '--import-options', 'import-show', '--import'] + else: + logger.warning('Trying to list packets, but if the file is not a ' + 'keyring, might accidentally decrypt') + args = ['--with-fingerprint', '--with-colons', '--fixed-list-mode'] + args.append(no_quote(filename)) + p = self._open_subprocess(args) + return self._get_list_output(p, 'scan') + + def search_keys(self, query, keyserver='pgp.mit.edu'): + """ search keyserver by query (using --search-keys option) + + >>> import shutil + >>> shutil.rmtree('keys', ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> os.chmod('keys', 0x1C0) + >>> result = gpg.search_keys('') + >>> if 'NO_EXTERNAL_TESTS' not in os.environ: assert result, 'Failed using default keyserver' + >>> #keyserver = 'keyserver.ubuntu.com' + >>> #result = gpg.search_keys('', keyserver) + >>> #assert result, 'Failed using keyserver.ubuntu.com' + + """ + query = query.strip() + if HEX_DIGITS_RE.match(query): + query = '0x' + query + args = ['--fingerprint', + '--keyserver', no_quote(keyserver), '--search-keys', + no_quote(query)] + p = self._open_subprocess(args) + + # Get the response information + result = self.result_map['search'](self) + self._collect_output(p, result, stdin=p.stdin) + lines = result.data.decode(self.encoding, + self.decode_errors).splitlines() + valid_keywords = ['pub', 'uid'] + for line in lines: + if self.verbose: # pragma: no cover + print(line) + logger.debug('line: %r', line.rstrip()) + if not line: # sometimes get blank lines on Windows + continue + L = line.strip().split(':') + if not L: # pragma: no cover + continue + keyword = L[0] + if keyword in valid_keywords: + getattr(result, keyword)(L) + return result + + def gen_key(self, input): + """Generate a key; you might use gen_key_input() to create the + control input. 
+ + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(passphrase='foo') + >>> result = gpg.gen_key(input) + >>> assert result + >>> result = gpg.gen_key('foo') + >>> assert not result + + """ + args = ["--gen-key"] + result = self.result_map['generate'](self) + f = _make_binary_stream(input, self.encoding) + self._handle_io(args, f, result, binary=True) + f.close() + return result + + def gen_key_input(self, **kwargs): + """ + Generate --gen-key input per gpg doc/DETAILS + """ + parms = {} + for key, val in list(kwargs.items()): + key = key.replace('_','-').title() + if str(val).strip(): # skip empty strings + parms[key] = val + parms.setdefault('Key-Type','RSA') + if 'key_curve' not in kwargs: + parms.setdefault('Key-Length',2048) + parms.setdefault('Name-Real', "Autogenerated Key") + logname = (os.environ.get('LOGNAME') or os.environ.get('USERNAME') or + 'unspecified') + hostname = socket.gethostname() + parms.setdefault('Name-Email', "%s@%s" % (logname.replace(' ', '_'), + hostname)) + out = "Key-Type: %s\n" % parms.pop('Key-Type') + for key, val in list(parms.items()): + out += "%s: %s\n" % (key, val) + out += "%commit\n" + return out + + # Key-Type: RSA + # Key-Length: 1024 + # Name-Real: ISdlink Server on %s + # Name-Comment: Created by %s + # Name-Email: isdlink@%s + # Expire-Date: 0 + # %commit + # + # + # Key-Type: DSA + # Key-Length: 1024 + # Subkey-Type: ELG-E + # Subkey-Length: 1024 + # Name-Real: Joe Tester + # Name-Comment: with stupid passphrase + # Name-Email: joe@foo.bar + # Expire-Date: 0 + # Passphrase: abc + # %pubring foo.pub + # %secring foo.sec + # %commit + + # + # ENCRYPTION + # + def encrypt_file(self, file, recipients, sign=None, + always_trust=False, passphrase=None, + armor=True, output=None, symmetric=False, extra_args=None): + "Encrypt the message read from the file-like object 'file'" + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + args = ['--encrypt'] + if symmetric: + # can't be False or None - could be True or a cipher algo value + # such as AES256 + args = ['--symmetric'] + if symmetric is not True: + args.extend(['--cipher-algo', no_quote(symmetric)]) + # else use the default, currently CAST5 + else: + if not recipients: + raise ValueError('No recipients specified with asymmetric ' + 'encryption') + if not _is_sequence(recipients): + recipients = (recipients,) + for recipient in recipients: + args.extend(['--recipient', no_quote(recipient)]) + if armor: # create ascii-armored output - False for binary output + args.append('--armor') + if output: # write the output to a file with the specified name + self.set_output_without_confirmation(args, output) + if sign is True: # pragma: no cover + args.append('--sign') + elif sign: # pragma: no cover + args.extend(['--sign', '--default-key', no_quote(sign)]) + if always_trust: # pragma: no cover + args.append('--always-trust') + if extra_args: + args.extend(extra_args) + result = self.result_map['crypt'](self) + self._handle_io(args, file, result, passphrase=passphrase, binary=True) + logger.debug('encrypt result: %r', result.data) + return result + + def encrypt(self, data, recipients, **kwargs): + """Encrypt the message contained in the string 'data' + + >>> import shutil + >>> if os.path.exists("keys"): + ... 
shutil.rmtree("keys", ignore_errors=True) + >>> GPGBINARY = os.environ.get('GPGBINARY', 'gpg') + >>> if not os.path.isdir('keys'): os.mkdir('keys') + >>> gpg = GPG(gpgbinary=GPGBINARY, gnupghome='keys') + >>> input = gpg.gen_key_input(name_email='user1@test', passphrase='pp1') + >>> result = gpg.gen_key(input) + >>> fp1 = result.fingerprint + >>> input = gpg.gen_key_input(name_email='user2@test', passphrase='pp2') + >>> result = gpg.gen_key(input) + >>> fp2 = result.fingerprint + >>> result = gpg.encrypt("hello",fp2) + >>> message = str(result) + >>> assert message != 'hello' + >>> result = gpg.decrypt(message, passphrase='pp2') + >>> assert result + >>> str(result) + 'hello' + >>> result = gpg.encrypt("hello again", fp1) + >>> message = str(result) + >>> result = gpg.decrypt(message, passphrase='bar') + >>> result.status in ('decryption failed', 'bad passphrase') + True + >>> assert not result + >>> result = gpg.decrypt(message, passphrase='pp1') + >>> result.status == 'decryption ok' + True + >>> str(result) + 'hello again' + >>> result = gpg.encrypt("signed hello", fp2, sign=fp1, passphrase='pp1') + >>> result.status == 'encryption ok' + True + >>> message = str(result) + >>> result = gpg.decrypt(message, passphrase='pp2') + >>> result.status == 'decryption ok' + True + >>> assert result.fingerprint == fp1 + + """ + data = _make_binary_stream(data, self.encoding) + result = self.encrypt_file(data, recipients, **kwargs) + data.close() + return result + + def decrypt(self, message, **kwargs): + data = _make_binary_stream(message, self.encoding) + result = self.decrypt_file(data, **kwargs) + data.close() + return result + + def decrypt_file(self, file, always_trust=False, passphrase=None, + output=None, extra_args=None): + if passphrase and not self.is_valid_passphrase(passphrase): + raise ValueError('Invalid passphrase') + args = ["--decrypt"] + if output: # write the output to a file with the specified name + self.set_output_without_confirmation(args, output) + if always_trust: # pragma: no cover + args.append("--always-trust") + if extra_args: + args.extend(extra_args) + result = self.result_map['crypt'](self) + self._handle_io(args, file, result, passphrase, binary=True) + logger.debug('decrypt result: %r', result.data) + return result + + def trust_keys(self, fingerprints, trustlevel): + levels = Verify.TRUST_LEVELS + if trustlevel not in levels: + poss = ', '.join(sorted(levels)) + raise ValueError('Invalid trust level: "%s" (must be one of %s)' % + (trustlevel, poss)) + trustlevel = levels[trustlevel] + 2 + import tempfile + try: + fd, fn = tempfile.mkstemp() + lines = [] + if isinstance(fingerprints, string_types): + fingerprints = [fingerprints] + for f in fingerprints: + lines.append('%s:%s:' % (f, trustlevel)) + # The trailing newline is required! 
+ s = os.linesep.join(lines) + os.linesep + logger.debug('writing ownertrust info: %s', s); + os.write(fd, s.encode(self.encoding)) + os.close(fd) + result = self.result_map['trust'](self) + p = self._open_subprocess(['--import-ownertrust', fn]) + self._collect_output(p, result, stdin=p.stdin) + if p.returncode != 0: + raise ValueError('gpg returned an error - return code %d' % + p.returncode) + finally: + os.remove(fn) + return result diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py b/insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py new file mode 100644 index 0000000000..9de26fa928 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/oyaml.py @@ -0,0 +1,53 @@ +import platform +import sys +from collections import OrderedDict + +import yaml as pyyaml + + +_items = "viewitems" if sys.version_info < (3,) else "items" +_std_dict_is_order_preserving = sys.version_info >= (3, 7) or ( + sys.version_info >= (3, 6) and platform.python_implementation() == "CPython" +) + + +def map_representer(dumper, data): + return dumper.represent_dict(getattr(data, _items)()) + + +def map_constructor(loader, node): + loader.flatten_mapping(node) + pairs = loader.construct_pairs(node) + try: + return OrderedDict(pairs) + except TypeError: + loader.construct_mapping(node) # trigger any contextual error + raise + + +_loaders = [getattr(pyyaml.loader, x) for x in pyyaml.loader.__all__] +_dumpers = [getattr(pyyaml.dumper, x) for x in pyyaml.dumper.__all__] +try: + _cyaml = pyyaml.cyaml.__all__ +except AttributeError: + pass +else: + _loaders += [getattr(pyyaml.cyaml, x) for x in _cyaml if x.endswith("Loader")] + _dumpers += [getattr(pyyaml.cyaml, x) for x in _cyaml if x.endswith("Dumper")] + +Dumper = None +for Dumper in _dumpers: + pyyaml.add_representer(dict, map_representer, Dumper=Dumper) + pyyaml.add_representer(OrderedDict, map_representer, Dumper=Dumper) + +Loader = None +if not _std_dict_is_order_preserving: + for Loader in _loaders: + pyyaml.add_constructor("tag:yaml.org,2002:map", map_constructor, Loader=Loader) + + +# Merge PyYAML namespace into ours. 
+# This allows users a drop-in replacement: +# import oyaml as yaml +del map_constructor, map_representer, Loader, Dumper +from yaml import * diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth new file mode 100644 index 0000000000..68e19a260a --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13-py3.8-nspkg.pth @@ -0,0 +1 @@ +import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('ruamel',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('ruamel', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('ruamel', [os.path.dirname(p)])));m = m or sys.modules.setdefault('ruamel', types.ModuleType('ruamel'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER new file mode 100644 index 0000000000..a1b589e38a --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE new file mode 100644 index 0000000000..3f65b07a8c --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/LICENSE @@ -0,0 +1,21 @@ + The MIT License (MIT) + + Copyright (c) 2014-2021 Anthon van der Neut, Ruamel bvba + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
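The oyaml shim added earlier in this patch registers order-preserving representers (and, on pre-3.7 Pythons, an OrderedDict constructor) so that it can stand in for PyYAML without sorting mapping keys. A minimal sketch of that behaviour, not part of the patch itself; it assumes the module is importable under the plain name `oyaml` (inside insights it actually lives under the playbook_verifier contrib path):

    from collections import OrderedDict

    import oyaml as yaml  # assumed import name for the shim above

    data = OrderedDict([("zebra", 1), ("apple", 2), ("mango", 3)])
    text = yaml.dump(data)  # keys are emitted in insertion order, not sorted
    # loading the dump back gives the same key order on every supported Python
    assert list(yaml.safe_load(text)) == ["zebra", "apple", "mango"]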
diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA new file mode 100644 index 0000000000..92fc1d4906 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/METADATA @@ -0,0 +1,815 @@ +Metadata-Version: 2.1 +Name: ruamel.yaml +Version: 0.16.13 +Summary: ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order +Home-page: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree +Author: Anthon van der Neut +Author-email: a.van.der.neut@ruamel.eu +License: MIT license +Keywords: yaml 1.2 parser round-trip preserve quotes order config +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup +Classifier: Typing :: Typed +Description-Content-Type: text/x-rst +Requires-Dist: ruamel.yaml.clib (>=0.1.2) ; platform_python_implementation=="CPython" and python_version<"3.10" +Requires-Dist: ruamel.ordereddict ; platform_python_implementation=="CPython" and python_version<="2.7" +Provides-Extra: docs +Requires-Dist: ryd ; extra == 'docs' +Provides-Extra: jinja2 +Requires-Dist: ruamel.yaml.jinja2 (>=0.2) ; extra == 'jinja2' + + +ruamel.yaml +=========== + +``ruamel.yaml`` is a YAML 1.2 loader/dumper package for Python. + +:version: 0.16.13 +:updated: 2021-03-05 +:documentation: http://yaml.readthedocs.io +:repository: https://sourceforge.net/projects/ruamel-yaml/ +:pypi: https://pypi.org/project/ruamel.yaml/ + +*The 0.16.13 release is the last that will tested to be working on Python 2.7. +The 0.17 series will still be tested on Python 3.5, but the 0.18 will not. The +0.17 series will also stop support for the old PyYAML functions, so a `YAML()` instance +will need to be created.* + +*Please adjust your dependencies accordingly if necessary.* + + +Starting with version 0.15.0 the way YAML files are loaded and dumped +is changing. See the API doc for details. Currently existing +functionality will throw a warning before being changed/removed. +**For production systems you should pin the version being used with +``ruamel.yaml<=0.15``**. There might be bug fixes in the 0.14 series, +but new functionality is likely only to be available via the new API. + +If your package uses ``ruamel.yaml`` and is not listed on PyPI, drop +me an email, preferably with some information on how you use the +package (or a link to bitbucket/github) and I'll keep you informed +when the status of the API is stable enough to make the transition. 
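The paragraph above refers to ruamel.yaml's newer API, in which a `YAML()` instance replaces the old module-level load/dump helpers. A hedged round-trip sketch, not part of the patch; it uses the public `ruamel.yaml` package name rather than the vendored contrib path:

    import sys
    from ruamel.yaml import YAML  # public package name; the patch vendors a copy under contrib/ruamel_yaml

    yaml = YAML()  # round-trip ('rt') mode is the default
    doc = yaml.load("a: 1  # keep me\nb: [2, 3]\n")
    doc["b"].append(4)
    yaml.dump(doc, sys.stdout)  # comments, flow style and key order survive the round trip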
+ +* `Overview `_ +* `Installing `_ +* `Basic Usage `_ +* `Details `_ +* `Examples `_ +* `API `_ +* `Differences with PyYAML `_ + +.. image:: https://readthedocs.org/projects/yaml/badge/?version=stable + :target: https://yaml.readthedocs.org/en/stable + +.. image:: https://bestpractices.coreinfrastructure.org/projects/1128/badge + :target: https://bestpractices.coreinfrastructure.org/projects/1128 + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/license.svg?format=raw + :target: https://opensource.org/licenses/MIT + +.. image:: https://sourceforge.net/p/ruamel-yaml/code/ci/default/tree/_doc/_static/pypi.svg?format=raw + :target: https://pypi.org/project/ruamel.yaml/ + +.. image:: https://sourceforge.net/p/oitnb/code/ci/default/tree/_doc/_static/oitnb.svg?format=raw + :target: https://pypi.org/project/oitnb/ + +.. image:: http://www.mypy-lang.org/static/mypy_badge.svg + :target: http://mypy-lang.org/ + +ChangeLog +========= + +.. should insert NEXT: at the beginning of line for next key (with empty line) + +0.16.13 (2021-03-05): + - fix for issue 359: could not update() CommentedMap with keyword arguments + (reported by `Steve Franchak `__) + - fix for issue 365: unable to dump mutated TimeStamp objects + (reported by Anton Akmerov `__) + - fix for issue 371: unable to addd comment without starting space + (reported by 'Mark Grandi `__) + - fix for issue 373: recursive call to walk_tree not preserving all params + (reported by `eulores `__) + - a None value in a flow-style sequence is now dumped as `null` instead + of `!!null ''` (reported by mcarans on + `StackOverlow `__) + +0.16.12 (2020-09-04): + - update links in doc + +0.16.11 (2020-09-03): + - workaround issue with setuptools 0.50 and importing pip ( fix by jaraco + https://github.com/pypa/setuptools/issues/2355#issuecomment-685159580 ) + +0.16.10 (2020-02-12): + - (auto) updated image references in README to sourceforge + +0.16.9 (2020-02-11): + - update CHANGES + +0.16.8 (2020-02-11): + - update requirements so that ruamel.yaml.clib is installed for 3.8, + as it has become available (via manylinux builds) + +0.16.7 (2020-01-30): + - fix typchecking issue on TaggedScalar (reported by Jens Nielsen) + - fix error in dumping literal scalar in sequence with comments before element + (reported by `EJ Etherington `__) + +0.16.6 (2020-01-20): + - fix empty string mapping key roundtripping with preservation of quotes as `? ''` + (reported via email by Tomer Aharoni). 
+ - fix incorrect state setting in class constructor (reported by `Douglas Raillard + `__) + - adjust deprecation warning test for Hashable, as that no longer warns (reported + by `Jason Montleon `__) + +0.16.5 (2019-08-18): + - allow for ``YAML(typ=['unsafe', 'pytypes'])`` + +0.16.4 (2019-08-16): + - fix output of TAG directives with # (reported by `Thomas Smith + `__) + + +0.16.3 (2019-08-15): + - split construct_object + - change stuff back to keep mypy happy + - move setting of version based on YAML directive to scanner, allowing to + check for file version during TAG directive scanning + +0.16.2 (2019-08-15): + - preserve YAML and TAG directives on roundtrip, correctly output # + in URL for YAML 1.2 (both reported by `Thomas Smith + `__) + +0.16.1 (2019-08-08): + - Force the use of new version of ruamel.yaml.clib (reported by `Alex Joz + `__) + - Allow '#' in tag URI as these are allowed in YAML 1.2 (reported by + `Thomas Smith + `__) + +0.16.0 (2019-07-25): + - split of C source that generates .so file to ruamel.yaml.clib + - duplicate keys are now an error when working with the old API as well + +0.15.100 (2019-07-17): + - fixing issue with dumping deep-copied data from commented YAML, by + providing both the memo parameter to __deepcopy__, and by allowing + startmarks to be compared on their content (reported by `Theofilos + Petsios + `__) + +0.15.99 (2019-07-12): + - add `py.typed` to distribution, based on a PR submitted by + `Michael Crusoe + `__ + - merge PR 40 (also by Michael Crusoe) to more accurately specify + repository in the README (also reported in a misunderstood issue + some time ago) + +0.15.98 (2019-07-09): + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.12, needed + for Python 3.8.0b2 (reported by `John Vandenberg + `__) + +0.15.97 (2019-06-06): + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.10, needed for + Python 3.8.0b1 + - regenerate ext/_ruamel_yaml.c with Cython version 0.29.9, needed for + Python 3.8.0a4 (reported by `Anthony Sottile + `__) + +0.15.96 (2019-05-16): + - fix failure to indent comments on round-trip anchored block style + scalars in block sequence (reported by `William Kimball + `__) + +0.15.95 (2019-05-16): + - fix failure to round-trip anchored scalars in block sequence + (reported by `William Kimball + `__) + - wheel files for Python 3.4 no longer provided (`Python 3.4 EOL 2019-03-18 + `__) + +0.15.94 (2019-04-23): + - fix missing line-break after end-of-file comments not ending in + line-break (reported by `Philip Thompson + `__) + +0.15.93 (2019-04-21): + - fix failure to parse empty implicit flow mapping key + - in YAML 1.1 plains scalars `y`, 'n', `Y`, and 'N' are now + correctly recognised as booleans and such strings dumped quoted + (reported by `Marcel Bollmann + `__) + +0.15.92 (2019-04-16): + - fix failure to parse empty implicit block mapping key (reported by + `Nolan W `__) + +0.15.91 (2019-04-05): + - allowing duplicate keys would not work for merge keys (reported by mamacdon on + `StackOverflow `__ + +0.15.90 (2019-04-04): + - fix issue with updating `CommentedMap` from list of tuples (reported by + `Peter Henry `__) + +0.15.89 (2019-02-27): + - fix for items with flow-mapping in block sequence output on single line + (reported by `Zahari Dim `__) + - fix for safe dumping erroring in creation of representereror when dumping namedtuple + (reported and solution by `Jaakko Kantojärvi `__) + +0.15.88 (2019-02-12): + - fix inclusing of python code from the subpackage data (containing extra tests, + reported by 
`Florian Apolloner `__) + +0.15.87 (2019-01-22): + - fix problem with empty lists and the code to reinsert merge keys (reported via email + by Zaloo) + +0.15.86 (2019-01-16): + - reinsert merge key in its old position (reported by grumbler on + `StackOverflow `__) + - fix for issue with non-ASCII anchor names (reported and fix + provided by Dandaleon Flux via email) + - fix for issue when parsing flow mapping value starting with colon (in pure Python only) + (reported by `FichteFoll `__) + +0.15.85 (2019-01-08): + - the types used by ``SafeConstructor`` for mappings and sequences can + now by set by assigning to ``XXXConstructor.yaml_base_dict_type`` + (and ``..._list_type``), preventing the need to copy two methods + with 50+ lines that had ``var = {}`` hardcoded. (Implemented to + help solve an feature request by `Anthony Sottile + `__ in an easier way) + +0.15.84 (2019-01-07): + - fix for ``CommentedMap.copy()`` not returning ``CommentedMap``, let alone copying comments etc. + (reported by `Anthony Sottile `__) + +0.15.83 (2019-01-02): + - fix for bug in roundtripping aliases used as key (reported via email by Zaloo) + +0.15.82 (2018-12-28): + - anchors and aliases on scalar int, float, string and bool are now preserved. Anchors + do not need a referring alias for these (reported by + `Alex Harvey `__) + - anchors no longer lost on tagged objects when roundtripping (reported by `Zaloo + `__) + +0.15.81 (2018-12-06): + - fix issue dumping methods of metaclass derived classes (reported and fix provided + by `Douglas Raillard `__) + +0.15.80 (2018-11-26): + - fix issue emitting BEL character when round-tripping invalid folded input + (reported by Isaac on `StackOverflow `__) + +0.15.79 (2018-11-21): + - fix issue with anchors nested deeper than alias (reported by gaFF on + `StackOverflow `__) + +0.15.78 (2018-11-15): + - fix setup issue for 3.8 (reported by `Sidney Kuyateh + `__) + +0.15.77 (2018-11-09): + - setting `yaml.sort_base_mapping_type_on_output = False`, will prevent + explicit sorting by keys in the base representer of mappings. Roundtrip + already did not do this. Usage only makes real sense for Python 3.6+ + (feature request by `Sebastian Gerber `__). + - implement Python version check in YAML metadata in ``_test/test_z_data.py`` + +0.15.76 (2018-11-01): + - fix issue with empty mapping and sequence loaded as flow-style + (mapping reported by `Min RK `__, sequence + by `Maged Ahmed `__) + +0.15.75 (2018-10-27): + - fix issue with single '?' scalar (reported by `Terrance + `__) + - fix issue with duplicate merge keys (prompted by `answering + `__ a + `StackOverflow question `__ + by `math `__) + +0.15.74 (2018-10-17): + - fix dropping of comment on rt before sequence item that is sequence item + (reported by `Thorsten Kampe `__) + +0.15.73 (2018-10-16): + - fix irregular output on pre-comment in sequence within sequence (reported + by `Thorsten Kampe `__) + - allow non-compact (i.e. next line) dumping sequence/mapping within sequence. + +0.15.72 (2018-10-06): + - fix regression on explicit 1.1 loading with the C based scanner/parser + (reported by `Tomas Vavra `__) + +0.15.71 (2018-09-26): + - some of the tests now live in YAML files in the + `yaml.data `__ repository. + ``_test/test_z_data.py`` processes these. 
+ - fix regression where handcrafted CommentedMaps could not be initiated (reported by + `Dan Helfman `__) + - fix regression with non-root literal scalars that needed indent indicator + (reported by `Clark Breyman `__) + - tag:yaml.org,2002:python/object/apply now also uses __qualname__ on PY3 + (reported by `Douglas RAILLARD `__) + - issue with self-referring object creation + (reported and fix by `Douglas RAILLARD `__) + +0.15.70 (2018-09-21): + - reverted CommentedMap and CommentedSeq to subclass ordereddict resp. list, + reimplemented merge maps so that both ``dict(**commented_map_instance)`` and JSON + dumping works. This also allows checking with ``isinstance()`` on ``dict`` resp. ``list``. + (Proposed by `Stuart Berg `__, with feedback + from `blhsing `__ on + `StackOverflow `__) + +0.15.69 (2018-09-20): + - fix issue with dump_all gobbling end-of-document comments on parsing + (reported by `Pierre B. `__) + +0.15.68 (2018-09-20): + - fix issue with parsabel, but incorrect output with nested flow-style sequences + (reported by `Dougal Seeley `__) + - fix issue with loading Python objects that have __setstate__ and recursion in parameters + (reported by `Douglas RAILLARD `__) + +0.15.67 (2018-09-19): + - fix issue with extra space inserted with non-root literal strings + (Issue reported and PR with fix provided by + `Naomi Seyfer `__.) + +0.15.66 (2018-09-07): + - fix issue with fold indicating characters inserted in safe_load-ed folded strings + (reported by `Maximilian Hils `__). + +0.15.65 (2018-09-07): + - fix issue #232 revert to throw ParserError for unexcpected ``]`` + and ``}`` instead of IndexError. (Issue reported and PR with fix + provided by `Naomi Seyfer `__.) + - added ``key`` and ``reverse`` parameter (suggested by Jannik Klemm via email) + - indent root level literal scalars that have directive or document end markers + at the beginning of a line + +0.15.64 (2018-08-30): + - support round-trip of tagged sequences: ``!Arg [a, {b: 1}]`` + - single entry mappings in flow sequences now written by default without braces, + set ``yaml.brace_single_entry_mapping_in_flow_sequence=True`` to force + getting ``[a, {b: 1}, {c: {d: 2}}]`` instead of the default ``[a, b: 1, c: {d: 2}]`` + - fix issue when roundtripping floats starting with a dot such as ``.5`` + (reported by `Harrison Gregg `__) + +0.15.63 (2018-08-29): + - small fix only necessary for Windows users that don't use wheels. + +0.15.62 (2018-08-29): + - C based reader/scanner & emitter now allow setting of 1.2 as YAML version. + ** The loading/dumping is still YAML 1.1 code**, so use the common subset of + YAML 1.2 and 1.1 (reported by `Ge Yang `__) + +0.15.61 (2018-08-23): + - support for round-tripping folded style scalars (initially requested + by `Johnathan Viduchinsky `__) + - update of C code + - speed up of scanning (~30% depending on the input) + +0.15.60 (2018-08-18): + - again allow single entry map in flow sequence context (reported by + `Lee Goolsbee `__) + - cleanup for mypy + - spurious print in library (reported by + `Lele Gaifax `__), now automatically checked + +0.15.59 (2018-08-17): + - issue with C based loader and leading zeros (reported by + `Tom Hamilton Stubber `__) + +0.15.58 (2018-08-17): + - simple mappings can now be used as keys when round-tripping:: + + {a: 1, b: 2}: hello world + + although using the obvious operations (del, popitem) on the key will + fail, you can mutilate it by going through its attributes. 
If you load the + above YAML in `d`, then changing the value is cumbersome: + + d = {CommentedKeyMap([('a', 1), ('b', 2)]): "goodbye"} + + and changing the key even more so: + + d[CommentedKeyMap([('b', 1), ('a', 2)])] = d.pop( + CommentedKeyMap([('a', 1), ('b', 2)])) + + (you can use a `dict` instead of a list of tuples (or ordereddict), but that might result + in a different order, of the keys of the key, in the output) + - check integers to dump with 1.2 patterns instead of 1.1 (reported by + `Lele Gaifax `__) + + +0.15.57 (2018-08-15): + - Fix that CommentedSeq could no longer be used in adding or do a sort + (reported by `Christopher Wright `__) + +0.15.56 (2018-08-15): + - fix issue with ``python -O`` optimizing away code (reported, and detailed cause + pinpointed, by `Alex Grönholm `__) + +0.15.55 (2018-08-14): + - unmade ``CommentedSeq`` a subclass of ``list``. It is now + indirectly a subclass of the standard + ``collections.abc.MutableSequence`` (without .abc if you are + still on Python2.7). If you do ``isinstance(yaml.load('[1, 2]'), + list)``) anywhere in your code replace ``list`` with + ``MutableSequence``. Directly, ``CommentedSeq`` is a subclass of + the abstract baseclass ``ruamel.yaml.compat.MutableScliceableSequence``, + with the result that *(extended) slicing is supported on + ``CommentedSeq``*. + (reported by `Stuart Berg `__) + - duplicate keys (or their values) with non-ascii now correctly + report in Python2, instead of raising a Unicode error. + (Reported by `Jonathan Pyle `__) + +0.15.54 (2018-08-13): + - fix issue where a comment could pop-up twice in the output (reported by + `Mike Kazantsev `__ and by + `Nate Peterson `__) + - fix issue where JSON object (mapping) without spaces was not parsed + properly (reported by `Marc Schmidt `__) + - fix issue where comments after empty flow-style mappings were not emitted + (reported by `Qinfench Chen `__) + +0.15.53 (2018-08-12): + - fix issue with flow style mapping with comments gobbled newline (reported + by `Christopher Lambert `__) + - fix issue where single '+' under YAML 1.2 was interpreted as + integer, erroring out (reported by `Jethro Yu + `__) + +0.15.52 (2018-08-09): + - added `.copy()` mapping representation for round-tripping + (``CommentedMap``) to fix incomplete copies of merged mappings + (reported by `Will Richards + `__) + - Also unmade that class a subclass of ordereddict to solve incorrect behaviour + for ``{**merged-mapping}`` and ``dict(**merged-mapping)`` (reported independently by + `Tim Olsson `__ and + `Filip Matzner `__) + +0.15.51 (2018-08-08): + - Fix method name dumps (were not dotted) and loads (reported by `Douglas Raillard + `__) + - Fix spurious trailing white-space caused when the comment start + column was no longer reached and there was no actual EOL comment + (e.g. following empty line) and doing substitutions, or when + quotes around scalars got dropped. (reported by `Thomas Guillet + `__) + +0.15.50 (2018-08-05): + - Allow ``YAML()`` as a context manager for output, thereby making it much easier + to generate multi-documents in a stream. + - Fix issue with incorrect type information for `load()` and `dump()` (reported + by `Jimbo Jim `__) + +0.15.49 (2018-08-05): + - fix preservation of leading newlines in root level literal style scalar, + and preserve comment after literal style indicator (``| # some comment``) + Both needed for round-tripping multi-doc streams in + `ryd `__. 
+ +0.15.48 (2018-08-03): + - housekeeping: ``oitnb`` for formatting, mypy 0.620 upgrade and conformity + +0.15.47 (2018-07-31): + - fix broken 3.6 manylinux1, the result of an unclean ``build`` (reported by + `Roman Sichnyi `__) + + +0.15.46 (2018-07-29): + - fixed DeprecationWarning for importing from ``collections`` on 3.7 + (issue 210, reported by `Reinoud Elhorst + `__). It was `difficult to find + why tox/pytest did not report + `__ and as time + consuming to actually `fix + `__ the tests. + +0.15.45 (2018-07-26): + - After adding failing test for ``YAML.load_all(Path())``, remove StopIteration + (PR provided by `Zachary Buhman `__, + also reported by `Steven Hiscocks `__. + +0.15.44 (2018-07-14): + - Correct loading plain scalars consisting of numerals only and + starting with `0`, when not explicitly specifying YAML version + 1.1. This also fixes the issue about dumping string `'019'` as + plain scalars as reported by `Min RK + `__, that prompted this chance. + +0.15.43 (2018-07-12): + - merge PR33: Python2.7 on Windows is narrow, but has no + ``sysconfig.get_config_var('Py_UNICODE_SIZE')``. (merge provided by + `Marcel Bargull `__) + - ``register_class()`` now returns class (proposed by + `Mike Nerone `__} + +0.15.42 (2018-07-01): + - fix regression showing only on narrow Python 2.7 (py27mu) builds + (with help from + `Marcel Bargull `__ and + `Colm O'Connor `__). + - run pre-commit ``tox`` on Python 2.7 wide and narrow, as well as + 3.4/3.5/3.6/3.7/pypy + +0.15.41 (2018-06-27): + - add detection of C-compile failure (investigation prompted by + `StackOverlow `__ by + `Emmanuel Blot `__), + which was removed while no longer dependent on ``libyaml``, C-extensions + compilation still needs a compiler though. + +0.15.40 (2018-06-18): + - added links to landing places as suggested in issue 190 by + `KostisA `__ + - fixes issue #201: decoding unicode escaped tags on Python2, reported + by `Dan Abolafia `__ + +0.15.39 (2018-06-17): + - merge PR27 improving package startup time (and loading when regexp not + actually used), provided by + `Marcel Bargull `__ + +0.15.38 (2018-06-13): + - fix for losing precision when roundtripping floats by + `Rolf Wojtech `__ + - fix for hardcoded dir separator not working for Windows by + `Nuno André `__ + - typo fix by `Andrey Somov `__ + +0.15.37 (2018-03-21): + - again trying to create installable files for 187 + +0.15.36 (2018-02-07): + - fix issue 187, incompatibility of C extension with 3.7 (reported by + Daniel Blanchard) + +0.15.35 (2017-12-03): + - allow ``None`` as stream when specifying ``transform`` parameters to + ``YAML.dump()``. + This is useful if the transforming function doesn't return a meaningful value + (inspired by `StackOverflow `__ by + `rsaw `__). + +0.15.34 (2017-09-17): + - fix for issue 157: CDumper not dumping floats (reported by Jan Smitka) + +0.15.33 (2017-08-31): + - support for "undefined" round-tripping tagged scalar objects (in addition to + tagged mapping object). Inspired by a use case presented by Matthew Patton + on `StackOverflow `__. + - fix issue 148: replace cryptic error message when using !!timestamp with an + incorrectly formatted or non- scalar. Reported by FichteFoll. + +0.15.32 (2017-08-21): + - allow setting ``yaml.default_flow_style = None`` (default: ``False``) for + for ``typ='rt'``. 
+ - fix for issue 149: multiplications on ``ScalarFloat`` now return ``float`` + (reported by jan.brezina@tul.cz) + +0.15.31 (2017-08-15): + - fix Comment dumping + +0.15.30 (2017-08-14): + - fix for issue with "compact JSON" not parsing: ``{"in":{},"out":{}}`` + (reported on `StackOverflow `__ by + `mjalkio `_ + +0.15.29 (2017-08-14): + - fix issue #51: different indents for mappings and sequences (reported by + Alex Harvey) + - fix for flow sequence/mapping as element/value of block sequence with + sequence-indent minus dash-offset not equal two. + +0.15.28 (2017-08-13): + - fix issue #61: merge of merge cannot be __repr__-ed (reported by Tal Liron) + +0.15.27 (2017-08-13): + - fix issue 62, YAML 1.2 allows ``?`` and ``:`` in plain scalars if non-ambigious + (reported by nowox) + - fix lists within lists which would make comments disappear + +0.15.26 (2017-08-10): + - fix for disappearing comment after empty flow sequence (reported by + oit-tzhimmash) + +0.15.25 (2017-08-09): + - fix for problem with dumping (unloaded) floats (reported by eyenseo) + +0.15.24 (2017-08-09): + - added ScalarFloat which supports roundtripping of 23.1, 23.100, + 42.00E+56, 0.0, -0.0 etc. while keeping the format. Underscores in mantissas + are not preserved/supported (yet, is anybody using that?). + - (finally) fixed longstanding issue 23 (reported by `Antony Sottile + `__), now handling comment between block + mapping key and value correctly + - warn on YAML 1.1 float input that is incorrect (triggered by invalid YAML + provided by Cecil Curry) + - allow setting of boolean representation (`false`, `true`) by using: + ``yaml.boolean_representation = [u'False', u'True']`` + +0.15.23 (2017-08-01): + - fix for round_tripping integers on 2.7.X > sys.maxint (reported by ccatterina) + +0.15.22 (2017-07-28): + - fix for round_tripping singe excl. mark tags doubling (reported and fix by Jan Brezina) + +0.15.21 (2017-07-25): + - fix for writing unicode in new API, (reported on + `StackOverflow `__ + +0.15.20 (2017-07-23): + - wheels for windows including C extensions + +0.15.19 (2017-07-13): + - added object constructor for rt, decorator ``yaml_object`` to replace YAMLObject. + - fix for problem using load_all with Path() instance + - fix for load_all in combination with zero indent block style literal + (``pure=True`` only!) 
+ +0.15.18 (2017-07-04): + - missing ``pure`` attribute on ``YAML`` useful for implementing `!include` tag + constructor for `including YAML files in a YAML file + `__ + - some documentation improvements + - trigger of doc build on new revision + +0.15.17 (2017-07-03): + - support for Unicode supplementary Plane **output** + (input was already supported, triggered by + `this `__ Stack Overflow Q&A) + +0.15.16 (2017-07-01): + - minor typing issues (reported and fix provided by + `Manvendra Singh `__ + - small doc improvements + +0.15.15 (2017-06-27): + - fix for issue 135, typ='safe' not dumping in Python 2.7 + (reported by Andrzej Ostrowski `__) + +0.15.14 (2017-06-25): + - fix for issue 133, in setup.py: change ModuleNotFoundError to + ImportError (reported and fix by + `Asley Drake `__) + +0.15.13 (2017-06-24): + - suppress duplicate key warning on mappings with merge keys (reported by + Cameron Sweeney) + +0.15.12 (2017-06-24): + - remove fatal dependency of setup.py on wheel package (reported by + Cameron Sweeney) + +0.15.11 (2017-06-24): + - fix for issue 130, regression in nested merge keys (reported by + `David Fee `__) + +0.15.10 (2017-06-23): + - top level PreservedScalarString not indented if not explicitly asked to + - remove Makefile (not very useful anyway) + - some mypy additions + +0.15.9 (2017-06-16): + - fix for issue 127: tagged scalars were always quoted and seperated + by a newline when in a block sequence (reported and largely fixed by + `Tommy Wang `__) + +0.15.8 (2017-06-15): + - allow plug-in install via ``install ruamel.yaml[jinja2]`` + +0.15.7 (2017-06-14): + - add plug-in mechanism for load/dump pre resp. post-processing + +0.15.6 (2017-06-10): + - a set() with duplicate elements now throws error in rt loading + - support for toplevel column zero literal/folded scalar in explicit documents + +0.15.5 (2017-06-08): + - repeat `load()` on a single `YAML()` instance would fail. + +0.15.4 (2017-06-08): + - `transform` parameter on dump that expects a function taking a + string and returning a string. This allows transformation of the output + before it is written to stream. This forces creation of the complete output in memory! + - some updates to the docs + +0.15.3 (2017-06-07): + - No longer try to compile C extensions on Windows. Compilation can be forced by setting + the environment variable `RUAMEL_FORCE_EXT_BUILD` to some value + before starting the `pip install`. + +0.15.2 (2017-06-07): + - update to conform to mypy 0.511: mypy --strict + +0.15.1 (2017-06-07): + - `duplicate keys `__ + in mappings generate an error (in the old API this change generates a warning until 0.16) + - dependecy on ruamel.ordereddict for 2.7 now via extras_require + +0.15.0 (2017-06-04): + - it is now allowed to pass in a ``pathlib.Path`` as "stream" parameter to all + load/dump functions + - passing in a non-supported object (e.g. a string) as "stream" will result in a + much more meaningful YAMLStreamError. + - assigning a normal string value to an existing CommentedMap key or CommentedSeq + element will result in a value cast to the previous value's type if possible. 
+ - added ``YAML`` class for new API + +0.14.12 (2017-05-14): + - fix for issue 119, deepcopy not returning subclasses (reported and PR by + Constantine Evans ) + +0.14.11 (2017-05-01): + - fix for issue 103 allowing implicit documents after document end marker line (``...``) + in YAML 1.2 + +0.14.10 (2017-04-26): + - fix problem with emitting using cyaml + +0.14.9 (2017-04-22): + - remove dependency on ``typing`` while still supporting ``mypy`` + (http://stackoverflow.com/a/43516781/1307905) + - fix unclarity in doc that stated 2.6 is supported (reported by feetdust) + +0.14.8 (2017-04-19): + - fix Text not available on 3.5.0 and 3.5.1, now proactively setting version guards + on all files (reported by `João Paulo Magalhães `__) + +0.14.7 (2017-04-18): + - round trip of integers (decimal, octal, hex, binary) now preserve + leading zero(s) padding and underscores. Underscores are presumed + to be at regular distances (i.e. ``0o12_345_67`` dumps back as + ``0o1_23_45_67`` as the space from the last digit to the + underscore before that is the determining factor). + +0.14.6 (2017-04-14): + - binary, octal and hex integers are now preserved by default. This + was a known deficiency. Working on this was prompted by the issue report (112) + from devnoname120, as well as the additional experience with `.replace()` + on `scalarstring` classes. + - fix issues 114: cannot install on Buildozer (reported by mixmastamyk). + Setting env. var ``RUAMEL_NO_PIP_INSTALL_CHECK`` will suppress ``pip``-check. + +0.14.5 (2017-04-04): + - fix issue 109: None not dumping correctly at top level (reported by Andrea Censi) + - fix issue 110: .replace on Preserved/DoubleQuoted/SingleQuoted ScalarString + would give back "normal" string (reported by sandres23) + +0.14.4 (2017-03-31): + - fix readme + +0.14.3 (2017-03-31): + - fix for 0o52 not being a string in YAML 1.1 (reported on + `StackOverflow Q&A 43138503 `__ by + `Frank D `__) + +0.14.2 (2017-03-23): + - fix for old default pip on Ubuntu 14.04 (reported by Sébastien Maccagnoni-Munch) + +0.14.1 (2017-03-22): + - fix Text not available on 3.5.0 and 3.5.1 (reported by Charles Bouchard-Légaré) + +0.14.0 (2017-03-21): + - updates for mypy --strict + - preparation for moving away from inheritance in Loader and Dumper, calls from e.g. + the Representer to the Serializer.serialize() are now done via the attribute + .serializer.serialize(). 
Usage of .serialize() outside of Serializer will be + deprecated soon + - some extra tests on main.py functions + +---- + +For older changes see the file +`CHANGES `_ + + diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD new file mode 100644 index 0000000000..3158ffc8f9 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/RECORD @@ -0,0 +1,66 @@ +ruamel.yaml-0.16.13-py3.8-nspkg.pth,sha256=REN23ka76qAVtiuuP-WSrHAD4leicUsFB_AVCDfRe8U,539 +ruamel.yaml-0.16.13.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +ruamel.yaml-0.16.13.dist-info/LICENSE,sha256=wjyOB0soSsZk6bvkLuDrECh_0MViEw8Wlpb0UqCqVIU,1121 +ruamel.yaml-0.16.13.dist-info/METADATA,sha256=aO-BSNc5uLDwa1bC02EcSHf_Fr1gyhCL51tA98ZSd5g,36293 +ruamel.yaml-0.16.13.dist-info/RECORD,, +ruamel.yaml-0.16.13.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +ruamel.yaml-0.16.13.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110 +ruamel.yaml-0.16.13.dist-info/namespace_packages.txt,sha256=lu5ar9ilvyS03jNvS5x9I0_3NwCKkvIlY2k0QH9AArk,7 +ruamel.yaml-0.16.13.dist-info/top_level.txt,sha256=lu5ar9ilvyS03jNvS5x9I0_3NwCKkvIlY2k0QH9AArk,7 +ruamel/yaml/__init__.py,sha256=xElolpcdkbJ9aVecGFN3lSCt3ERtN0Fpp6PiOckm_w8,2160 +ruamel/yaml/__pycache__/__init__.cpython-36.pyc,, +ruamel/yaml/__pycache__/anchor.cpython-36.pyc,, +ruamel/yaml/__pycache__/comments.cpython-36.pyc,, +ruamel/yaml/__pycache__/compat.cpython-36.pyc,, +ruamel/yaml/__pycache__/composer.cpython-36.pyc,, +ruamel/yaml/__pycache__/configobjwalker.cpython-36.pyc,, +ruamel/yaml/__pycache__/constructor.cpython-36.pyc,, +ruamel/yaml/__pycache__/cyaml.cpython-36.pyc,, +ruamel/yaml/__pycache__/dumper.cpython-36.pyc,, +ruamel/yaml/__pycache__/emitter.cpython-36.pyc,, +ruamel/yaml/__pycache__/error.cpython-36.pyc,, +ruamel/yaml/__pycache__/events.cpython-36.pyc,, +ruamel/yaml/__pycache__/loader.cpython-36.pyc,, +ruamel/yaml/__pycache__/main.cpython-36.pyc,, +ruamel/yaml/__pycache__/nodes.cpython-36.pyc,, +ruamel/yaml/__pycache__/parser.cpython-36.pyc,, +ruamel/yaml/__pycache__/reader.cpython-36.pyc,, +ruamel/yaml/__pycache__/representer.cpython-36.pyc,, +ruamel/yaml/__pycache__/resolver.cpython-36.pyc,, +ruamel/yaml/__pycache__/scalarbool.cpython-36.pyc,, +ruamel/yaml/__pycache__/scalarfloat.cpython-36.pyc,, +ruamel/yaml/__pycache__/scalarint.cpython-36.pyc,, +ruamel/yaml/__pycache__/scalarstring.cpython-36.pyc,, +ruamel/yaml/__pycache__/scanner.cpython-36.pyc,, +ruamel/yaml/__pycache__/serializer.cpython-36.pyc,, +ruamel/yaml/__pycache__/timestamp.cpython-36.pyc,, +ruamel/yaml/__pycache__/tokens.cpython-36.pyc,, +ruamel/yaml/__pycache__/util.cpython-36.pyc,, +ruamel/yaml/anchor.py,sha256=nuwuT1qRhXm1qw8sGrkJXyS83Z1V2y8s3CfBcVHOcFw,500 +ruamel/yaml/comments.py,sha256=-alQQy-DkutBSleoccs3fsjYnOhmNMYAX4VlTpvOL4k,35198 +ruamel/yaml/compat.py,sha256=b7Oo6_9etUsHTOwJYwqbtdis0PAgfLPX9_RPCp6bSY4,8720 +ruamel/yaml/composer.py,sha256=1Qq_e2UHJ7kg0hdFlZysckudMNCL9AbPxpafM208LqU,8292 +ruamel/yaml/configobjwalker.py,sha256=ceGXcllWyXuj3ZMlx250qcyKtWEQFCZzHv2U0zxGbGk,342 +ruamel/yaml/constructor.py,sha256=CfrevtL518frqAnq7UFstLeS-WoChtC_W7lxVXc6MtE,70520 +ruamel/yaml/cyaml.py,sha256=D7lSKxk_eJf4V4A1wlc7a7h_XGuKA_1x4lKD-cce3g8,6584 +ruamel/yaml/dumper.py,sha256=mneJV-_kKccjquDOVo4TGVpsx7w6bPadp9sw2h5WkLw,6625 
+ruamel/yaml/emitter.py,sha256=L0SrncZ7rKWuwPfV3peJgfnB7QDyOAAKB1oXKi5F_T8,64430 +ruamel/yaml/error.py,sha256=4uu2Nzj8h8lP59tMJZA5g7HAHQosscj-TU6ihCh6gtg,8979 +ruamel/yaml/events.py,sha256=0WBCPgpFzv4MKDN7ijApePTP58bxzTNM7Me1Hi0HA8g,3902 +ruamel/yaml/loader.py,sha256=Ke8uCiUocDZcooIzB7-GhqywIteVnIQ3hhG9qDLihQk,2951 +ruamel/yaml/main.py,sha256=lAjb9ovyGlasu5x5DriDM9YFeq-ILtHepZoVIdVfUcA,54126 +ruamel/yaml/nodes.py,sha256=KglOPI3ex9RvVCOm7CPtkPkdeYQXKLYbd2Ji_siaDJ0,3716 +ruamel/yaml/parser.py,sha256=dkBWBKfuVxuuUd7yFNR1emYz7AMGR-Ratlz8uxCVXxI,33245 +ruamel/yaml/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +ruamel/yaml/reader.py,sha256=qu3kzrVpxDxvTyOyFjFUTbmq__hBauNFw5HTOSYJTCY,10876 +ruamel/yaml/representer.py,sha256=-AQjn4j6gxfxht8yIeoRdQuUkai4EHVsPfQKHukrXlw,48729 +ruamel/yaml/resolver.py,sha256=k8SuJkqeomSc_fEKJivVRaO0E19cLTZN1yTlsLZk0c0,15341 +ruamel/yaml/scalarbool.py,sha256=hob48OhRenryWj5HZp4500KUn7OXnxsadS5trKMPEok,1531 +ruamel/yaml/scalarfloat.py,sha256=I-WILi1s_-XfW3hZy_FrMeZavJzNx0caZ7fICm5Akzw,4406 +ruamel/yaml/scalarint.py,sha256=vhHYXeOsevwb6S99Xgt9Qweu-OkOjFWVD3HC3HuYO8s,4462 +ruamel/yaml/scalarstring.py,sha256=xJnrp7aUbI4l_nJIF1C__J59h-71bM5b2uIXln3x-Fs,4536 +ruamel/yaml/scanner.py,sha256=f_QQBVGlp1ZU0U172s31Ry8a2Ao1frXNKtbPNlY0wqA,72192 +ruamel/yaml/serializer.py,sha256=waa4VLbKgacMiwYVhgbaK-EmRAygEvBEC7aPReWujL0,8412 +ruamel/yaml/timestamp.py,sha256=c07UAzB4HcTZqC4NADL-gLVCOa9byI8gqJhIYk9UbtQ,1792 +ruamel/yaml/tokens.py,sha256=z1gPCgyz7dhdBdKKK3UPTw_EAwELaPRRExMa9KIV6q8,7471 +ruamel/yaml/util.py,sha256=B_SnRV9VV7OKLAVolZqepFLqAhnpJRWNrgdrKDkvEao,6127 diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/REQUESTED b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/REQUESTED new file mode 100644 index 0000000000..e69de29bb2 diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL new file mode 100644 index 0000000000..ef99c6cf32 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel.yaml-0.16.13.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.34.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py new file mode 100644 index 0000000000..ae058138a5 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/__init__.py @@ -0,0 +1,59 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +if False: # MYPY + from typing import Dict, Any # NOQA + +_package_data = dict( + full_package_name='ruamel.yaml', + version_info=(0, 16, 13), + __version__='0.16.13', + author='Anthon van der Neut', + author_email='a.van.der.neut@ruamel.eu', + description='ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order', # NOQA + entry_points=None, + 
since=2014, + extras_require={ + ':platform_python_implementation=="CPython" and python_version<="2.7"': ['ruamel.ordereddict'], # NOQA + ':platform_python_implementation=="CPython" and python_version<"3.10"': ['ruamel.yaml.clib>=0.1.2'], # NOQA + 'jinja2': ['ruamel.yaml.jinja2>=0.2'], + 'docs': ['ryd'], + }, + classifiers=[ + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: PyPy', + 'Programming Language :: Python :: Implementation :: Jython', + 'Topic :: Software Development :: Libraries :: Python Modules', + 'Topic :: Text Processing :: Markup', + 'Typing :: Typed', + ], + keywords='yaml 1.2 parser round-trip preserve quotes order config', + read_the_docs='yaml', + supported=[(2, 7), (3, 5)], # minimum + tox=dict( + env='*', # remove 'pn', no longer test narrow Python 2.7 for unicode patterns and PyPy + deps='ruamel.std.pathlib', + fl8excl='_test/lib', + ), + universal=True, + rtfd='yaml', +) # type: Dict[Any, Any] + + +version_info = _package_data['version_info'] +__version__ = _package_data['__version__'] + +try: + from .cyaml import * # NOQA + + __with_libyaml__ = True +except (ImportError, ValueError): # for Jython + __with_libyaml__ = False + +from ...ruamel.yaml.main import * # NOQA diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py new file mode 100644 index 0000000000..d702126039 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/anchor.py @@ -0,0 +1,19 @@ +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA + +anchor_attrib = '_yaml_anchor' + + +class Anchor(object): + __slots__ = 'value', 'always_dump' + attrib = anchor_attrib + + def __init__(self): + # type: () -> None + self.value = None + self.always_dump = False + + def __repr__(self): + # type: () -> Any + ad = ', (always dump)' if self.always_dump else "" + return 'Anchor({!r}{})'.format(self.value, ad) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py new file mode 100644 index 0000000000..070597c2d6 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/comments.py @@ -0,0 +1,1154 @@ +# coding: utf-8 + +from __future__ import absolute_import, print_function + +""" +stuff to deal with comments and formatting on dict/list/ordereddict/set +these are not really related, formatting could be factored out as +a separate base +""" + +import sys +import copy + + +from ...ruamel.yaml.compat import ordereddict # type: ignore +from ...ruamel.yaml.compat import PY2, string_types, MutableSliceableSequence +from ...ruamel.yaml.scalarstring import ScalarString +from ...ruamel.yaml.anchor import Anchor + +if PY2: + from collections import MutableSet, Sized, Set, Mapping +else: + from collections.abc import MutableSet, Sized, Set, Mapping + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA + +# fmt: off +__all__ = ['CommentedSeq', 'CommentedKeySeq', + 'CommentedMap', 'CommentedOrderedMap', 
+ 'CommentedSet', 'comment_attrib', 'merge_attrib'] +# fmt: on + +comment_attrib = '_yaml_comment' +format_attrib = '_yaml_format' +line_col_attrib = '_yaml_line_col' +merge_attrib = '_yaml_merge' +tag_attrib = '_yaml_tag' + + +class Comment(object): + # sys.getsize tested the Comment objects, __slots__ makes them bigger + # and adding self.end did not matter + __slots__ = 'comment', '_items', '_end', '_start' + attrib = comment_attrib + + def __init__(self): + # type: () -> None + self.comment = None # [post, [pre]] + # map key (mapping/omap/dict) or index (sequence/list) to a list of + # dict: post_key, pre_key, post_value, pre_value + # list: pre item, post item + self._items = {} # type: Dict[Any, Any] + # self._start = [] # should not put these on first item + self._end = [] # type: List[Any] # end of document comments + + def __str__(self): + # type: () -> str + if bool(self._end): + end = ',\n end=' + str(self._end) + else: + end = "" + return 'Comment(comment={0},\n items={1}{2})'.format(self.comment, self._items, end) + + @property + def items(self): + # type: () -> Any + return self._items + + @property + def end(self): + # type: () -> Any + return self._end + + @end.setter + def end(self, value): + # type: (Any) -> None + self._end = value + + @property + def start(self): + # type: () -> Any + return self._start + + @start.setter + def start(self, value): + # type: (Any) -> None + self._start = value + + +# to distinguish key from None +def NoComment(): + # type: () -> None + pass + + +class Format(object): + __slots__ = ('_flow_style',) + attrib = format_attrib + + def __init__(self): + # type: () -> None + self._flow_style = None # type: Any + + def set_flow_style(self): + # type: () -> None + self._flow_style = True + + def set_block_style(self): + # type: () -> None + self._flow_style = False + + def flow_style(self, default=None): + # type: (Optional[Any]) -> Any + """if default (the flow_style) is None, the flow style tacked on to + the object explicitly will be taken. 
If that is None as well the + default flow style rules the format down the line, or the type + of the constituent values (simple -> flow, map/list -> block)""" + if self._flow_style is None: + return default + return self._flow_style + + +class LineCol(object): + attrib = line_col_attrib + + def __init__(self): + # type: () -> None + self.line = None + self.col = None + self.data = None # type: Optional[Dict[Any, Any]] + + def add_kv_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + def key(self, k): + # type: (Any) -> Any + return self._kv(k, 0, 1) + + def value(self, k): + # type: (Any) -> Any + return self._kv(k, 2, 3) + + def _kv(self, k, x0, x1): + # type: (Any, Any, Any) -> Any + if self.data is None: + return None + data = self.data[k] + return data[x0], data[x1] + + def item(self, idx): + # type: (Any) -> Any + if self.data is None: + return None + return self.data[idx][0], self.data[idx][1] + + def add_idx_line_col(self, key, data): + # type: (Any, Any) -> None + if self.data is None: + self.data = {} + self.data[key] = data + + +class Tag(object): + """store tag information for roundtripping""" + + __slots__ = ('value',) + attrib = tag_attrib + + def __init__(self): + # type: () -> None + self.value = None + + def __repr__(self): + # type: () -> Any + return '{0.__class__.__name__}({0.value!r})'.format(self) + + +class CommentedBase(object): + @property + def ca(self): + # type: () -> Any + if not hasattr(self, Comment.attrib): + setattr(self, Comment.attrib, Comment()) + return getattr(self, Comment.attrib) + + def yaml_end_comment_extend(self, comment, clear=False): + # type: (Any, bool) -> None + if comment is None: + return + if clear or self.ca.end is None: + self.ca.end = [] + self.ca.end.extend(comment) + + def yaml_key_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[1] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[1] = comment[1] + else: + r[1].extend(comment[0]) + r[0] = comment[0] + + def yaml_value_comment_extend(self, key, comment, clear=False): + # type: (Any, Any, bool) -> None + r = self.ca._items.setdefault(key, [None, None, None, None]) + if clear or r[3] is None: + if comment[1] is not None: + assert isinstance(comment[1], list) + r[3] = comment[1] + else: + r[3].extend(comment[0]) + r[2] = comment[0] + + def yaml_set_start_comment(self, comment, indent=0): + # type: (Any, Any) -> None + """overwrites any preceding comment lines on an object + expects comment to be without `#` and possible have multiple lines + """ + from .error import CommentMark + from .tokens import CommentToken + + pre_comments = self._yaml_get_pre_comment() + if comment[-1] == '\n': + comment = comment[:-1] # strip final newline if there + start_mark = CommentMark(indent) + for com in comment.split('\n'): + c = com.strip() + if len(c) > 0 and c[0] != '#': + com = '# ' + com + pre_comments.append(CommentToken(com + '\n', start_mark, None)) + + def yaml_set_comment_before_after_key( + self, key, before=None, indent=0, after=None, after_indent=None + ): + # type: (Any, Any, Any, Any, Any) -> None + """ + expects comment (before/after) to be without `#` and possible have multiple lines + """ + from ...ruamel.yaml.error import CommentMark + from ...ruamel.yaml.tokens import CommentToken + + def comment_token(s, mark): + # type: (Any, Any) -> Any + # handle empty lines as 
having no comment + return CommentToken(('# ' if s else "") + s + '\n', mark, None) + + if after_indent is None: + after_indent = indent + 2 + if before and (len(before) > 1) and before[-1] == '\n': + before = before[:-1] # strip final newline if there + if after and after[-1] == '\n': + after = after[:-1] # strip final newline if there + start_mark = CommentMark(indent) + c = self.ca.items.setdefault(key, [None, [], None, None]) + if before == '\n': + c[1].append(comment_token("", start_mark)) + elif before: + for com in before.split('\n'): + c[1].append(comment_token(com, start_mark)) + if after: + start_mark = CommentMark(after_indent) + if c[3] is None: + c[3] = [] + for com in after.split('\n'): + c[3].append(comment_token(com, start_mark)) # type: ignore + + @property + def fa(self): + # type: () -> Any + """format attribute + + set_flow_style()/set_block_style()""" + if not hasattr(self, Format.attrib): + setattr(self, Format.attrib, Format()) + return getattr(self, Format.attrib) + + def yaml_add_eol_comment(self, comment, key=NoComment, column=None): + # type: (Any, Optional[Any], Optional[Any]) -> None + """ + there is a problem as eol comments should start with ' #' + (but at the beginning of the line the space doesn't have to be before + the #. The column index is for the # mark + """ + from .tokens import CommentToken + from .error import CommentMark + + if column is None: + try: + column = self._yaml_get_column(key) + except AttributeError: + column = 0 + if comment[0] != '#': + comment = '# ' + comment + if column is None: + if comment[0] == '#': + comment = ' ' + comment + column = 0 + start_mark = CommentMark(column) + ct = [CommentToken(comment, start_mark, None), None] + self._yaml_add_eol_comment(ct, key=key) + + @property + def lc(self): + # type: () -> Any + if not hasattr(self, LineCol.attrib): + setattr(self, LineCol.attrib, LineCol()) + return getattr(self, LineCol.attrib) + + def _yaml_set_line_col(self, line, col): + # type: (Any, Any) -> None + self.lc.line = line + self.lc.col = col + + def _yaml_set_kv_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_kv_line_col(key, data) + + def _yaml_set_idx_line_col(self, key, data): + # type: (Any, Any) -> None + self.lc.add_idx_line_col(key, data) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + return None + return self.anchor + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + @property + def tag(self): + # type: () -> Any + if not hasattr(self, Tag.attrib): + setattr(self, Tag.attrib, Tag()) + return getattr(self, Tag.attrib) + + def yaml_set_tag(self, value): + # type: (Any) -> None + self.tag.value = value + + def copy_attributes(self, t, memo=None): + # type: (Any, Any) -> None + # fmt: off + for a in [Comment.attrib, Format.attrib, LineCol.attrib, Anchor.attrib, + Tag.attrib, merge_attrib]: + if hasattr(self, a): + if memo is not None: + setattr(t, a, copy.deepcopy(getattr(self, a, memo))) + else: + setattr(t, a, getattr(self, a)) + # fmt: on + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + raise NotImplementedError + + def _yaml_get_pre_comment(self): + # type: () -> Any + raise NotImplementedError + + def _yaml_get_column(self, key): + # type: 
(Any) -> Any + raise NotImplementedError + + +class CommentedSeq(MutableSliceableSequence, list, CommentedBase): # type: ignore + __slots__ = (Comment.attrib, '_lst') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + list.__init__(self, *args, **kw) + + def __getsingleitem__(self, idx): + # type: (Any) -> Any + return list.__getitem__(self, idx) + + def __setsingleitem__(self, idx, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if idx < len(self): + if ( + isinstance(value, string_types) + and not isinstance(value, ScalarString) + and isinstance(self[idx], ScalarString) + ): + value = type(self[idx])(value) + list.__setitem__(self, idx, value) + + def __delsingleitem__(self, idx=None): + # type: (Any) -> Any + list.__delitem__(self, idx) + self.ca.items.pop(idx, None) # might not be there -> default value + for list_index in sorted(self.ca.items): + if list_index < idx: + continue + self.ca.items[list_index - 1] = self.ca.items.pop(list_index) + + def __len__(self): + # type: () -> int + return list.__len__(self) + + def insert(self, idx, val): + # type: (Any, Any) -> None + """the comments after the insertion have to move forward""" + list.insert(self, idx, val) + for list_index in sorted(self.ca.items, reverse=True): + if list_index < idx: + break + self.ca.items[list_index + 1] = self.ca.items.pop(list_index) + + def extend(self, val): + # type: (Any) -> None + list.extend(self, val) + + def __eq__(self, other): + # type: (Any) -> bool + return list.__eq__(self, other) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res.append(copy.deepcopy(k, memo)) + self.copy_attributes(res, memo=memo) + return res + + def __add__(self, other): + # type: (Any) -> Any + return list.__add__(self, other) + + def sort(self, key=None, reverse=False): # type: ignore + # type: (Any, bool) -> None + if key is None: + tmp_lst = sorted(zip(self, range(len(self))), reverse=reverse) + list.__init__(self, [x[0] for x in tmp_lst]) + else: + tmp_lst = sorted( + zip(map(key, list.__iter__(self)), range(len(self))), reverse=reverse + ) + list.__init__(self, [list.__getitem__(self, x[1]) for x in tmp_lst]) + itm = self.ca.items + self.ca._items = {} + for idx, x in enumerate(tmp_lst): + old_index = x[1] + if old_index in itm: + 
self.ca.items[idx] = itm[old_index] + + def __repr__(self): + # type: () -> Any + return list.__repr__(self) + + +class CommentedKeySeq(tuple, CommentedBase): # type: ignore + """This primarily exists to be able to roundtrip keys that are sequences""" + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedMapView(Sized): + __slots__ = ('_mapping',) + + def __init__(self, mapping): + # type: (Any) -> None + self._mapping = mapping + + def __len__(self): + # type: () -> int + count = len(self._mapping) + return count + + +class CommentedMapKeysView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, key): + # type: (Any) -> Any + return key in self._mapping + + def __iter__(self): + # type: () -> Any # yield from self._mapping # not in py27, pypy + # for x in self._mapping._keys(): + for x in self._mapping: + yield x + + +class CommentedMapItemsView(CommentedMapView, Set): # type: ignore + __slots__ = () + + @classmethod + def _from_iterable(self, it): + # type: (Any) -> Any + return set(it) + + def __contains__(self, item): + # type: (Any) -> Any + key, value = item + try: + v = self._mapping[key] + except KeyError: + return False + else: + return v == value + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield (key, self._mapping[key]) + + +class CommentedMapValuesView(CommentedMapView): + __slots__ = () + + def __contains__(self, value): + # type: (Any) -> Any + for key in self._mapping: + if value == self._mapping[key]: + return True + return False + + def __iter__(self): + # type: () -> Any + for key in self._mapping._keys(): + yield self._mapping[key] + + +class CommentedMap(ordereddict, CommentedBase): # type: ignore + __slots__ = (Comment.attrib, '_ok', '_ref') + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._ok = set() # type: MutableSet[Any] # own keys + self._ref = [] # type: List[CommentedMap] + ordereddict.__init__(self, *args, **kw) + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = 
comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][2].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post, last = None, None, None + for x in self: + if pre is not None and x != key: + post = x + break + if x == key: + pre = last + last = x + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for k1 in self: + if k1 >= key: + break + if k1 not in self.ca.items: + continue + sel_idx = k1 + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + def update(self, *vals, **kw): + # type: (Any, Any) -> None + try: + ordereddict.update(self, *vals, **kw) + except TypeError: + # probably a dict that is used + for x in vals[0]: + self[x] = vals[0][x] + try: + self._ok.update(vals.keys()) # type: ignore + except AttributeError: + # assume one argument that is a list/tuple of two element lists/tuples + for x in vals[0]: + self._ok.add(x[0]) + if kw: + self._ok.add(*kw.keys()) + + def insert(self, pos, key, value, comment=None): + # type: (Any, Any, Any, Optional[Any]) -> None + """insert key value into given position + attach comment if provided + """ + ordereddict.insert(self, pos, key, value) + self._ok.add(key) + if comment is not None: + self.yaml_add_eol_comment(comment, key=key) + + def mlget(self, key, default=None, list_ok=False): + # type: (Any, Any, Any) -> Any + """multi-level get that expects dicts within dicts""" + if not isinstance(key, list): + return self.get(key, default) + # assume that the key is a list of recursively accessible dicts + + def get_one_level(key_list, level, d): + # type: (Any, Any, Any) -> Any + if not list_ok: + assert isinstance(d, dict) + if level >= len(key_list): + if level > len(key_list): + raise IndexError + return d[key_list[level - 1]] + return get_one_level(key_list, level + 1, d[key_list[level - 1]]) + + try: + return get_one_level(key, 1, self) + except KeyError: + return default + except (TypeError, IndexError): + if not list_ok: + raise + return default + + def __getitem__(self, key): + # type: (Any) -> Any + try: + return ordereddict.__getitem__(self, key) + except KeyError: + for merged in getattr(self, merge_attrib, []): + if key in merged[1]: + return merged[1][key] + raise + + def __setitem__(self, key, value): + # type: (Any, Any) -> None + # try to preserve the scalarstring type if setting an existing key to a new value + if key in self: + if ( + isinstance(value, string_types) + and not isinstance(value, ScalarString) + and isinstance(self[key], ScalarString) + ): + value = type(self[key])(value) + ordereddict.__setitem__(self, key, value) + self._ok.add(key) + + def _unmerged_contains(self, key): + # type: (Any) -> Any + if key in self._ok: + return True + return None + + def __contains__(self, key): + # type: (Any) -> bool + return bool(ordereddict.__contains__(self, key)) + + def get(self, key, default=None): + # type: (Any, Any) -> Any + try: + return self.__getitem__(key) + except: # NOQA + return 
default + + def __repr__(self): + # type: () -> Any + return ordereddict.__repr__(self).replace('CommentedMap', 'ordereddict') + + def non_merged_items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + if x in self._ok: + yield x, ordereddict.__getitem__(self, x) + + def __delitem__(self, key): + # type: (Any) -> None + # for merged in getattr(self, merge_attrib, []): + # if key in merged[1]: + # value = merged[1][key] + # break + # else: + # # not found in merged in stuff + # ordereddict.__delitem__(self, key) + # for referer in self._ref: + # referer.update_key_value(key) + # return + # + # ordereddict.__setitem__(self, key, value) # merge might have different value + # self._ok.discard(key) + self._ok.discard(key) + ordereddict.__delitem__(self, key) + for referer in self._ref: + referer.update_key_value(key) + + def __iter__(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def _keys(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x + + def __len__(self): + # type: () -> int + return int(ordereddict.__len__(self)) + + def __eq__(self, other): + # type: (Any) -> bool + return bool(dict(self) == other) + + if PY2: + + def keys(self): + # type: () -> Any + return list(self._keys()) + + def iterkeys(self): + # type: () -> Any + return self._keys() + + def viewkeys(self): + # type: () -> Any + return CommentedMapKeysView(self) + + else: + + def keys(self): + # type: () -> Any + return CommentedMapKeysView(self) + + if PY2: + + def _values(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield ordereddict.__getitem__(self, x) + + def values(self): + # type: () -> Any + return list(self._values()) + + def itervalues(self): + # type: () -> Any + return self._values() + + def viewvalues(self): + # type: () -> Any + return CommentedMapValuesView(self) + + else: + + def values(self): + # type: () -> Any + return CommentedMapValuesView(self) + + def _items(self): + # type: () -> Any + for x in ordereddict.__iter__(self): + yield x, ordereddict.__getitem__(self, x) + + if PY2: + + def items(self): + # type: () -> Any + return list(self._items()) + + def iteritems(self): + # type: () -> Any + return self._items() + + def viewitems(self): + # type: () -> Any + return CommentedMapItemsView(self) + + else: + + def items(self): + # type: () -> Any + return CommentedMapItemsView(self) + + @property + def merge(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + setattr(self, merge_attrib, []) + return getattr(self, merge_attrib) + + def copy(self): + # type: () -> Any + x = type(self)() # update doesn't work + for k, v in self._items(): + x[k] = v + self.copy_attributes(x) + return x + + def add_referent(self, cm): + # type: (Any) -> None + if cm not in self._ref: + self._ref.append(cm) + + def add_yaml_merge(self, value): + # type: (Any) -> None + for v in value: + v[1].add_referent(self) + for k, v in v[1].items(): + if ordereddict.__contains__(self, k): + continue + ordereddict.__setitem__(self, k, v) + self.merge.extend(value) + + def update_key_value(self, key): + # type: (Any) -> None + if key in self._ok: + return + for v in self.merge: + if key in v[1]: + ordereddict.__setitem__(self, key, v[1][key]) + return + ordereddict.__delitem__(self, key) + + def __deepcopy__(self, memo): + # type: (Any) -> Any + res = self.__class__() + memo[id(self)] = res + for k in self: + res[k] = copy.deepcopy(self[k], memo) + self.copy_attributes(res, memo=memo) + return res + + +# based on brownie mappings 
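# --- editor's illustration (not part of this patch) -------------------------
# A minimal sketch of how the round-trip containers defined above
# (CommentedMap / CommentedSeq and the CommentedBase comment helpers such as
# yaml_add_eol_comment) are typically used.  It assumes the upstream package
# name `ruamel.yaml`; inside insights-core the same modules are vendored under
# insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/.
import sys
from ruamel.yaml import YAML

yaml = YAML()                                  # round-trip mode: load() returns CommentedMap / CommentedSeq
data = yaml.load("a: 1  # keep me\nb: 2\nitems:\n- 10\n- 20\n")
print(data.lc.line, data.lc.col)               # position info exposed through the LineCol attribute
data.yaml_add_eol_comment("added later", key="b")
data["items"].append(30)                       # existing comments survive mutation
yaml.dump(data, sys.stdout)                    # '# keep me' and the new comment are emitted on dump
# -----------------------------------------------------------------------------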
+@classmethod # type: ignore +def raise_immutable(cls, *args, **kwargs): + # type: (Any, *Any, **Any) -> None + raise TypeError('{} objects are immutable'.format(cls.__name__)) + + +class CommentedKeyMap(CommentedBase, Mapping): # type: ignore + __slots__ = Comment.attrib, '_od' + """This primarily exists to be able to roundtrip keys that are mappings""" + + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + if hasattr(self, '_od'): + raise_immutable(self) + try: + self._od = ordereddict(*args, **kw) + except TypeError: + if PY2: + self._od = ordereddict(args[0].items()) + else: + raise + + __delitem__ = __setitem__ = clear = pop = popitem = setdefault = update = raise_immutable + + # need to implement __getitem__, __iter__ and __len__ + def __getitem__(self, index): + # type: (Any) -> Any + return self._od[index] + + def __iter__(self): + # type: () -> Iterator[Any] + for x in self._od.__iter__(): + yield x + + def __len__(self): + # type: () -> int + return len(self._od) + + def __hash__(self): + # type: () -> Any + return hash(tuple(self.items())) + + def __repr__(self): + # type: () -> Any + if not hasattr(self, merge_attrib): + return self._od.__repr__() + return 'ordereddict(' + repr(list(self._od.items())) + ')' + + @classmethod + def fromkeys(keys, v=None): + # type: (Any, Any) -> Any + return CommentedKeyMap(dict.fromkeys(keys, v)) + + def _yaml_add_comment(self, comment, key=NoComment): + # type: (Any, Optional[Any]) -> None + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + self._yaml_add_comment(comment, key=key) + + def _yaml_get_columnX(self, key): + # type: (Any) -> Any + return self.ca.items[key][0].start_mark.column + + def _yaml_get_column(self, key): + # type: (Any) -> Any + column = None + sel_idx = None + pre, post = key - 1, key + 1 + if pre in self.ca.items: + sel_idx = pre + elif post in self.ca.items: + sel_idx = post + else: + # self.ca.items is not ordered + for row_idx, _k1 in enumerate(self): + if row_idx >= key: + break + if row_idx not in self.ca.items: + continue + sel_idx = row_idx + if sel_idx is not None: + column = self._yaml_get_columnX(sel_idx) + return column + + def _yaml_get_pre_comment(self): + # type: () -> Any + pre_comments = [] # type: List[Any] + if self.ca.comment is None: + self.ca.comment = [None, pre_comments] + else: + self.ca.comment[1] = pre_comments + return pre_comments + + +class CommentedOrderedMap(CommentedMap): + __slots__ = (Comment.attrib,) + + +class CommentedSet(MutableSet, CommentedBase): # type: ignore # NOQA + __slots__ = Comment.attrib, 'odict' + + def __init__(self, values=None): + # type: (Any) -> None + self.odict = ordereddict() + MutableSet.__init__(self) + if values is not None: + self |= values # type: ignore + + def _yaml_add_comment(self, comment, key=NoComment, value=NoComment): + # type: (Any, Optional[Any], Optional[Any]) -> None + """values is set to key to indicate a value attachment of comment""" + if key is not NoComment: + self.yaml_key_comment_extend(key, comment) + return + if value is not NoComment: + self.yaml_value_comment_extend(value, comment) + else: + self.ca.comment = comment + + def _yaml_add_eol_comment(self, comment, key): + # type: (Any, Any) -> None + """add on the value line, with value specified by the key""" + self._yaml_add_comment(comment, value=key) + + def add(self, value): + # type: (Any) -> None + """Add an element.""" + 
self.odict[value] = None + + def discard(self, value): + # type: (Any) -> None + """Remove an element. Do not raise an exception if absent.""" + del self.odict[value] + + def __contains__(self, x): + # type: (Any) -> Any + return x in self.odict + + def __iter__(self): + # type: () -> Any + for x in self.odict: + yield x + + def __len__(self): + # type: () -> int + return len(self.odict) + + def __repr__(self): + # type: () -> str + return 'set({0!r})'.format(self.odict.keys()) + + +class TaggedScalar(CommentedBase): + # the value and style attributes are set during roundtrip construction + def __init__(self, value=None, style=None, tag=None): + # type: (Any, Any, Any) -> None + self.value = value + self.style = style + if tag is not None: + self.yaml_set_tag(tag) + + def __str__(self): + # type: () -> Any + return self.value + + +def dump_comments(d, name="", sep='.', out=sys.stdout): + # type: (Any, str, str, Any) -> None + """ + recursively dump comments, all but the toplevel preceded by the path + in dotted form x.0.a + """ + if isinstance(d, dict) and hasattr(d, 'ca'): + if name: + sys.stdout.write('{}\n'.format(name)) + out.write('{}\n'.format(d.ca)) # type: ignore + for k in d: + dump_comments(d[k], name=(name + sep + k) if name else k, sep=sep, out=out) + elif isinstance(d, list) and hasattr(d, 'ca'): + if name: + sys.stdout.write('{}\n'.format(name)) + out.write('{}\n'.format(d.ca)) # type: ignore + for idx, k in enumerate(d): + dump_comments( + k, name=(name + sep + str(idx)) if name else str(idx), sep=sep, out=out + ) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py new file mode 100644 index 0000000000..95f75b358b --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/compat.py @@ -0,0 +1,324 @@ +# coding: utf-8 + +from __future__ import print_function + +# partially from package six by Benjamin Peterson + +import sys +import os +import types +import traceback +from abc import abstractmethod + + +# fmt: off +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, BinaryIO, IO, Text, Tuple # NOQA + from typing import Optional # NOQA +# fmt: on + +_DEFAULT_YAML_VERSION = (1, 2) + +try: + from ...ruamel.ordereddict import ordereddict +except: # NOQA + try: + from collections import OrderedDict + except ImportError: + from ordereddict import OrderedDict # type: ignore + # to get the right name import ... 
as ordereddict doesn't do that + + class ordereddict(OrderedDict): # type: ignore + if not hasattr(OrderedDict, 'insert'): + + def insert(self, pos, key, value): + # type: (int, Any, Any) -> None + if pos >= len(self): + self[key] = value + return + od = ordereddict() + od.update(self) + for k in od: + del self[k] + for index, old_key in enumerate(od): + if pos == index: + self[key] = value + self[old_key] = od[old_key] + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +if PY3: + + def utf8(s): + # type: (str) -> str + return s + + def to_str(s): + # type: (str) -> str + return s + + def to_unicode(s): + # type: (str) -> str + return s + + +else: + if False: + unicode = str + + def utf8(s): + # type: (unicode) -> str + return s.encode('utf-8') + + def to_str(s): + # type: (str) -> str + return str(s) + + def to_unicode(s): + # type: (str) -> unicode + return unicode(s) # NOQA + + +if PY3: + string_types = str + integer_types = int + class_types = type + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize + unichr = chr + import io + + StringIO = io.StringIO + BytesIO = io.BytesIO + # have unlimited precision + no_limit_int = int + from collections.abc import Hashable, MutableSequence, MutableMapping, Mapping # NOQA + +else: + string_types = basestring # NOQA + integer_types = (int, long) # NOQA + class_types = (type, types.ClassType) + text_type = unicode # NOQA + binary_type = str + + # to allow importing + unichr = unichr + from StringIO import StringIO as _StringIO + + StringIO = _StringIO + import cStringIO + + BytesIO = cStringIO.StringIO + # have unlimited precision + no_limit_int = long # NOQA not available on Python 3 + from collections import Hashable, MutableSequence, MutableMapping, Mapping # NOQA + +if False: # MYPY + # StreamType = Union[BinaryIO, IO[str], IO[unicode], StringIO] + # StreamType = Union[BinaryIO, IO[str], StringIO] # type: ignore + StreamType = Any + + StreamTextType = StreamType # Union[Text, StreamType] + VersionType = Union[List[int], str, Tuple[int, int]] + +if PY3: + builtins_module = 'builtins' +else: + builtins_module = '__builtin__' + +UNICODE_SIZE = 4 if sys.maxunicode > 65535 else 2 + + +def with_metaclass(meta, *bases): + # type: (Any, Any) -> Any + """Create a base class with a metaclass.""" + return meta('NewBase', bases, {}) + + +DBG_TOKEN = 1 +DBG_EVENT = 2 +DBG_NODE = 4 + + +_debug = None # type: Optional[int] +if 'RUAMELDEBUG' in os.environ: + _debugx = os.environ.get('RUAMELDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + + +if bool(_debug): + + class ObjectCounter(object): + def __init__(self): + # type: () -> None + self.map = {} # type: Dict[Any, Any] + + def __call__(self, k): + # type: (Any) -> None + self.map[k] = self.map.get(k, 0) + 1 + + def dump(self): + # type: () -> None + for k in sorted(self.map): + sys.stdout.write('{} -> {}'.format(k, self.map[k])) + + object_counter = ObjectCounter() + + +# used from yaml util when testing +def dbg(val=None): + # type: (Any) -> Any + global _debug + if _debug is None: + # set to true or false + _debugx = os.environ.get('YAMLDEBUG') + if _debugx is None: + _debug = 0 + else: + _debug = int(_debugx) + if val is None: + return _debug + return _debug & val + + +class Nprint(object): + def __init__(self, file_name=None): + # type: (Any) -> None + self._max_print = None # type: Any + self._count = None # type: Any + self._file_name = file_name + + def __call__(self, *args, **kw): + # type: (Any, Any) -> None + if not bool(_debug): + return 
+ out = sys.stdout if self._file_name is None else open(self._file_name, 'a') + dbgprint = print # to fool checking for print statements by dv utility + kw1 = kw.copy() + kw1['file'] = out + dbgprint(*args, **kw1) + out.flush() + if self._max_print is not None: + if self._count is None: + self._count = self._max_print + self._count -= 1 + if self._count == 0: + dbgprint('forced exit\n') + traceback.print_stack() + out.flush() + sys.exit(0) + if self._file_name: + out.close() + + def set_max_print(self, i): + # type: (int) -> None + self._max_print = i + self._count = None + + +nprint = Nprint() +nprintf = Nprint('/var/tmp/ruamel.yaml.log') + +# char checkers following production rules + + +def check_namespace_char(ch): + # type: (Any) -> bool + if u'\x21' <= ch <= u'\x7E': # ! to ~ + return True + if u'\xA0' <= ch <= u'\uD7FF': + return True + if (u'\uE000' <= ch <= u'\uFFFD') and ch != u'\uFEFF': # excl. byte order mark + return True + if u'\U00010000' <= ch <= u'\U0010FFFF': + return True + return False + + +def check_anchorname_char(ch): + # type: (Any) -> bool + if ch in u',[]{}': + return False + return check_namespace_char(ch) + + +def version_tnf(t1, t2=None): + # type: (Any, Any) -> Any + """ + return True if ruamel.yaml version_info < t1, None if t2 is specified and bigger else False + """ + from ...ruamel.yaml import version_info # NOQA + + if version_info < t1: + return True + if t2 is not None and version_info < t2: + return None + return False + + +class MutableSliceableSequence(MutableSequence): # type: ignore + __slots__ = () + + def __getitem__(self, index): + # type: (Any) -> Any + if not isinstance(index, slice): + return self.__getsingleitem__(index) + return type(self)([self[i] for i in range(*index.indices(len(self)))]) # type: ignore + + def __setitem__(self, index, value): + # type: (Any, Any) -> None + if not isinstance(index, slice): + return self.__setsingleitem__(index, value) + assert iter(value) + # nprint(index.start, index.stop, index.step, index.indices(len(self))) + if index.step is None: + del self[index.start : index.stop] + for elem in reversed(value): + self.insert(0 if index.start is None else index.start, elem) + else: + range_parms = index.indices(len(self)) + nr_assigned_items = (range_parms[1] - range_parms[0] - 1) // range_parms[2] + 1 + # need to test before changing, in case TypeError is caught + if nr_assigned_items < len(value): + raise TypeError( + 'too many elements in value {} < {}'.format(nr_assigned_items, len(value)) + ) + elif nr_assigned_items > len(value): + raise TypeError( + 'not enough elements in value {} > {}'.format( + nr_assigned_items, len(value) + ) + ) + for idx, i in enumerate(range(*range_parms)): + self[i] = value[idx] + + def __delitem__(self, index): + # type: (Any) -> None + if not isinstance(index, slice): + return self.__delsingleitem__(index) + # nprint(index.start, index.stop, index.step, index.indices(len(self))) + for i in reversed(range(*index.indices(len(self)))): + del self[i] + + @abstractmethod + def __getsingleitem__(self, index): + # type: (Any) -> Any + raise IndexError + + @abstractmethod + def __setsingleitem__(self, index, value): + # type: (Any, Any) -> None + raise IndexError + + @abstractmethod + def __delsingleitem__(self, index): + # type: (Any) -> None + raise IndexError diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py new file mode 100644 index 
0000000000..0d830e37d7 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/composer.py @@ -0,0 +1,238 @@ +# coding: utf-8 + +from __future__ import absolute_import, print_function + +import warnings + +from ...ruamel.yaml.error import MarkedYAMLError, ReusedAnchorWarning +from ...ruamel.yaml.compat import utf8, nprint, nprintf # NOQA + +from ...ruamel.yaml.events import ( + StreamStartEvent, + StreamEndEvent, + MappingStartEvent, + MappingEndEvent, + SequenceStartEvent, + SequenceEndEvent, + AliasEvent, + ScalarEvent, +) +from ...ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode + +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + +__all__ = ['Composer', 'ComposerError'] + + +class ComposerError(MarkedYAMLError): + pass + + +class Composer(object): + def __init__(self, loader=None): + # type: (Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_composer', None) is None: + self.loader._composer = self + self.anchors = {} # type: Dict[Any, Any] + + @property + def parser(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + self.loader.parser + return self.loader._parser + + @property + def resolver(self): + # type: () -> Any + # assert self.loader._resolver is not None + if hasattr(self.loader, 'typ'): + self.loader.resolver + return self.loader._resolver + + def check_node(self): + # type: () -> Any + # Drop the STREAM-START event. + if self.parser.check_event(StreamStartEvent): + self.parser.get_event() + + # If there are more documents available? + return not self.parser.check_event(StreamEndEvent) + + def get_node(self): + # type: () -> Any + # Get the root node of the next document. + if not self.parser.check_event(StreamEndEvent): + return self.compose_document() + + def get_single_node(self): + # type: () -> Any + # Drop the STREAM-START event. + self.parser.get_event() + + # Compose a document if the stream is not empty. + document = None # type: Any + if not self.parser.check_event(StreamEndEvent): + document = self.compose_document() + + # Ensure that the stream contains no more documents. + if not self.parser.check_event(StreamEndEvent): + event = self.parser.get_event() + raise ComposerError( + 'expected a single document in the stream', + document.start_mark, + 'but found another document', + event.start_mark, + ) + + # Drop the STREAM-END event. + self.parser.get_event() + + return document + + def compose_document(self): + # type: (Any) -> Any + # Drop the DOCUMENT-START event. + self.parser.get_event() + + # Compose the root node. + node = self.compose_node(None, None) + + # Drop the DOCUMENT-END event. 
+ self.parser.get_event() + + self.anchors = {} + return node + + def compose_node(self, parent, index): + # type: (Any, Any) -> Any + if self.parser.check_event(AliasEvent): + event = self.parser.get_event() + alias = event.anchor + if alias not in self.anchors: + raise ComposerError( + None, None, 'found undefined alias %r' % utf8(alias), event.start_mark + ) + return self.anchors[alias] + event = self.parser.peek_event() + anchor = event.anchor + if anchor is not None: # have an anchor + if anchor in self.anchors: + # raise ComposerError( + # "found duplicate anchor %r; first occurrence" + # % utf8(anchor), self.anchors[anchor].start_mark, + # "second occurrence", event.start_mark) + ws = ( + '\nfound duplicate anchor {!r}\nfirst occurrence {}\nsecond occurrence ' + '{}'.format((anchor), self.anchors[anchor].start_mark, event.start_mark) + ) + warnings.warn(ws, ReusedAnchorWarning) + self.resolver.descend_resolver(parent, index) + if self.parser.check_event(ScalarEvent): + node = self.compose_scalar_node(anchor) + elif self.parser.check_event(SequenceStartEvent): + node = self.compose_sequence_node(anchor) + elif self.parser.check_event(MappingStartEvent): + node = self.compose_mapping_node(anchor) + self.resolver.ascend_resolver() + return node + + def compose_scalar_node(self, anchor): + # type: (Any) -> Any + event = self.parser.get_event() + tag = event.tag + if tag is None or tag == u'!': + tag = self.resolver.resolve(ScalarNode, event.value, event.implicit) + node = ScalarNode( + tag, + event.value, + event.start_mark, + event.end_mark, + style=event.style, + comment=event.comment, + anchor=anchor, + ) + if anchor is not None: + self.anchors[anchor] = node + return node + + def compose_sequence_node(self, anchor): + # type: (Any) -> Any + start_event = self.parser.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolver.resolve(SequenceNode, None, start_event.implicit) + node = SequenceNode( + tag, + [], + start_event.start_mark, + None, + flow_style=start_event.flow_style, + comment=start_event.comment, + anchor=anchor, + ) + if anchor is not None: + self.anchors[anchor] = node + index = 0 + while not self.parser.check_event(SequenceEndEvent): + node.value.append(self.compose_node(node, index)) + index += 1 + end_event = self.parser.get_event() + if node.flow_style is True and end_event.comment is not None: + if node.comment is not None: + nprint( + 'Warning: unexpected end_event commment in sequence ' + 'node {}'.format(node.flow_style) + ) + node.comment = end_event.comment + node.end_mark = end_event.end_mark + self.check_end_doc_comment(end_event, node) + return node + + def compose_mapping_node(self, anchor): + # type: (Any) -> Any + start_event = self.parser.get_event() + tag = start_event.tag + if tag is None or tag == u'!': + tag = self.resolver.resolve(MappingNode, None, start_event.implicit) + node = MappingNode( + tag, + [], + start_event.start_mark, + None, + flow_style=start_event.flow_style, + comment=start_event.comment, + anchor=anchor, + ) + if anchor is not None: + self.anchors[anchor] = node + while not self.parser.check_event(MappingEndEvent): + # key_event = self.parser.peek_event() + item_key = self.compose_node(node, None) + # if item_key in node.value: + # raise ComposerError("while composing a mapping", + # start_event.start_mark, + # "found duplicate key", key_event.start_mark) + item_value = self.compose_node(node, item_key) + # node.value[item_key] = item_value + node.value.append((item_key, item_value)) + end_event = 
self.parser.get_event() + if node.flow_style is True and end_event.comment is not None: + node.comment = end_event.comment + node.end_mark = end_event.end_mark + self.check_end_doc_comment(end_event, node) + return node + + def check_end_doc_comment(self, end_event, node): + # type: (Any, Any) -> None + if end_event.comment and end_event.comment[1]: + # pre comments on an end_event, no following to move to + if node.comment is None: + node.comment = [None, None] + assert not isinstance(node, ScalarEvent) + # this is a post comment on a mapping node, add as third element + # in the list + node.comment.append(end_event.comment[1]) + end_event.comment[1] = None diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py new file mode 100644 index 0000000000..8c6504f8fc --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/configobjwalker.py @@ -0,0 +1,14 @@ +# coding: utf-8 + +import warnings + +from ...ruamel.yaml.util import configobj_walker as new_configobj_walker + +if False: # MYPY + from typing import Any # NOQA + + +def configobj_walker(cfg): + # type: (Any) -> Any + warnings.warn('configobj_walker has moved to ruamel.yaml.util, please update your code') + return new_configobj_walker(cfg) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py new file mode 100644 index 0000000000..6f827aea1c --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/constructor.py @@ -0,0 +1,1806 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division + +import datetime +import base64 +import binascii +import re +import sys +import types +import warnings + +# fmt: off +from ...ruamel.yaml.error import (MarkedYAMLError, MarkedYAMLFutureWarning, + MantissaNoDotYAML1_1Warning) +from ...ruamel.yaml.nodes import * # NOQA +from ...ruamel.yaml.nodes import (SequenceNode, MappingNode, ScalarNode) +from ...ruamel.yaml.compat import (utf8, builtins_module, to_str, PY2, PY3, # NOQA + text_type, nprint, nprintf, version_tnf) +from ...ruamel.yaml.compat import ordereddict, Hashable, MutableSequence # type: ignore +from ...ruamel.yaml.compat import MutableMapping # type: ignore + +from ...ruamel.yaml.comments import * # NOQA +from ...ruamel.yaml.comments import (CommentedMap, CommentedOrderedMap, CommentedSet, + CommentedKeySeq, CommentedSeq, TaggedScalar, + CommentedKeyMap) +from ...ruamel.yaml.scalarstring import (SingleQuotedScalarString, DoubleQuotedScalarString, + LiteralScalarString, FoldedScalarString, + PlainScalarString, ScalarString,) +from ...ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt +from ...ruamel.yaml.scalarfloat import ScalarFloat +from ...ruamel.yaml.scalarbool import ScalarBoolean +from ...ruamel.yaml.timestamp import TimeStamp +from ...ruamel.yaml.util import RegExp + +if False: # MYPY + from typing import Any, Dict, List, Set, Generator, Union, Optional # NOQA + + +__all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor', + 'ConstructorError', 'RoundTripConstructor'] +# fmt: on + + +class ConstructorError(MarkedYAMLError): + pass + + +class DuplicateKeyFutureWarning(MarkedYAMLFutureWarning): + pass + + +class 
DuplicateKeyError(MarkedYAMLFutureWarning): + pass + + +class BaseConstructor(object): + + yaml_constructors = {} # type: Dict[Any, Any] + yaml_multi_constructors = {} # type: Dict[Any, Any] + + def __init__(self, preserve_quotes=None, loader=None): + # type: (Optional[bool], Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_constructor', None) is None: + self.loader._constructor = self + self.loader = loader + self.yaml_base_dict_type = dict + self.yaml_base_list_type = list + self.constructed_objects = {} # type: Dict[Any, Any] + self.recursive_objects = {} # type: Dict[Any, Any] + self.state_generators = [] # type: List[Any] + self.deep_construct = False + self._preserve_quotes = preserve_quotes + self.allow_duplicate_keys = version_tnf((0, 15, 1), (0, 16)) + + @property + def composer(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.composer + try: + return self.loader._composer + except AttributeError: + sys.stdout.write('slt {}\n'.format(type(self))) + sys.stdout.write('slc {}\n'.format(self.loader._composer)) + sys.stdout.write('{}\n'.format(dir(self))) + raise + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver + return self.loader._resolver + + def check_data(self): + # type: () -> Any + # If there are more documents available? + return self.composer.check_node() + + def get_data(self): + # type: () -> Any + # Construct and return the next document. + if self.composer.check_node(): + return self.construct_document(self.composer.get_node()) + + def get_single_data(self): + # type: () -> Any + # Ensure that the stream contains a single document and construct it. + node = self.composer.get_single_node() + if node is not None: + return self.construct_document(node) + return None + + def construct_document(self, node): + # type: (Any) -> Any + data = self.construct_object(node) + while bool(self.state_generators): + state_generators = self.state_generators + self.state_generators = [] + for generator in state_generators: + for _dummy in generator: + pass + self.constructed_objects = {} + self.recursive_objects = {} + self.deep_construct = False + return data + + def construct_object(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if node in self.constructed_objects: + return self.constructed_objects[node] + if deep: + old_deep = self.deep_construct + self.deep_construct = True + if node in self.recursive_objects: + return self.recursive_objects[node] + # raise ConstructorError( + # None, None, 'found unconstructable recursive node', node.start_mark + # ) + self.recursive_objects[node] = None + data = self.construct_non_recursive_object(node) + + self.constructed_objects[node] = data + del self.recursive_objects[node] + if deep: + self.deep_construct = old_deep + return data + + def construct_non_recursive_object(self, node, tag=None): + # type: (Any, Optional[str]) -> Any + constructor = None # type: Any + tag_suffix = None + if tag is None: + tag = node.tag + if tag in self.yaml_constructors: + constructor = self.yaml_constructors[tag] + else: + for tag_prefix in self.yaml_multi_constructors: + if tag.startswith(tag_prefix): + tag_suffix = tag[len(tag_prefix) :] + constructor = self.yaml_multi_constructors[tag_prefix] + break + else: + if None in self.yaml_multi_constructors: + tag_suffix = tag + 
constructor = self.yaml_multi_constructors[None] + elif None in self.yaml_constructors: + constructor = self.yaml_constructors[None] + elif isinstance(node, ScalarNode): + constructor = self.__class__.construct_scalar + elif isinstance(node, SequenceNode): + constructor = self.__class__.construct_sequence + elif isinstance(node, MappingNode): + constructor = self.__class__.construct_mapping + if tag_suffix is None: + data = constructor(self, node) + else: + data = constructor(self, tag_suffix, node) + if isinstance(data, types.GeneratorType): + generator = data + data = next(generator) + if self.deep_construct: + for _dummy in generator: + pass + else: + self.state_generators.append(generator) + return data + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark + ) + return node.value + + def construct_sequence(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark + ) + return [self.construct_object(child, deep=deep) for child in node.value] + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + total_mapping = self.yaml_base_dict_type() + if getattr(node, 'merge', None) is not None: + todo = [(node.merge, False), (node.value, False)] + else: + todo = [(node.value, True)] + for values, check in todo: + mapping = self.yaml_base_dict_type() # type: Dict[Any, Any] + for key_node, value_node in values: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if PY2: + try: + hash(key) + except TypeError as exc: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unacceptable key (%s)' % exc, + key_node.start_mark, + ) + else: + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + + value = self.construct_object(value_node, deep=deep) + if check: + if self.check_mapping_key(node, key_node, mapping, key, value): + mapping[key] = value + else: + mapping[key] = value + total_mapping.update(mapping) + return total_mapping + + def check_mapping_key(self, node, key_node, mapping, key, value): + # type: (Any, Any, Any, Any, Any) -> bool + """return True if key is unique""" + if key in mapping: + if not self.allow_duplicate_keys: + mk = mapping.get(key) + if PY2: + if isinstance(key, unicode): + key = key.encode('utf-8') + if isinstance(value, unicode): + value = value.encode('utf-8') + if isinstance(mk, unicode): + mk = mk.encode('utf-8') + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}" with value "{}" ' + '(original value: "{}")'.format(key, value, mk), + key_node.start_mark, + """ + To suppress this check see: + 
http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + return False + return True + + def check_set_key(self, node, key_node, setting, key): + # type: (Any, Any, Any, Any, Any) -> None + if key in setting: + if not self.allow_duplicate_keys: + if PY2: + if isinstance(key, unicode): + key = key.encode('utf-8') + args = [ + 'while constructing a set', + node.start_mark, + 'found duplicate key "{}"'.format(key), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + + def construct_pairs(self, node, deep=False): + # type: (Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + pairs = [] + for key_node, value_node in node.value: + key = self.construct_object(key_node, deep=deep) + value = self.construct_object(value_node, deep=deep) + pairs.append((key, value)) + return pairs + + @classmethod + def add_constructor(cls, tag, constructor): + # type: (Any, Any) -> None + if 'yaml_constructors' not in cls.__dict__: + cls.yaml_constructors = cls.yaml_constructors.copy() + cls.yaml_constructors[tag] = constructor + + @classmethod + def add_multi_constructor(cls, tag_prefix, multi_constructor): + # type: (Any, Any) -> None + if 'yaml_multi_constructors' not in cls.__dict__: + cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy() + cls.yaml_multi_constructors[tag_prefix] = multi_constructor + + +class SafeConstructor(BaseConstructor): + def construct_scalar(self, node): + # type: (Any) -> Any + if isinstance(node, MappingNode): + for key_node, value_node in node.value: + if key_node.tag == u'tag:yaml.org,2002:value': + return self.construct_scalar(value_node) + return BaseConstructor.construct_scalar(self, node) + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + merge = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + if merge: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. 
+ """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + self.flatten_mapping(value_node) + merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping for merging, but found %s' % subnode.id, + subnode.start_mark, + ) + self.flatten_mapping(subnode) + submerge.append(subnode.value) + submerge.reverse() + for value in submerge: + merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping or list of mappings for merging, ' + 'but found %s' % value_node.id, + value_node.start_mark, + ) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + if bool(merge): + node.merge = merge # separate merge keys to be able to update without duplicate + node.value = merge + node.value + + def construct_mapping(self, node, deep=False): + # type: (Any, bool) -> Any + """deep is True when creating an object/mapping recursively, + in that case want the underlying elements available during construction + """ + if isinstance(node, MappingNode): + self.flatten_mapping(node) + return BaseConstructor.construct_mapping(self, node, deep=deep) + + def construct_yaml_null(self, node): + # type: (Any) -> Any + self.construct_scalar(node) + return None + + # YAML 1.2 spec doesn't mention yes/no etc any more, 1.1 does + bool_values = { + u'yes': True, + u'no': False, + u'y': True, + u'n': False, + u'true': True, + u'false': False, + u'on': True, + u'off': False, + } + + def construct_yaml_bool(self, node): + # type: (Any) -> bool + value = self.construct_scalar(node) + return self.bool_values[value.lower()] + + def construct_yaml_int(self, node): + # type: (Any) -> int + value_s = to_str(self.construct_scalar(node)) + value_s = value_s.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + return sign * int(value_s[2:], 2) + elif value_s.startswith('0x'): + return sign * int(value_s[2:], 16) + elif value_s.startswith('0o'): + return sign * int(value_s[2:], 8) + elif self.resolver.processing_version == (1, 1) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version == (1, 1) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + return sign * int(value_s) + + inf_value = 1e300 + while inf_value != inf_value * inf_value: + inf_value *= inf_value + nan_value = -inf_value / inf_value # Trying to make a quiet NaN (like C99). 
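# --- editor's illustration (not part of this patch) -------------------------
# The SafeConstructor code above implements merge keys (flatten_mapping) and
# the duplicate-key check (check_mapping_key).  A small sketch of both,
# assuming the upstream `ruamel.yaml` package name rather than the vendored
# import path:
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError

yaml = YAML(typ='safe')

# '<<' pulls in the anchored mapping; keys given explicitly win over merged ones.
doc = "defaults: &d {retries: 3, timeout: 10}\njob:\n  <<: *d\n  timeout: 30\n"
print(yaml.load(doc)['job'])        # expected: {'retries': 3, 'timeout': 30}

# Duplicate keys are rejected by default with the new-style YAML() API.
try:
    yaml.load("a: 1\na: 2\n")
except DuplicateKeyError:
    print('duplicate key rejected')
# -----------------------------------------------------------------------------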
+ + def construct_yaml_float(self, node): + # type: (Any) -> float + value_so = to_str(self.construct_scalar(node)) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + elif value_s == '.nan': + return self.nan_value + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + else: + if self.resolver.processing_version != (1, 2) and 'e' in value_s: + # value_s is lower case independent of input + mantissa, exponent = value_s.split('e') + if '.' not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + return sign * float(value_s) + + if PY3: + + def construct_yaml_binary(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + 'failed to convert base64 data into ascii: %s' % exc, + node.start_mark, + ) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError( + None, None, 'failed to decode base64 data: %s' % exc, node.start_mark + ) + + else: + + def construct_yaml_binary(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + try: + return to_str(value).decode('base64') + except (binascii.Error, UnicodeEncodeError) as exc: + raise ConstructorError( + None, None, 'failed to decode base64 data: %s' % exc, node.start_mark + ) + + timestamp_regexp = RegExp( + u"""^(?P[0-9][0-9][0-9][0-9]) + -(?P[0-9][0-9]?) + -(?P[0-9][0-9]?) + (?:((?P[Tt])|[ \\t]+) # explictly not retaining extra spaces + (?P[0-9][0-9]?) + :(?P[0-9][0-9]) + :(?P[0-9][0-9]) + (?:\\.(?P[0-9]*))? + (?:[ \\t]*(?PZ|(?P[-+])(?P[0-9][0-9]?) 
+ (?::(?P[0-9][0-9]))?))?)?$""", + re.X, + ) + + def construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + if values is None: + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + if not values['hour']: + return datetime.date(year, month, day) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction_s = values['fraction'][:6] + while len(fraction_s) < 6: + fraction_s += '0' + fraction = int(fraction_s) + if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4: + fraction += 1 + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + minutes = values['tz_minute'] + tz_minute = int(minutes) if minutes else 0 + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + # should do something else instead (or hook this up to the preceding if statement + # in reverse + # if delta is None: + # return datetime.datetime(year, month, day, hour, minute, second, fraction) + # return datetime.datetime(year, month, day, hour, minute, second, fraction, + # datetime.timezone.utc) + # the above is not good enough though, should provide tzinfo. In Python3 that is easily + # doable drop that kind of support for Python2 as it has not native tzinfo + data = datetime.datetime(year, month, day, hour, minute, second, fraction) + if delta: + data -= delta + return data + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = ordereddict() + yield omap + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a sequence, but found %s' % node.id, + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a mapping of length 1, but found %s' % subnode.id, + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a single mapping item, but found %d items' % len(subnode.value), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + omap[key] = value + + def construct_yaml_pairs(self, node): + # type: (Any) -> Any + # Note: the same code as `construct_yaml_omap`. 
+ pairs = [] # type: List[Any] + yield pairs + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + 'expected a sequence, but found %s' % node.id, + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + 'expected a mapping of length 1, but found %s' % subnode.id, + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing pairs', + node.start_mark, + 'expected a single mapping item, but found %d items' % len(subnode.value), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + value = self.construct_object(value_node) + pairs.append((key, value)) + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = set() # type: Set[Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + if PY3: + return value + try: + return value.encode('ascii') + except UnicodeEncodeError: + return value + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = self.yaml_base_list_type() # type: List[Any] + yield data + data.extend(self.construct_sequence(node)) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = self.yaml_base_dict_type() # type: Dict[Any, Any] + yield data + value = self.construct_mapping(node) + data.update(value) + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = self.construct_mapping(node, deep=True) + data.__setstate__(state) + else: + state = self.construct_mapping(node) + data.__dict__.update(state) + + def construct_undefined(self, node): + # type: (Any) -> None + raise ConstructorError( + None, + None, + 'could not determine a constructor for the tag %r' % utf8(node.tag), + node.start_mark, + ) + + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:null', SafeConstructor.construct_yaml_null) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:bool', SafeConstructor.construct_yaml_bool) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:int', SafeConstructor.construct_yaml_int) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:float', SafeConstructor.construct_yaml_float +) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:binary', SafeConstructor.construct_yaml_binary +) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', SafeConstructor.construct_yaml_timestamp +) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:omap', SafeConstructor.construct_yaml_omap) + +SafeConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', SafeConstructor.construct_yaml_pairs +) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:set', SafeConstructor.construct_yaml_set) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:str', SafeConstructor.construct_yaml_str) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:seq', SafeConstructor.construct_yaml_seq) + +SafeConstructor.add_constructor(u'tag:yaml.org,2002:map', SafeConstructor.construct_yaml_map) + +SafeConstructor.add_constructor(None, SafeConstructor.construct_undefined) + +if PY2: + + class classobj: + pass + + +class Constructor(SafeConstructor): + def construct_python_str(self, node): + # type: (Any) -> Any + return 
utf8(self.construct_scalar(node)) + + def construct_python_unicode(self, node): + # type: (Any) -> Any + return self.construct_scalar(node) + + if PY3: + + def construct_python_bytes(self, node): + # type: (Any) -> Any + try: + value = self.construct_scalar(node).encode('ascii') + except UnicodeEncodeError as exc: + raise ConstructorError( + None, + None, + 'failed to convert base64 data into ascii: %s' % exc, + node.start_mark, + ) + try: + if hasattr(base64, 'decodebytes'): + return base64.decodebytes(value) + else: + return base64.decodestring(value) + except binascii.Error as exc: + raise ConstructorError( + None, None, 'failed to decode base64 data: %s' % exc, node.start_mark + ) + + def construct_python_long(self, node): + # type: (Any) -> int + val = self.construct_yaml_int(node) + if PY3: + return val + return int(val) + + def construct_python_complex(self, node): + # type: (Any) -> Any + return complex(self.construct_scalar(node)) + + def construct_python_tuple(self, node): + # type: (Any) -> Any + return tuple(self.construct_sequence(node)) + + def find_python_module(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python module', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + try: + __import__(name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python module', + mark, + 'cannot find module %r (%s)' % (utf8(name), exc), + mark, + ) + return sys.modules[name] + + def find_python_name(self, name, mark): + # type: (Any, Any) -> Any + if not name: + raise ConstructorError( + 'while constructing a Python object', + mark, + 'expected non-empty name appended to the tag', + mark, + ) + if u'.' in name: + lname = name.split('.') + lmodule_name = lname + lobject_name = [] # type: List[Any] + while len(lmodule_name) > 1: + lobject_name.insert(0, lmodule_name.pop()) + module_name = '.'.join(lmodule_name) + try: + __import__(module_name) + # object_name = '.'.join(object_name) + break + except ImportError: + continue + else: + module_name = builtins_module + lobject_name = [name] + try: + __import__(module_name) + except ImportError as exc: + raise ConstructorError( + 'while constructing a Python object', + mark, + 'cannot find module %r (%s)' % (utf8(module_name), exc), + mark, + ) + module = sys.modules[module_name] + object_name = '.'.join(lobject_name) + obj = module + while lobject_name: + if not hasattr(obj, lobject_name[0]): + + raise ConstructorError( + 'while constructing a Python object', + mark, + 'cannot find %r in the module %r' % (utf8(object_name), module.__name__), + mark, + ) + obj = getattr(obj, lobject_name.pop(0)) + return obj + + def construct_python_name(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python name', + node.start_mark, + 'expected the empty value, but found %r' % utf8(value), + node.start_mark, + ) + return self.find_python_name(suffix, node.start_mark) + + def construct_python_module(self, suffix, node): + # type: (Any, Any) -> Any + value = self.construct_scalar(node) + if value: + raise ConstructorError( + 'while constructing a Python module', + node.start_mark, + 'expected the empty value, but found %r' % utf8(value), + node.start_mark, + ) + return self.find_python_module(suffix, node.start_mark) + + def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False): + # type: (Any, Any, Any, Any, bool) -> Any + if not 
args: + args = [] + if not kwds: + kwds = {} + cls = self.find_python_name(suffix, node.start_mark) + if PY3: + if newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + else: + if newobj and isinstance(cls, type(classobj)) and not args and not kwds: + instance = classobj() + instance.__class__ = cls + return instance + elif newobj and isinstance(cls, type): + return cls.__new__(cls, *args, **kwds) + else: + return cls(*args, **kwds) + + def set_python_instance_state(self, instance, state): + # type: (Any, Any) -> None + if hasattr(instance, '__setstate__'): + instance.__setstate__(state) + else: + slotstate = {} # type: Dict[Any, Any] + if isinstance(state, tuple) and len(state) == 2: + state, slotstate = state + if hasattr(instance, '__dict__'): + instance.__dict__.update(state) + elif state: + slotstate.update(state) + for key, value in slotstate.items(): + setattr(instance, key, value) + + def construct_python_object(self, suffix, node): + # type: (Any, Any) -> Any + # Format: + # !!python/object:module.name { ... state ... } + instance = self.make_python_instance(suffix, node, newobj=True) + self.recursive_objects[node] = instance + yield instance + deep = hasattr(instance, '__setstate__') + state = self.construct_mapping(node, deep=deep) + self.set_python_instance_state(instance, state) + + def construct_python_object_apply(self, suffix, node, newobj=False): + # type: (Any, Any, bool) -> Any + # Format: + # !!python/object/apply # (or !!python/object/new) + # args: [ ... arguments ... ] + # kwds: { ... keywords ... } + # state: ... state ... + # listitems: [ ... listitems ... ] + # dictitems: { ... dictitems ... } + # or short format: + # !!python/object/apply [ ... arguments ... ] + # The difference between !!python/object/apply and !!python/object/new + # is how an object is created, check make_python_instance for details. 
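+        # A SequenceNode is the short form (positional arguments only); a
+        # mapping node supplies args, kwds, state, listitems and dictitems.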
+ if isinstance(node, SequenceNode): + args = self.construct_sequence(node, deep=True) + kwds = {} # type: Dict[Any, Any] + state = {} # type: Dict[Any, Any] + listitems = [] # type: List[Any] + dictitems = {} # type: Dict[Any, Any] + else: + value = self.construct_mapping(node, deep=True) + args = value.get('args', []) + kwds = value.get('kwds', {}) + state = value.get('state', {}) + listitems = value.get('listitems', []) + dictitems = value.get('dictitems', {}) + instance = self.make_python_instance(suffix, node, args, kwds, newobj) + if bool(state): + self.set_python_instance_state(instance, state) + if bool(listitems): + instance.extend(listitems) + if bool(dictitems): + for key in dictitems: + instance[key] = dictitems[key] + return instance + + def construct_python_object_new(self, suffix, node): + # type: (Any, Any) -> Any + return self.construct_python_object_apply(suffix, node, newobj=True) + + +Constructor.add_constructor(u'tag:yaml.org,2002:python/none', Constructor.construct_yaml_null) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/bool', Constructor.construct_yaml_bool) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/str', Constructor.construct_python_str) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/unicode', Constructor.construct_python_unicode +) + +if PY3: + Constructor.add_constructor( + u'tag:yaml.org,2002:python/bytes', Constructor.construct_python_bytes + ) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/int', Constructor.construct_yaml_int) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/long', Constructor.construct_python_long +) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/float', Constructor.construct_yaml_float +) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/complex', Constructor.construct_python_complex +) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/list', Constructor.construct_yaml_seq) + +Constructor.add_constructor( + u'tag:yaml.org,2002:python/tuple', Constructor.construct_python_tuple +) + +Constructor.add_constructor(u'tag:yaml.org,2002:python/dict', Constructor.construct_yaml_map) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/name:', Constructor.construct_python_name +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/module:', Constructor.construct_python_module +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object:', Constructor.construct_python_object +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/apply:', Constructor.construct_python_object_apply +) + +Constructor.add_multi_constructor( + u'tag:yaml.org,2002:python/object/new:', Constructor.construct_python_object_new +) + + +class RoundTripConstructor(SafeConstructor): + """need to store the comments on the node itself, + as well as on the items + """ + + def construct_scalar(self, node): + # type: (Any) -> Any + if not isinstance(node, ScalarNode): + raise ConstructorError( + None, None, 'expected a scalar node, but found %s' % node.id, node.start_mark + ) + + if node.style == '|' and isinstance(node.value, text_type): + lss = LiteralScalarString(node.value, anchor=node.anchor) + if node.comment and node.comment[1]: + lss.comment = node.comment[1][0] # type: ignore + return lss + if node.style == '>' and isinstance(node.value, text_type): + fold_positions = [] # type: List[int] + idx = -1 + while True: + idx = node.value.find('\a', idx + 1) + if idx < 0: + break + fold_positions.append(idx - 
len(fold_positions)) + fss = FoldedScalarString(node.value.replace('\a', ''), anchor=node.anchor) + if node.comment and node.comment[1]: + fss.comment = node.comment[1][0] # type: ignore + if fold_positions: + fss.fold_pos = fold_positions # type: ignore + return fss + elif bool(self._preserve_quotes) and isinstance(node.value, text_type): + if node.style == "'": + return SingleQuotedScalarString(node.value, anchor=node.anchor) + if node.style == '"': + return DoubleQuotedScalarString(node.value, anchor=node.anchor) + if node.anchor: + return PlainScalarString(node.value, anchor=node.anchor) + return node.value + + def construct_yaml_int(self, node): + # type: (Any) -> Any + width = None # type: Any + value_su = to_str(self.construct_scalar(node)) + try: + sx = value_su.rstrip('_') + underscore = [len(sx) - sx.rindex('_') - 1, False, False] # type: Any + except ValueError: + underscore = None + except IndexError: + underscore = None + value_s = value_su.replace('_', "") + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + value_s = value_s[1:] + if value_s == '0': + return 0 + elif value_s.startswith('0b'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return BinaryInt( + sign * int(value_s[2:], 2), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0x'): + # default to lower-case if no a-fA-F in string + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + hex_fun = HexInt # type: Any + for ch in value_s[2:]: + if ch in 'ABCDEF': # first non-digit is capital + hex_fun = HexCapsInt + break + if ch in 'abcdef': + break + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return hex_fun( + sign * int(value_s[2:], 16), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif value_s.startswith('0o'): + if self.resolver.processing_version > (1, 1) and value_s[2] == '0': + width = len(value_s[2:]) + if underscore is not None: + underscore[1] = value_su[2] == '_' + underscore[2] = len(value_su[2:]) > 1 and value_su[-1] == '_' + return OctalInt( + sign * int(value_s[2:], 8), + width=width, + underscore=underscore, + anchor=node.anchor, + ) + elif self.resolver.processing_version != (1, 2) and value_s[0] == '0': + return sign * int(value_s, 8) + elif self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [int(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + elif self.resolver.processing_version > (1, 1) and value_s[0] == '0': + # not an octal, an integer with leading zero(s) + if underscore is not None: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt(sign * int(value_s), width=len(value_s), underscore=underscore) + elif underscore: + # cannot have a leading underscore + underscore[2] = len(value_su) > 1 and value_su[-1] == '_' + return ScalarInt( + sign * int(value_s), width=None, underscore=underscore, anchor=node.anchor + ) + elif node.anchor: + return ScalarInt(sign * int(value_s), width=None, anchor=node.anchor) + else: + return sign * int(value_s) + + def construct_yaml_float(self, node): + # type: (Any) -> Any + 
def leading_zeros(v): + # type: (Any) -> int + lead0 = 0 + idx = 0 + while idx < len(v) and v[idx] in '0.': + if v[idx] == '0': + lead0 += 1 + idx += 1 + return lead0 + + # underscore = None + m_sign = False # type: Any + value_so = to_str(self.construct_scalar(node)) + value_s = value_so.replace('_', "").lower() + sign = +1 + if value_s[0] == '-': + sign = -1 + if value_s[0] in '+-': + m_sign = value_s[0] + value_s = value_s[1:] + if value_s == '.inf': + return sign * self.inf_value + if value_s == '.nan': + return self.nan_value + if self.resolver.processing_version != (1, 2) and ':' in value_s: + digits = [float(part) for part in value_s.split(':')] + digits.reverse() + base = 1 + value = 0.0 + for digit in digits: + value += digit * base + base *= 60 + return sign * value + if 'e' in value_s: + try: + mantissa, exponent = value_so.split('e') + exp = 'e' + except ValueError: + mantissa, exponent = value_so.split('E') + exp = 'E' + if self.resolver.processing_version != (1, 2): + # value_s is lower case independent of input + if '.' not in mantissa: + warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) + lead0 = leading_zeros(mantissa) + width = len(mantissa) + prec = mantissa.find('.') + if m_sign: + width -= 1 + e_width = len(exponent) + e_sign = exponent[0] in '+-' + # nprint('sf', width, prec, m_sign, exp, e_width, e_sign) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + exp=exp, + e_width=e_width, + e_sign=e_sign, + anchor=node.anchor, + ) + width = len(value_so) + prec = value_so.index('.') # you can use index, this would not be float without dot + lead0 = leading_zeros(value_so) + return ScalarFloat( + sign * float(value_s), + width=width, + prec=prec, + m_sign=m_sign, + m_lead0=lead0, + anchor=node.anchor, + ) + + def construct_yaml_str(self, node): + # type: (Any) -> Any + value = self.construct_scalar(node) + if isinstance(value, ScalarString): + return value + if PY3: + return value + try: + return value.encode('ascii') + except AttributeError: + # in case you replace the node dynamically e.g. with a dict + return value + except UnicodeEncodeError: + return value + + def construct_rt_sequence(self, node, seqtyp, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, SequenceNode): + raise ConstructorError( + None, None, 'expected a sequence node, but found %s' % node.id, node.start_mark + ) + ret_val = [] + if node.comment: + seqtyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + seqtyp.yaml_end_comment_extend(node.comment[2], clear=True) + if node.anchor: + from ...ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + seqtyp.yaml_set_anchor(node.anchor) + for idx, child in enumerate(node.value): + if child.comment: + seqtyp._yaml_add_comment(child.comment, key=idx) + child.comment = None # if moved to sequence remove from child + ret_val.append(self.construct_object(child, deep=deep)) + seqtyp._yaml_set_idx_line_col( + idx, [child.start_mark.line, child.start_mark.column] + ) + return ret_val + + def flatten_mapping(self, node): + # type: (Any) -> Any + """ + This implements the merge key feature http://yaml.org/type/merge.html + by inserting keys from the merge dict/list of dicts if not yet + available in this node + """ + + def constructed(value_node): + # type: (Any) -> Any + # If the contents of a merge are defined within the + # merge marker, then they won't have been constructed + # yet. 
But if they were already constructed, we need to use + # the existing object. + if value_node in self.constructed_objects: + value = self.constructed_objects[value_node] + else: + value = self.construct_object(value_node, deep=False) + return value + + # merge = [] + merge_map_list = [] # type: List[Any] + index = 0 + while index < len(node.value): + key_node, value_node = node.value[index] + if key_node.tag == u'tag:yaml.org,2002:merge': + if merge_map_list: # double << key + if self.allow_duplicate_keys: + del node.value[index] + index += 1 + continue + args = [ + 'while constructing a mapping', + node.start_mark, + 'found duplicate key "{}"'.format(key_node.value), + key_node.start_mark, + """ + To suppress this check see: + http://yaml.readthedocs.io/en/latest/api.html#duplicate-keys + """, + """\ + Duplicate keys will become an error in future releases, and are errors + by default when using the new API. + """, + ] + if self.allow_duplicate_keys is None: + warnings.warn(DuplicateKeyFutureWarning(*args)) + else: + raise DuplicateKeyError(*args) + del node.value[index] + if isinstance(value_node, MappingNode): + merge_map_list.append((index, constructed(value_node))) + # self.flatten_mapping(value_node) + # merge.extend(value_node.value) + elif isinstance(value_node, SequenceNode): + # submerge = [] + for subnode in value_node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping for merging, but found %s' % subnode.id, + subnode.start_mark, + ) + merge_map_list.append((index, constructed(subnode))) + # self.flatten_mapping(subnode) + # submerge.append(subnode.value) + # submerge.reverse() + # for value in submerge: + # merge.extend(value) + else: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'expected a mapping or list of mappings for merging, ' + 'but found %s' % value_node.id, + value_node.start_mark, + ) + elif key_node.tag == u'tag:yaml.org,2002:value': + key_node.tag = u'tag:yaml.org,2002:str' + index += 1 + else: + index += 1 + return merge_map_list + # if merge: + # node.value = merge + node.value + + def _sentinel(self): + # type: () -> None + pass + + def construct_mapping(self, node, maptyp, deep=False): # type: ignore + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + merge_map = self.flatten_mapping(node) + # mapping = {} + if node.comment: + maptyp._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + maptyp.yaml_end_comment_extend(node.comment[2], clear=True) + if node.anchor: + from ...ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + maptyp.yaml_set_anchor(node.anchor) + last_key, last_value = None, self._sentinel + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, MutableSequence): + key_s = CommentedKeySeq(key) + if key_node.flow_style is True: + key_s.fa.set_flow_style() + elif key_node.flow_style is False: + key_s.fa.set_block_style() + key = key_s + elif isinstance(key, MutableMapping): + key_m = CommentedKeyMap(key) + if key_node.flow_style is True: + key_m.fa.set_flow_style() + elif key_node.flow_style is False: + key_m.fa.set_block_style() + key = key_m + if PY2: + try: + hash(key) + 
except TypeError as exc: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unacceptable key (%s)' % exc, + key_node.start_mark, + ) + else: + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + value = self.construct_object(value_node, deep=deep) + if self.check_mapping_key(node, key_node, maptyp, key, value): + if key_node.comment and len(key_node.comment) > 4 and key_node.comment[4]: + if last_value is None: + key_node.comment[0] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, value=last_key) + else: + key_node.comment[2] = key_node.comment.pop(4) + maptyp._yaml_add_comment(key_node.comment, key=key) + key_node.comment = None + if key_node.comment: + maptyp._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + maptyp._yaml_add_comment(value_node.comment, value=key) + maptyp._yaml_set_kv_line_col( + key, + [ + key_node.start_mark.line, + key_node.start_mark.column, + value_node.start_mark.line, + value_node.start_mark.column, + ], + ) + maptyp[key] = value + last_key, last_value = key, value # could use indexing + # do this last, or <<: before a key will prevent insertion in instances + # of collections.OrderedDict (as they have no __contains__ + if merge_map: + maptyp.add_yaml_merge(merge_map) + + def construct_setting(self, node, typ, deep=False): + # type: (Any, Any, bool) -> Any + if not isinstance(node, MappingNode): + raise ConstructorError( + None, None, 'expected a mapping node, but found %s' % node.id, node.start_mark + ) + if node.comment: + typ._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + typ.yaml_end_comment_extend(node.comment[2], clear=True) + if node.anchor: + from ...ruamel.yaml.serializer import templated_id + + if not templated_id(node.anchor): + typ.yaml_set_anchor(node.anchor) + for key_node, value_node in node.value: + # keys can be list -> deep + key = self.construct_object(key_node, deep=True) + # lists are not hashable, but tuples are + if not isinstance(key, Hashable): + if isinstance(key, list): + key = tuple(key) + if PY2: + try: + hash(key) + except TypeError as exc: + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unacceptable key (%s)' % exc, + key_node.start_mark, + ) + else: + if not isinstance(key, Hashable): + raise ConstructorError( + 'while constructing a mapping', + node.start_mark, + 'found unhashable key', + key_node.start_mark, + ) + # construct but should be null + value = self.construct_object(value_node, deep=deep) # NOQA + self.check_set_key(node, key_node, typ, key) + if key_node.comment: + typ._yaml_add_comment(key_node.comment, key=key) + if value_node.comment: + typ._yaml_add_comment(value_node.comment, value=key) + typ.add(key) + + def construct_yaml_seq(self, node): + # type: (Any) -> Any + data = CommentedSeq() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.comment: + data._yaml_add_comment(node.comment) + yield data + data.extend(self.construct_rt_sequence(node, data)) + self.set_collection_style(data, node) + + def construct_yaml_map(self, node): + # type: (Any) -> Any + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_mapping(node, data, deep=True) + self.set_collection_style(data, node) + + def set_collection_style(self, data, node): + # type: (Any, Any) -> None + if len(data) 
== 0: + return + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + + def construct_yaml_object(self, node, cls): + # type: (Any, Any) -> Any + data = cls.__new__(cls) + yield data + if hasattr(data, '__setstate__'): + state = SafeConstructor.construct_mapping(self, node, deep=True) + data.__setstate__(state) + else: + state = SafeConstructor.construct_mapping(self, node) + data.__dict__.update(state) + + def construct_yaml_omap(self, node): + # type: (Any) -> Any + # Note: we do now check for duplicate keys + omap = CommentedOrderedMap() + omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + omap.fa.set_flow_style() + elif node.flow_style is False: + omap.fa.set_block_style() + yield omap + if node.comment: + omap._yaml_add_comment(node.comment[:2]) + if len(node.comment) > 2: + omap.yaml_end_comment_extend(node.comment[2], clear=True) + if not isinstance(node, SequenceNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a sequence, but found %s' % node.id, + node.start_mark, + ) + for subnode in node.value: + if not isinstance(subnode, MappingNode): + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a mapping of length 1, but found %s' % subnode.id, + subnode.start_mark, + ) + if len(subnode.value) != 1: + raise ConstructorError( + 'while constructing an ordered map', + node.start_mark, + 'expected a single mapping item, but found %d items' % len(subnode.value), + subnode.start_mark, + ) + key_node, value_node = subnode.value[0] + key = self.construct_object(key_node) + assert key not in omap + value = self.construct_object(value_node) + if key_node.comment: + omap._yaml_add_comment(key_node.comment, key=key) + if subnode.comment: + omap._yaml_add_comment(subnode.comment, key=key) + if value_node.comment: + omap._yaml_add_comment(value_node.comment, value=key) + omap[key] = value + + def construct_yaml_set(self, node): + # type: (Any) -> Any + data = CommentedSet() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + yield data + self.construct_setting(node, data) + + def construct_undefined(self, node): + # type: (Any) -> Any + try: + if isinstance(node, MappingNode): + data = CommentedMap() + data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data.fa.set_flow_style() + elif node.flow_style is False: + data.fa.set_block_style() + data.yaml_set_tag(node.tag) + yield data + if node.anchor: + data.yaml_set_anchor(node.anchor) + self.construct_mapping(node, data) + return + elif isinstance(node, ScalarNode): + data2 = TaggedScalar() + data2.value = self.construct_scalar(node) + data2.style = node.style + data2.yaml_set_tag(node.tag) + yield data2 + if node.anchor: + data2.yaml_set_anchor(node.anchor, always_dump=True) + return + elif isinstance(node, SequenceNode): + data3 = CommentedSeq() + data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column) + if node.flow_style is True: + data3.fa.set_flow_style() + elif node.flow_style is False: + data3.fa.set_block_style() + data3.yaml_set_tag(node.tag) + yield data3 + if node.anchor: + data3.yaml_set_anchor(node.anchor) + data3.extend(self.construct_sequence(node)) + return + except: # NOQA + pass + raise ConstructorError( + None, + None, + 'could not determine a constructor for the tag %r' % utf8(node.tag), + node.start_mark, + ) + + def 
construct_yaml_timestamp(self, node, values=None): + # type: (Any, Any) -> Any + try: + match = self.timestamp_regexp.match(node.value) + except TypeError: + match = None + if match is None: + raise ConstructorError( + None, + None, + 'failed to construct timestamp from "{}"'.format(node.value), + node.start_mark, + ) + values = match.groupdict() + if not values['hour']: + return SafeConstructor.construct_yaml_timestamp(self, node, values) + for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']: + if values[part]: + break + else: + return SafeConstructor.construct_yaml_timestamp(self, node, values) + year = int(values['year']) + month = int(values['month']) + day = int(values['day']) + hour = int(values['hour']) + minute = int(values['minute']) + second = int(values['second']) + fraction = 0 + if values['fraction']: + fraction_s = values['fraction'][:6] + while len(fraction_s) < 6: + fraction_s += '0' + fraction = int(fraction_s) + if len(values['fraction']) > 6 and int(values['fraction'][6]) > 4: + fraction += 1 + delta = None + if values['tz_sign']: + tz_hour = int(values['tz_hour']) + minutes = values['tz_minute'] + tz_minute = int(minutes) if minutes else 0 + delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) + if values['tz_sign'] == '-': + delta = -delta + # shold check for NOne and solve issue 366 should be tzinfo=delta) + if delta: + dt = datetime.datetime(year, month, day, hour, minute) + dt -= delta + data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction) + data._yaml['delta'] = delta + tz = values['tz_sign'] + values['tz_hour'] + if values['tz_minute']: + tz += ':' + values['tz_minute'] + data._yaml['tz'] = tz + else: + data = TimeStamp(year, month, day, hour, minute, second, fraction) + if values['tz']: # no delta + data._yaml['tz'] = values['tz'] + + if values['t']: + data._yaml['t'] = True + return data + + def construct_yaml_bool(self, node): + # type: (Any) -> Any + b = SafeConstructor.construct_yaml_bool(self, node) + if node.anchor: + return ScalarBoolean(b, anchor=node.anchor) + return b + + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_null +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:bool', RoundTripConstructor.construct_yaml_bool +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_int +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_float +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:binary', RoundTripConstructor.construct_yaml_binary +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:timestamp', RoundTripConstructor.construct_yaml_timestamp +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:omap', RoundTripConstructor.construct_yaml_omap +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:pairs', RoundTripConstructor.construct_yaml_pairs +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:set', RoundTripConstructor.construct_yaml_set +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:str', RoundTripConstructor.construct_yaml_str +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:seq', RoundTripConstructor.construct_yaml_seq +) + +RoundTripConstructor.add_constructor( + u'tag:yaml.org,2002:map', RoundTripConstructor.construct_yaml_map +) + +RoundTripConstructor.add_constructor(None, RoundTripConstructor.construct_undefined) diff --git 
a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py new file mode 100644 index 0000000000..f8cf47a944 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/cyaml.py @@ -0,0 +1,185 @@ +# coding: utf-8 + +from __future__ import absolute_import + +from _ruamel_yaml import CParser, CEmitter # type: ignore + +from ...ruamel.yaml.constructor import Constructor, BaseConstructor, SafeConstructor +from ...ruamel.yaml.representer import Representer, SafeRepresenter, BaseRepresenter +from ...ruamel.yaml.resolver import Resolver, BaseResolver + +if False: # MYPY + from typing import Any, Union, Optional # NOQA + from ...ruamel.yaml.compat import StreamTextType, StreamType, VersionType # NOQA + +__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader', 'CBaseDumper', 'CSafeDumper', 'CDumper'] + + +# this includes some hacks to solve the usage of resolver by lower level +# parts of the parser + + +class CBaseLoader(CParser, BaseConstructor, BaseResolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + BaseConstructor.__init__(self, loader=self) + BaseResolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CSafeLoader(CParser, SafeConstructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + SafeConstructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CLoader(CParser, Constructor, Resolver): # type: ignore + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + CParser.__init__(self, stream) + self._parser = self._composer = self + Constructor.__init__(self, loader=self) + Resolver.__init__(self, loadumper=self) + # self.descend_resolver = self._resolver.descend_resolver + # self.ascend_resolver = self._resolver.ascend_resolver + # self.resolve = self._resolver.resolve + + +class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = 
self._serializer = self._representer = self + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class CSafeDumper(CEmitter, SafeRepresenter, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + self._emitter = self._serializer = self._representer = self + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + SafeRepresenter.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) + + +class CDumper(CEmitter, Representer, Resolver): # type: ignore + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + self._emitter = self._serializer = self._representer = self + Representer.__init__( + self, default_style=default_style, default_flow_style=default_flow_style + ) + Resolver.__init__(self) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py new file mode 100644 index 0000000000..80b7f4a979 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/dumper.py @@ -0,0 +1,221 @@ +# coding: utf-8 + +from __future__ import absolute_import + +from ...ruamel.yaml.emitter import Emitter +from ...ruamel.yaml.serializer import Serializer +from ...ruamel.yaml.representer import ( + Representer, + SafeRepresenter, + BaseRepresenter, + RoundTripRepresenter, +) +from ...ruamel.yaml.resolver import Resolver, BaseResolver, VersionedResolver + +if False: # MYPY + from typing import Any, Dict, List, Union, Optional # NOQA + from ...ruamel.yaml.compat import StreamType, VersionType # NOQA + +__all__ = ['BaseDumper', 'SafeDumper', 'Dumper', 'RoundTripDumper'] + + +class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + 
allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (Any, StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + BaseRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + BaseResolver.__init__(self, loadumper=self) + + +class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + SafeRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class Dumper(Emitter, Serializer, Representer, Resolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + Representer.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + Resolver.__init__(self, loadumper=self) + + +class RoundTripDumper(Emitter, Serializer, RoundTripRepresenter, VersionedResolver): + def __init__( + self, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + 
block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Optional[bool], Optional[int], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + Emitter.__init__( + self, + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + dumper=self, + ) + Serializer.__init__( + self, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + dumper=self, + ) + RoundTripRepresenter.__init__( + self, + default_style=default_style, + default_flow_style=default_flow_style, + dumper=self, + ) + VersionedResolver.__init__(self, loader=self) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py new file mode 100644 index 0000000000..efc5438263 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/emitter.py @@ -0,0 +1,1696 @@ +# coding: utf-8 + +from __future__ import absolute_import +from __future__ import print_function + +# Emitter expects events obeying the following grammar: +# stream ::= STREAM-START document* STREAM-END +# document ::= DOCUMENT-START node DOCUMENT-END +# node ::= SCALAR | sequence | mapping +# sequence ::= SEQUENCE-START node* SEQUENCE-END +# mapping ::= MAPPING-START (node node)* MAPPING-END + +import sys +from ...ruamel.yaml.error import YAMLError, YAMLStreamError +from ...ruamel.yaml.events import * # NOQA + +# fmt: off +from ...ruamel.yaml.compat import utf8, text_type, PY2, nprint, dbg, DBG_EVENT, \ + check_anchorname_char +# fmt: on + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Tuple, Optional # NOQA + from ...ruamel.yaml.compat import StreamType # NOQA + +__all__ = ['Emitter', 'EmitterError'] + + +class EmitterError(YAMLError): + pass + + +class ScalarAnalysis(object): + def __init__( + self, + scalar, + empty, + multiline, + allow_flow_plain, + allow_block_plain, + allow_single_quoted, + allow_double_quoted, + allow_block, + ): + # type: (Any, Any, Any, bool, bool, bool, bool, bool) -> None + self.scalar = scalar + self.empty = empty + self.multiline = multiline + self.allow_flow_plain = allow_flow_plain + self.allow_block_plain = allow_block_plain + self.allow_single_quoted = allow_single_quoted + self.allow_double_quoted = allow_double_quoted + self.allow_block = allow_block + + +class Indents(object): + # replacement for the list based stack of None/int + def __init__(self): + # type: () -> None + self.values = [] # type: List[Tuple[int, bool]] + + def append(self, val, seq): + # type: (Any, Any) -> None + self.values.append((val, seq)) + + def pop(self): + # type: () -> Any + return self.values.pop()[0] + + def last_seq(self): + # type: () -> bool + # return the seq(uence) value for the element added before the last one + # in increase_indent() + try: + return self.values[-2][1] + except IndexError: + return False + + def seq_flow_align(self, seq_indent, column): + # type: (int, int) -> int + # extra spaces because of dash + if len(self.values) < 2 or not self.values[-1][1]: + return 0 + # -1 for the dash + base = self.values[-1][0] if self.values[-1][0] is not None else 0 + return base + seq_indent - column - 
1 + + def __len__(self): + # type: () -> int + return len(self.values) + + +class Emitter(object): + # fmt: off + DEFAULT_TAG_PREFIXES = { + u'!': u'!', + u'tag:yaml.org,2002:': u'!!', + } + # fmt: on + + MAX_SIMPLE_KEY_LENGTH = 128 + + def __init__( + self, + stream, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + brace_single_entry_mapping_in_flow_sequence=None, + dumper=None, + ): + # type: (StreamType, Any, Optional[int], Optional[int], Optional[bool], Any, Optional[int], Optional[bool], Any, Optional[bool], Any) -> None # NOQA + self.dumper = dumper + if self.dumper is not None and getattr(self.dumper, '_emitter', None) is None: + self.dumper._emitter = self + self.stream = stream + + # Encoding can be overriden by STREAM-START. + self.encoding = None # type: Optional[Text] + self.allow_space_break = None + + # Emitter is a state machine with a stack of states to handle nested + # structures. + self.states = [] # type: List[Any] + self.state = self.expect_stream_start # type: Any + + # Current event and the event queue. + self.events = [] # type: List[Any] + self.event = None # type: Any + + # The current indentation level and the stack of previous indents. + self.indents = Indents() + self.indent = None # type: Optional[int] + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # Contexts. + self.root_context = False + self.sequence_context = False + self.mapping_context = False + self.simple_key_context = False + + # Characteristics of the last emitted character: + # - current position. + # - is it a whitespace? + # - is it an indention character + # (indentation space, '-', '?', or ':')? + self.line = 0 + self.column = 0 + self.whitespace = True + self.indention = True + self.compact_seq_seq = True # dash after dash + self.compact_seq_map = True # key after dash + # self.compact_ms = False # dash after key, only when excplicit key with ? + self.no_newline = None # type: Optional[bool] # set if directly after `- ` + + # Whether the document requires an explicit document end indicator + self.open_ended = False + + # colon handling + self.colon = u':' + self.prefixed_colon = self.colon if prefix_colon is None else prefix_colon + self.colon + # single entry mappings in flow sequence + self.brace_single_entry_mapping_in_flow_sequence = ( + brace_single_entry_mapping_in_flow_sequence # NOQA + ) + + # Formatting details. + self.canonical = canonical + self.allow_unicode = allow_unicode + # set to False to get "\Uxxxxxxxx" for non-basic unicode like emojis + self.unicode_supplementary = sys.maxunicode > 0xFFFF + self.sequence_dash_offset = block_seq_indent if block_seq_indent else 0 + self.top_level_colon_align = top_level_colon_align + self.best_sequence_indent = 2 + self.requested_indent = indent # specific for literal zero indent + if indent and 1 < indent < 10: + self.best_sequence_indent = indent + self.best_map_indent = self.best_sequence_indent + # if self.best_sequence_indent < self.sequence_dash_offset + 1: + # self.best_sequence_indent = self.sequence_dash_offset + 1 + self.best_width = 80 + if width and width > self.best_sequence_indent * 2: + self.best_width = width + self.best_line_break = u'\n' # type: Any + if line_break in [u'\r', u'\n', u'\r\n']: + self.best_line_break = line_break + + # Tag prefixes. 
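+        # Maps tag prefixes to handles; reset from DEFAULT_TAG_PREFIXES at
+        # each document start (see expect_document_start).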
+ self.tag_prefixes = None # type: Any + + # Prepared anchor and tag. + self.prepared_anchor = None # type: Any + self.prepared_tag = None # type: Any + + # Scalar analysis and style. + self.analysis = None # type: Any + self.style = None # type: Any + + self.scalar_after_indicator = True # write a scalar on the same line as `---` + + self.alt_null = 'null' + + @property + def stream(self): + # type: () -> Any + try: + return self._stream + except AttributeError: + raise YAMLStreamError('output stream needs to specified') + + @stream.setter + def stream(self, val): + # type: (Any) -> None + if val is None: + return + if not hasattr(val, 'write'): + raise YAMLStreamError('stream argument needs to have a write() method') + self._stream = val + + @property + def serializer(self): + # type: () -> Any + try: + if hasattr(self.dumper, 'typ'): + return self.dumper.serializer + return self.dumper._serializer + except AttributeError: + return self # cyaml + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def dispose(self): + # type: () -> None + # Reset the state attributes (to clear self-references) + self.states = [] + self.state = None + + def emit(self, event): + # type: (Any) -> None + if dbg(DBG_EVENT): + nprint(event) + self.events.append(event) + while not self.need_more_events(): + self.event = self.events.pop(0) + self.state() + self.event = None + + # In some cases, we wait for a few next events before emitting. + + def need_more_events(self): + # type: () -> bool + if not self.events: + return True + event = self.events[0] + if isinstance(event, DocumentStartEvent): + return self.need_events(1) + elif isinstance(event, SequenceStartEvent): + return self.need_events(2) + elif isinstance(event, MappingStartEvent): + return self.need_events(3) + else: + return False + + def need_events(self, count): + # type: (int) -> bool + level = 0 + for event in self.events[1:]: + if isinstance(event, (DocumentStartEvent, CollectionStartEvent)): + level += 1 + elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)): + level -= 1 + elif isinstance(event, StreamEndEvent): + level = -1 + if level < 0: + return False + return len(self.events) < count + 1 + + def increase_indent(self, flow=False, sequence=None, indentless=False): + # type: (bool, Optional[bool], bool) -> None + self.indents.append(self.indent, sequence) + if self.indent is None: # top level + if flow: + # self.indent = self.best_sequence_indent if self.indents.last_seq() else \ + # self.best_map_indent + # self.indent = self.best_sequence_indent + self.indent = self.requested_indent + else: + self.indent = 0 + elif not indentless: + self.indent += ( + self.best_sequence_indent if self.indents.last_seq() else self.best_map_indent + ) + # if self.indents.last_seq(): + # if self.indent == 0: # top level block sequence + # self.indent = self.best_sequence_indent - self.sequence_dash_offset + # else: + # self.indent += self.best_sequence_indent + # else: + # self.indent += self.best_map_indent + + # States. + + # Stream handlers. 
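+    # The expect_* methods below are the states of the emitter state machine;
+    # expect_stream_start records the stream encoding and hands control to
+    # the document handlers.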
+ + def expect_stream_start(self): + # type: () -> None + if isinstance(self.event, StreamStartEvent): + if PY2: + if self.event.encoding and not getattr(self.stream, 'encoding', None): + self.encoding = self.event.encoding + else: + if self.event.encoding and not hasattr(self.stream, 'encoding'): + self.encoding = self.event.encoding + self.write_stream_start() + self.state = self.expect_first_document_start + else: + raise EmitterError('expected StreamStartEvent, but got %s' % (self.event,)) + + def expect_nothing(self): + # type: () -> None + raise EmitterError('expected nothing, but got %s' % (self.event,)) + + # Document handlers. + + def expect_first_document_start(self): + # type: () -> Any + return self.expect_document_start(first=True) + + def expect_document_start(self, first=False): + # type: (bool) -> None + if isinstance(self.event, DocumentStartEvent): + if (self.event.version or self.event.tags) and self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + if self.event.version: + version_text = self.prepare_version(self.event.version) + self.write_version_directive(version_text) + self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy() + if self.event.tags: + handles = sorted(self.event.tags.keys()) + for handle in handles: + prefix = self.event.tags[handle] + self.tag_prefixes[prefix] = handle + handle_text = self.prepare_tag_handle(handle) + prefix_text = self.prepare_tag_prefix(prefix) + self.write_tag_directive(handle_text, prefix_text) + implicit = ( + first + and not self.event.explicit + and not self.canonical + and not self.event.version + and not self.event.tags + and not self.check_empty_document() + ) + if not implicit: + self.write_indent() + self.write_indicator(u'---', True) + if self.canonical: + self.write_indent() + self.state = self.expect_document_root + elif isinstance(self.event, StreamEndEvent): + if self.open_ended: + self.write_indicator(u'...', True) + self.write_indent() + self.write_stream_end() + self.state = self.expect_nothing + else: + raise EmitterError('expected DocumentStartEvent, but got %s' % (self.event,)) + + def expect_document_end(self): + # type: () -> None + if isinstance(self.event, DocumentEndEvent): + self.write_indent() + if self.event.explicit: + self.write_indicator(u'...', True) + self.write_indent() + self.flush_stream() + self.state = self.expect_document_start + else: + raise EmitterError('expected DocumentEndEvent, but got %s' % (self.event,)) + + def expect_document_root(self): + # type: () -> None + self.states.append(self.expect_document_end) + self.expect_node(root=True) + + # Node handlers. 
+ + def expect_node(self, root=False, sequence=False, mapping=False, simple_key=False): + # type: (bool, bool, bool, bool) -> None + self.root_context = root + self.sequence_context = sequence # not used in PyYAML + self.mapping_context = mapping + self.simple_key_context = simple_key + if isinstance(self.event, AliasEvent): + self.expect_alias() + elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)): + if ( + self.process_anchor(u'&') + and isinstance(self.event, ScalarEvent) + and self.sequence_context + ): + self.sequence_context = False + if ( + root + and isinstance(self.event, ScalarEvent) + and not self.scalar_after_indicator + ): + self.write_indent() + self.process_tag() + if isinstance(self.event, ScalarEvent): + # nprint('@', self.indention, self.no_newline, self.column) + self.expect_scalar() + elif isinstance(self.event, SequenceStartEvent): + # nprint('@', self.indention, self.no_newline, self.column) + i2, n2 = self.indention, self.no_newline # NOQA + if self.event.comment: + if self.event.flow_style is False and self.event.comment: + if self.write_post_comment(self.event): + self.indention = False + self.no_newline = True + if self.write_pre_comment(self.event): + self.indention = i2 + self.no_newline = not self.indention + if ( + self.flow_level + or self.canonical + or self.event.flow_style + or self.check_empty_sequence() + ): + self.expect_flow_sequence() + else: + self.expect_block_sequence() + elif isinstance(self.event, MappingStartEvent): + if self.event.flow_style is False and self.event.comment: + self.write_post_comment(self.event) + if self.event.comment and self.event.comment[1]: + self.write_pre_comment(self.event) + if ( + self.flow_level + or self.canonical + or self.event.flow_style + or self.check_empty_mapping() + ): + self.expect_flow_mapping(single=self.event.nr_items == 1) + else: + self.expect_block_mapping() + else: + raise EmitterError('expected NodeEvent, but got %s' % (self.event,)) + + def expect_alias(self): + # type: () -> None + if self.event.anchor is None: + raise EmitterError('anchor is not specified for alias') + self.process_anchor(u'*') + self.state = self.states.pop() + + def expect_scalar(self): + # type: () -> None + self.increase_indent(flow=True) + self.process_scalar() + self.indent = self.indents.pop() + self.state = self.states.pop() + + # Flow sequence handlers. 
+ + def expect_flow_sequence(self): + # type: () -> None + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column) + self.write_indicator(u' ' * ind + u'[', True, whitespace=True) + self.increase_indent(flow=True, sequence=True) + self.flow_context.append('[') + self.state = self.expect_first_flow_sequence_item + + def expect_first_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + self.write_indicator(u']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty flow sequence + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + def expect_flow_sequence_item(self): + # type: () -> None + if isinstance(self.event, SequenceEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '[' + if self.canonical: + self.write_indicator(u',', False) + self.write_indent() + self.write_indicator(u']', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow sequence + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + self.states.append(self.expect_flow_sequence_item) + self.expect_node(sequence=True) + + # Flow mapping handlers. + + def expect_flow_mapping(self, single=False): + # type: (Optional[bool]) -> None + ind = self.indents.seq_flow_align(self.best_sequence_indent, self.column) + map_init = u'{' + if ( + single + and self.flow_level + and self.flow_context[-1] == '[' + and not self.canonical + and not self.brace_single_entry_mapping_in_flow_sequence + ): + # single map item with flow context, no curly braces necessary + map_init = u'' + self.write_indicator(u' ' * ind + map_init, True, whitespace=True) + self.flow_context.append(map_init) + self.increase_indent(flow=True, sequence=False) + self.state = self.expect_first_flow_mapping_key + + def expect_first_flow_mapping_key(self): + # type: () -> None + if isinstance(self.event, MappingEndEvent): + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped == '{' # empty flow mapping + self.write_indicator(u'}', False) + if self.event.comment and self.event.comment[0]: + # eol comment on empty mapping + self.write_post_comment(self.event) + elif self.flow_level == 0: + self.write_line_break() + self.state = self.states.pop() + else: + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_key(self): + # type: () -> None + if isinstance(self.event, MappingEndEvent): + # if self.event.comment and self.event.comment[1]: + # self.write_pre_comment(self.event) + self.indent = self.indents.pop() + popped = self.flow_context.pop() + assert popped in [u'{', u''] + if self.canonical: + self.write_indicator(u',', False) + 
self.write_indent() + if popped != u'': + self.write_indicator(u'}', False) + if self.event.comment and self.event.comment[0]: + # eol comment on flow mapping, never reached on empty mappings + self.write_post_comment(self.event) + else: + self.no_newline = False + self.state = self.states.pop() + else: + self.write_indicator(u',', False) + if self.canonical or self.column > self.best_width: + self.write_indent() + if not self.canonical and self.check_simple_key(): + self.states.append(self.expect_flow_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + else: + self.write_indicator(u'?', True) + self.states.append(self.expect_flow_mapping_value) + self.expect_node(mapping=True) + + def expect_flow_mapping_simple_value(self): + # type: () -> None + self.write_indicator(self.prefixed_colon, False) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + def expect_flow_mapping_value(self): + # type: () -> None + if self.canonical or self.column > self.best_width: + self.write_indent() + self.write_indicator(self.prefixed_colon, True) + self.states.append(self.expect_flow_mapping_key) + self.expect_node(mapping=True) + + # Block sequence handlers. + + def expect_block_sequence(self): + # type: () -> None + if self.mapping_context: + indentless = not self.indention + else: + indentless = False + if not self.compact_seq_seq and self.column != 0: + self.write_line_break() + self.increase_indent(flow=False, sequence=True, indentless=indentless) + self.state = self.expect_first_block_sequence_item + + def expect_first_block_sequence_item(self): + # type: () -> Any + return self.expect_block_sequence_item(first=True) + + def expect_block_sequence_item(self, first=False): + # type: (bool) -> None + if not first and isinstance(self.event, SequenceEndEvent): + if self.event.comment and self.event.comment[1]: + # final comments on a block list e.g. empty line + self.write_pre_comment(self.event) + self.indent = self.indents.pop() + self.state = self.states.pop() + self.no_newline = False + else: + if self.event.comment and self.event.comment[1]: + self.write_pre_comment(self.event) + nonl = self.no_newline if self.column == 0 else False + self.write_indent() + ind = self.sequence_dash_offset # if len(self.indents) > 1 else 0 + self.write_indicator(u' ' * ind + u'-', True, indention=True) + if nonl or self.sequence_dash_offset + 2 > self.best_sequence_indent: + self.no_newline = True + self.states.append(self.expect_block_sequence_item) + self.expect_node(sequence=True) + + # Block mapping handlers. 
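+    # Note (illustrative annotation): block mappings are emitted in two
+    # shapes.  When check_simple_key() accepts the key (a short, single-line
+    # scalar, alias, or empty flow collection), expect_block_mapping_key
+    # writes the key and expect_block_mapping_simple_value appends ': value'
+    # on the same line.  Otherwise the key is written after an explicit '?'
+    # indicator and the value after ':' on its own indented line, e.g.:
+    #     ? <long or multiline key>
+    #     : value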
+ + def expect_block_mapping(self): + # type: () -> None + if not self.mapping_context and not (self.compact_seq_map or self.column == 0): + self.write_line_break() + self.increase_indent(flow=False, sequence=False) + self.state = self.expect_first_block_mapping_key + + def expect_first_block_mapping_key(self): + # type: () -> None + return self.expect_block_mapping_key(first=True) + + def expect_block_mapping_key(self, first=False): + # type: (Any) -> None + if not first and isinstance(self.event, MappingEndEvent): + if self.event.comment and self.event.comment[1]: + # final comments from a doc + self.write_pre_comment(self.event) + self.indent = self.indents.pop() + self.state = self.states.pop() + else: + if self.event.comment and self.event.comment[1]: + # final comments from a doc + self.write_pre_comment(self.event) + self.write_indent() + if self.check_simple_key(): + if not isinstance( + self.event, (SequenceStartEvent, MappingStartEvent) + ): # sequence keys + try: + if self.event.style == '?': + self.write_indicator(u'?', True, indention=True) + except AttributeError: # aliases have no style + pass + self.states.append(self.expect_block_mapping_simple_value) + self.expect_node(mapping=True, simple_key=True) + if isinstance(self.event, AliasEvent): + self.stream.write(u' ') + else: + self.write_indicator(u'?', True, indention=True) + self.states.append(self.expect_block_mapping_value) + self.expect_node(mapping=True) + + def expect_block_mapping_simple_value(self): + # type: () -> None + if getattr(self.event, 'style', None) != '?': + # prefix = u'' + if self.indent == 0 and self.top_level_colon_align is not None: + # write non-prefixed colon + c = u' ' * (self.top_level_colon_align - self.column) + self.colon + else: + c = self.prefixed_colon + self.write_indicator(c, False) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + def expect_block_mapping_value(self): + # type: () -> None + self.write_indent() + self.write_indicator(self.prefixed_colon, True, indention=True) + self.states.append(self.expect_block_mapping_key) + self.expect_node(mapping=True) + + # Checkers. 
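+    # Note (illustrative annotation): these checkers look ahead without
+    # consuming events.  check_empty_sequence/check_empty_mapping peek at
+    # self.events[0] for an immediately following end event so '[]' / '{}'
+    # can be written directly; check_simple_key estimates the rendered length
+    # of anchor + tag + scalar and only allows the compact 'key: value' form
+    # when that stays below MAX_SIMPLE_KEY_LENGTH and the key renders as a
+    # single-line scalar.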
+ + def check_empty_sequence(self): + # type: () -> bool + return ( + isinstance(self.event, SequenceStartEvent) + and bool(self.events) + and isinstance(self.events[0], SequenceEndEvent) + ) + + def check_empty_mapping(self): + # type: () -> bool + return ( + isinstance(self.event, MappingStartEvent) + and bool(self.events) + and isinstance(self.events[0], MappingEndEvent) + ) + + def check_empty_document(self): + # type: () -> bool + if not isinstance(self.event, DocumentStartEvent) or not self.events: + return False + event = self.events[0] + return ( + isinstance(event, ScalarEvent) + and event.anchor is None + and event.tag is None + and event.implicit + and event.value == "" + ) + + def check_simple_key(self): + # type: () -> bool + length = 0 + if isinstance(self.event, NodeEvent) and self.event.anchor is not None: + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + length += len(self.prepared_anchor) + if ( + isinstance(self.event, (ScalarEvent, CollectionStartEvent)) + and self.event.tag is not None + ): + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(self.event.tag) + length += len(self.prepared_tag) + if isinstance(self.event, ScalarEvent): + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + length += len(self.analysis.scalar) + return length < self.MAX_SIMPLE_KEY_LENGTH and ( + isinstance(self.event, AliasEvent) + or (isinstance(self.event, SequenceStartEvent) and self.event.flow_style is True) + or (isinstance(self.event, MappingStartEvent) and self.event.flow_style is True) + or ( + isinstance(self.event, ScalarEvent) + # if there is an explicit style for an empty string, it is a simple key + and not (self.analysis.empty and self.style and self.style not in '\'"') + and not self.analysis.multiline + ) + or self.check_empty_sequence() + or self.check_empty_mapping() + ) + + # Anchor, Tag, and Scalar processors. + + def process_anchor(self, indicator): + # type: (Any) -> bool + if self.event.anchor is None: + self.prepared_anchor = None + return False + if self.prepared_anchor is None: + self.prepared_anchor = self.prepare_anchor(self.event.anchor) + if self.prepared_anchor: + self.write_indicator(indicator + self.prepared_anchor, True) + # issue 288 + self.no_newline = False + self.prepared_anchor = None + return True + + def process_tag(self): + # type: () -> None + tag = self.event.tag + if isinstance(self.event, ScalarEvent): + if self.style is None: + self.style = self.choose_scalar_style() + if self.event.value == '' and self.style == "'" and \ + tag == 'tag:yaml.org,2002:null' and self.alt_null is not None: + self.event.value = self.alt_null + self.analysis = None + self.style = self.choose_scalar_style() + if (not self.canonical or tag is None) and ( + (self.style == "" and self.event.implicit[0]) + or (self.style != "" and self.event.implicit[1]) + ): + self.prepared_tag = None + return + if self.event.implicit[0] and tag is None: + tag = u'!' 
+ self.prepared_tag = None + else: + if (not self.canonical or tag is None) and self.event.implicit: + self.prepared_tag = None + return + if tag is None: + raise EmitterError('tag is not specified') + if self.prepared_tag is None: + self.prepared_tag = self.prepare_tag(tag) + if self.prepared_tag: + self.write_indicator(self.prepared_tag, True) + if ( + self.sequence_context + and not self.flow_level + and isinstance(self.event, ScalarEvent) + ): + self.no_newline = True + self.prepared_tag = None + + def choose_scalar_style(self): + # type: () -> Any + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.event.style == '"' or self.canonical: + return '"' + if (not self.event.style or self.event.style == '?') and ( + self.event.implicit[0] or not self.event.implicit[2] + ): + if not ( + self.simple_key_context and (self.analysis.empty or self.analysis.multiline) + ) and ( + self.flow_level + and self.analysis.allow_flow_plain + or (not self.flow_level and self.analysis.allow_block_plain) + ): + return "" + self.analysis.allow_block = True + if self.event.style and self.event.style in '|>': + if ( + not self.flow_level + and not self.simple_key_context + and self.analysis.allow_block + ): + return self.event.style + if not self.event.style and self.analysis.allow_double_quoted: + if "'" in self.event.value or '\n' in self.event.value: + return '"' + if not self.event.style or self.event.style == "'": + if self.analysis.allow_single_quoted and not ( + self.simple_key_context and self.analysis.multiline + ): + return "'" + return '"' + + def process_scalar(self): + # type: () -> None + if self.analysis is None: + self.analysis = self.analyze_scalar(self.event.value) + if self.style is None: + self.style = self.choose_scalar_style() + split = not self.simple_key_context + # if self.analysis.multiline and split \ + # and (not self.style or self.style in '\'\"'): + # self.write_indent() + # nprint('xx', self.sequence_context, self.flow_level) + if self.sequence_context and not self.flow_level: + self.write_indent() + if self.style == '"': + self.write_double_quoted(self.analysis.scalar, split) + elif self.style == "'": + self.write_single_quoted(self.analysis.scalar, split) + elif self.style == '>': + self.write_folded(self.analysis.scalar) + elif self.style == '|': + self.write_literal(self.analysis.scalar, self.event.comment) + else: + self.write_plain(self.analysis.scalar, split) + self.analysis = None + self.style = None + if self.event.comment: + self.write_post_comment(self.event) + + # Analyzers. + + def prepare_version(self, version): + # type: (Any) -> Any + major, minor = version + if major != 1: + raise EmitterError('unsupported YAML version: %d.%d' % (major, minor)) + return u'%d.%d' % (major, minor) + + def prepare_tag_handle(self, handle): + # type: (Any) -> Any + if not handle: + raise EmitterError('tag handle must not be empty') + if handle[0] != u'!' 
or handle[-1] != u'!': + raise EmitterError("tag handle must start and end with '!': %r" % (utf8(handle))) + for ch in handle[1:-1]: + if not ( + u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in u'-_' + ): + raise EmitterError( + 'invalid character %r in the tag handle: %r' % (utf8(ch), utf8(handle)) + ) + return handle + + def prepare_tag_prefix(self, prefix): + # type: (Any) -> Any + if not prefix: + raise EmitterError('tag prefix must not be empty') + chunks = [] # type: List[Any] + start = end = 0 + if prefix[0] == u'!': + end = 1 + ch_set = u"-;/?:@&=+$,_.~*'()[]" + if self.dumper: + version = getattr(self.dumper, 'version', (1, 2)) + if version is None or version >= (1, 2): + ch_set += u'#' + while end < len(prefix): + ch = prefix[end] + if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' or ch in ch_set: + end += 1 + else: + if start < end: + chunks.append(prefix[start:end]) + start = end = end + 1 + data = utf8(ch) + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(prefix[start:end]) + return "".join(chunks) + + def prepare_tag(self, tag): + # type: (Any) -> Any + if not tag: + raise EmitterError('tag must not be empty') + if tag == u'!': + return tag + handle = None + suffix = tag + prefixes = sorted(self.tag_prefixes.keys()) + for prefix in prefixes: + if tag.startswith(prefix) and (prefix == u'!' or len(prefix) < len(tag)): + handle = self.tag_prefixes[prefix] + suffix = tag[len(prefix) :] + chunks = [] # type: List[Any] + start = end = 0 + ch_set = u"-;/?:@&=+$,_.~*'()[]" + if self.dumper: + version = getattr(self.dumper, 'version', (1, 2)) + if version is None or version >= (1, 2): + ch_set += u'#' + while end < len(suffix): + ch = suffix[end] + if ( + u'0' <= ch <= u'9' + or u'A' <= ch <= u'Z' + or u'a' <= ch <= u'z' + or ch in ch_set + or (ch == u'!' and handle != u'!') + ): + end += 1 + else: + if start < end: + chunks.append(suffix[start:end]) + start = end = end + 1 + data = utf8(ch) + for ch in data: + chunks.append(u'%%%02X' % ord(ch)) + if start < end: + chunks.append(suffix[start:end]) + suffix_text = "".join(chunks) + if handle: + return u'%s%s' % (handle, suffix_text) + else: + return u'!<%s>' % suffix_text + + def prepare_anchor(self, anchor): + # type: (Any) -> Any + if not anchor: + raise EmitterError('anchor must not be empty') + for ch in anchor: + if not check_anchorname_char(ch): + raise EmitterError( + 'invalid character %r in the anchor: %r' % (utf8(ch), utf8(anchor)) + ) + return anchor + + def analyze_scalar(self, scalar): + # type: (Any) -> Any + # Empty scalar is a special case. + if not scalar: + return ScalarAnalysis( + scalar=scalar, + empty=True, + multiline=False, + allow_flow_plain=False, + allow_block_plain=True, + allow_single_quoted=True, + allow_double_quoted=True, + allow_block=False, + ) + + # Indicators and special characters. + block_indicators = False + flow_indicators = False + line_breaks = False + special_characters = False + + # Important whitespace combinations. + leading_space = False + leading_break = False + trailing_space = False + trailing_break = False + break_space = False + space_break = False + + # Check document indicators. + if scalar.startswith(u'---') or scalar.startswith(u'...'): + block_indicators = True + flow_indicators = True + + # First character or preceded by a whitespace. + preceeded_by_whitespace = True + + # Last character or followed by a whitespace. 
+ followed_by_whitespace = len(scalar) == 1 or scalar[1] in u'\0 \t\r\n\x85\u2028\u2029' + + # The previous character is a space. + previous_space = False + + # The previous character is a break. + previous_break = False + + index = 0 + while index < len(scalar): + ch = scalar[index] + + # Check for indicators. + if index == 0: + # Leading indicators are special characters. + if ch in u'#,[]{}&*!|>\'"%@`': + flow_indicators = True + block_indicators = True + if ch in u'?:': # ToDo + if self.serializer.use_version == (1, 1): + flow_indicators = True + elif len(scalar) == 1: # single character + flow_indicators = True + if followed_by_whitespace: + block_indicators = True + if ch == u'-' and followed_by_whitespace: + flow_indicators = True + block_indicators = True + else: + # Some indicators cannot appear within a scalar as well. + if ch in u',[]{}': # http://yaml.org/spec/1.2/spec.html#id2788859 + flow_indicators = True + if ch == u'?' and self.serializer.use_version == (1, 1): + flow_indicators = True + if ch == u':': + if followed_by_whitespace: + flow_indicators = True + block_indicators = True + if ch == u'#' and preceeded_by_whitespace: + flow_indicators = True + block_indicators = True + + # Check for line breaks, special, and unicode characters. + if ch in u'\n\x85\u2028\u2029': + line_breaks = True + if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'): + if ( + ch == u'\x85' + or u'\xA0' <= ch <= u'\uD7FF' + or u'\uE000' <= ch <= u'\uFFFD' + or (self.unicode_supplementary and (u'\U00010000' <= ch <= u'\U0010FFFF')) + ) and ch != u'\uFEFF': + # unicode_characters = True + if not self.allow_unicode: + special_characters = True + else: + special_characters = True + + # Detect important whitespace combinations. + if ch == u' ': + if index == 0: + leading_space = True + if index == len(scalar) - 1: + trailing_space = True + if previous_break: + break_space = True + previous_space = True + previous_break = False + elif ch in u'\n\x85\u2028\u2029': + if index == 0: + leading_break = True + if index == len(scalar) - 1: + trailing_break = True + if previous_space: + space_break = True + previous_space = False + previous_break = True + else: + previous_space = False + previous_break = False + + # Prepare for the next character. + index += 1 + preceeded_by_whitespace = ch in u'\0 \t\r\n\x85\u2028\u2029' + followed_by_whitespace = ( + index + 1 >= len(scalar) or scalar[index + 1] in u'\0 \t\r\n\x85\u2028\u2029' + ) + + # Let's decide what styles are allowed. + allow_flow_plain = True + allow_block_plain = True + allow_single_quoted = True + allow_double_quoted = True + allow_block = True + + # Leading and trailing whitespaces are bad for plain scalars. + if leading_space or leading_break or trailing_space or trailing_break: + allow_flow_plain = allow_block_plain = False + + # We do not permit trailing spaces for block scalars. + if trailing_space: + allow_block = False + + # Spaces at the beginning of a new line are only acceptable for block + # scalars. + if break_space: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + + # Spaces followed by breaks, as well as special character are only + # allowed for double quoted scalars. + if special_characters: + allow_flow_plain = allow_block_plain = allow_single_quoted = allow_block = False + elif space_break: + allow_flow_plain = allow_block_plain = allow_single_quoted = False + if not self.allow_space_break: + allow_block = False + + # Although the plain scalar writer supports breaks, we never emit + # multiline plain scalars. 
+ if line_breaks: + allow_flow_plain = allow_block_plain = False + + # Flow indicators are forbidden for flow plain scalars. + if flow_indicators: + allow_flow_plain = False + + # Block indicators are forbidden for block plain scalars. + if block_indicators: + allow_block_plain = False + + return ScalarAnalysis( + scalar=scalar, + empty=False, + multiline=line_breaks, + allow_flow_plain=allow_flow_plain, + allow_block_plain=allow_block_plain, + allow_single_quoted=allow_single_quoted, + allow_double_quoted=allow_double_quoted, + allow_block=allow_block, + ) + + # Writers. + + def flush_stream(self): + # type: () -> None + if hasattr(self.stream, 'flush'): + self.stream.flush() + + def write_stream_start(self): + # type: () -> None + # Write BOM if needed. + if self.encoding and self.encoding.startswith('utf-16'): + self.stream.write(u'\uFEFF'.encode(self.encoding)) + + def write_stream_end(self): + # type: () -> None + self.flush_stream() + + def write_indicator(self, indicator, need_whitespace, whitespace=False, indention=False): + # type: (Any, Any, bool, bool) -> None + if self.whitespace or not need_whitespace: + data = indicator + else: + data = u' ' + indicator + self.whitespace = whitespace + self.indention = self.indention and indention + self.column += len(data) + self.open_ended = False + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + + def write_indent(self): + # type: () -> None + indent = self.indent or 0 + if ( + not self.indention + or self.column > indent + or (self.column == indent and not self.whitespace) + ): + if bool(self.no_newline): + self.no_newline = False + else: + self.write_line_break() + if self.column < indent: + self.whitespace = True + data = u' ' * (indent - self.column) + self.column = indent + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + + def write_line_break(self, data=None): + # type: (Any) -> None + if data is None: + data = self.best_line_break + self.whitespace = True + self.indention = True + self.line += 1 + self.column = 0 + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + + def write_version_directive(self, version_text): + # type: (Any) -> None + data = u'%%YAML %s' % version_text + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + def write_tag_directive(self, handle_text, prefix_text): + # type: (Any, Any) -> None + data = u'%%TAG %s %s' % (handle_text, prefix_text) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.write_line_break() + + # Scalar streams. 
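+    # Note (illustrative annotation): the scalar writers below render the
+    # style selected by choose_scalar_style()/process_scalar(): plain,
+    # 'single quoted', "double quoted", folded (>) or literal (|).  For
+    # example, a multi-line value emitted with the literal style becomes:
+    #     key: |
+    #       line 1
+    #       line 2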
+ + def write_single_quoted(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + self.write_indicator(u"'", True) + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch is None or ch != u' ': + if ( + start + 1 == end + and self.column > self.best_width + and split + and start != 0 + and end != len(text) + ): + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u"'": + if start < end: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch == u"'": + data = u"''" + self.column += 2 + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ch is not None: + spaces = ch == u' ' + breaks = ch in u'\n\x85\u2028\u2029' + end += 1 + self.write_indicator(u"'", False) + + ESCAPE_REPLACEMENTS = { + u'\0': u'0', + u'\x07': u'a', + u'\x08': u'b', + u'\x09': u't', + u'\x0A': u'n', + u'\x0B': u'v', + u'\x0C': u'f', + u'\x0D': u'r', + u'\x1B': u'e', + u'"': u'"', + u'\\': u'\\', + u'\x85': u'N', + u'\xA0': u'_', + u'\u2028': u'L', + u'\u2029': u'P', + } + + def write_double_quoted(self, text, split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + self.write_indicator(u'"', True) + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if ( + ch is None + or ch in u'"\\\x85\u2028\u2029\uFEFF' + or not ( + u'\x20' <= ch <= u'\x7E' + or ( + self.allow_unicode + and (u'\xA0' <= ch <= u'\uD7FF' or u'\uE000' <= ch <= u'\uFFFD') + ) + ) + ): + if start < end: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + if ch is not None: + if ch in self.ESCAPE_REPLACEMENTS: + data = u'\\' + self.ESCAPE_REPLACEMENTS[ch] + elif ch <= u'\xFF': + data = u'\\x%02X' % ord(ch) + elif ch <= u'\uFFFF': + data = u'\\u%04X' % ord(ch) + else: + data = u'\\U%08X' % ord(ch) + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + 1 + if ( + 0 < end < len(text) - 1 + and (ch == u' ' or start >= end) + and self.column + (end - start) > self.best_width + and split + ): + data = text[start:end] + u'\\' + if start < end: + start = end + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + self.write_indent() + self.whitespace = False + self.indention = False + if text[start] == u' ': + data = u'\\' + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + end += 1 + self.write_indicator(u'"', False) + + def 
determine_block_hints(self, text): + # type: (Any) -> Any + indent = 0 + indicator = u'' + hints = u'' + if text: + if text[0] in u' \n\x85\u2028\u2029': + indent = self.best_sequence_indent + hints += text_type(indent) + elif self.root_context: + for end in ['\n---', '\n...']: + pos = 0 + while True: + pos = text.find(end, pos) + if pos == -1: + break + try: + if text[pos + 4] in ' \r\n': + break + except IndexError: + pass + pos += 1 + if pos > -1: + break + if pos > 0: + indent = self.best_sequence_indent + if text[-1] not in u'\n\x85\u2028\u2029': + indicator = u'-' + elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029': + indicator = u'+' + hints += indicator + return hints, indent, indicator + + def write_folded(self, text): + # type: (Any) -> None + hints, _indent, _indicator = self.determine_block_hints(text) + self.write_indicator(u'>' + hints, True) + if _indicator == u'+': + self.open_ended = True + self.write_line_break() + leading_space = True + spaces = False + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029\a': + if ( + not leading_space + and ch is not None + and ch != u' ' + and text[start] == u'\n' + ): + self.write_line_break() + leading_space = ch == u' ' + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + self.write_indent() + start = end + elif spaces: + if ch != u' ': + if start + 1 == end and self.column > self.best_width: + self.write_indent() + else: + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029\a': + data = text[start:end] + self.column += len(data) + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + if ch == u'\a': + if end < (len(text) - 1) and not text[end + 2].isspace(): + self.write_line_break() + self.write_indent() + end += 2 # \a and the space that is inserted on the fold + else: + raise EmitterError('unexcpected fold indicator \\a before space') + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = ch in u'\n\x85\u2028\u2029' + spaces = ch == u' ' + end += 1 + + def write_literal(self, text, comment=None): + # type: (Any, Any) -> None + hints, _indent, _indicator = self.determine_block_hints(text) + self.write_indicator(u'|' + hints, True) + try: + comment = comment[1][0] + if comment: + self.stream.write(comment) + except (TypeError, IndexError): + pass + if _indicator == u'+': + self.open_ended = True + self.write_line_break() + breaks = True + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if breaks: + if ch is None or ch not in u'\n\x85\u2028\u2029': + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + if ch is not None: + if self.root_context: + idnx = self.indent if self.indent is not None else 0 + self.stream.write(u' ' * (_indent + idnx)) + else: + self.write_indent() + start = end + else: + if ch is None or ch in u'\n\x85\u2028\u2029': + data = text[start:end] + if bool(self.encoding): + data = data.encode(self.encoding) + self.stream.write(data) + if ch is None: + self.write_line_break() + start = end + if ch is not None: + breaks = ch in u'\n\x85\u2028\u2029' + end += 1 + + def write_plain(self, text, 
split=True): + # type: (Any, Any) -> None + if self.root_context: + if self.requested_indent is not None: + self.write_line_break() + if self.requested_indent != 0: + self.write_indent() + else: + self.open_ended = True + if not text: + return + if not self.whitespace: + data = u' ' + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + self.whitespace = False + self.indention = False + spaces = False + breaks = False + start = end = 0 + while end <= len(text): + ch = None + if end < len(text): + ch = text[end] + if spaces: + if ch != u' ': + if start + 1 == end and self.column > self.best_width and split: + self.write_indent() + self.whitespace = False + self.indention = False + else: + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + self.stream.write(data) + start = end + elif breaks: + if ch not in u'\n\x85\u2028\u2029': # type: ignore + if text[start] == u'\n': + self.write_line_break() + for br in text[start:end]: + if br == u'\n': + self.write_line_break() + else: + self.write_line_break(br) + self.write_indent() + self.whitespace = False + self.indention = False + start = end + else: + if ch is None or ch in u' \n\x85\u2028\u2029': + data = text[start:end] + self.column += len(data) + if self.encoding: + data = data.encode(self.encoding) + try: + self.stream.write(data) + except: # NOQA + sys.stdout.write(repr(data) + '\n') + raise + start = end + if ch is not None: + spaces = ch == u' ' + breaks = ch in u'\n\x85\u2028\u2029' + end += 1 + + def write_comment(self, comment, pre=False): + # type: (Any, bool) -> None + value = comment.value + # nprintf('{:02d} {:02d} {!r}'.format(self.column, comment.start_mark.column, value)) + if not pre and value[-1] == '\n': + value = value[:-1] + try: + # get original column position + col = comment.start_mark.column + if comment.value and comment.value.startswith('\n'): + # never inject extra spaces if the comment starts with a newline + # and not a real comment (e.g. 
if you have an empty line following a key-value + col = self.column + elif col < self.column + 1: + ValueError + except ValueError: + col = self.column + 1 + # nprint('post_comment', self.line, self.column, value) + try: + # at least one space if the current column >= the start column of the comment + # but not at the start of a line + nr_spaces = col - self.column + if self.column and value.strip() and nr_spaces < 1 and value[0] != '\n': + nr_spaces = 1 + value = ' ' * nr_spaces + value + try: + if bool(self.encoding): + value = value.encode(self.encoding) + except UnicodeDecodeError: + pass + self.stream.write(value) + except TypeError: + raise + if not pre: + self.write_line_break() + + def write_pre_comment(self, event): + # type: (Any) -> bool + comments = event.comment[1] + if comments is None: + return False + try: + start_events = (MappingStartEvent, SequenceStartEvent) + for comment in comments: + if isinstance(event, start_events) and getattr(comment, 'pre_done', None): + continue + if self.column != 0: + self.write_line_break() + self.write_comment(comment, pre=True) + if isinstance(event, start_events): + comment.pre_done = True + except TypeError: + sys.stdout.write('eventtt {} {}'.format(type(event), event)) + raise + return True + + def write_post_comment(self, event): + # type: (Any) -> bool + if self.event.comment[0] is None: + return False + comment = event.comment[0] + self.write_comment(comment) + return True diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py new file mode 100644 index 0000000000..cfad4a6f4d --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/error.py @@ -0,0 +1,311 @@ +# coding: utf-8 + +from __future__ import absolute_import + +import warnings +import textwrap + +from ...ruamel.yaml.compat import utf8 + +if False: # MYPY + from typing import Any, Dict, Optional, List, Text # NOQA + + +__all__ = [ + 'FileMark', + 'StringMark', + 'CommentMark', + 'YAMLError', + 'MarkedYAMLError', + 'ReusedAnchorWarning', + 'UnsafeLoaderWarning', + 'MarkedYAMLWarning', + 'MarkedYAMLFutureWarning', +] + + +class StreamMark(object): + __slots__ = 'name', 'index', 'line', 'column' + + def __init__(self, name, index, line, column): + # type: (Any, int, int, int) -> None + self.name = name + self.index = index + self.line = line + self.column = column + + def __str__(self): + # type: () -> Any + where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1) + return where + + def __eq__(self, other): + # type: (Any) -> bool + if self.line != other.line or self.column != other.column: + return False + if self.name != other.name or self.index != other.index: + return False + return True + + def __ne__(self, other): + # type: (Any) -> bool + return not self.__eq__(other) + + +class FileMark(StreamMark): + __slots__ = () + + +class StringMark(StreamMark): + __slots__ = 'name', 'index', 'line', 'column', 'buffer', 'pointer' + + def __init__(self, name, index, line, column, buffer, pointer): + # type: (Any, int, int, int, Any, Any) -> None + StreamMark.__init__(self, name, index, line, column) + self.buffer = buffer + self.pointer = pointer + + def get_snippet(self, indent=4, max_length=75): + # type: (int, int) -> Any + if self.buffer is None: # always False + return None + head = "" + start = self.pointer + while start > 0 and self.buffer[start - 1] not in 
u'\0\r\n\x85\u2028\u2029': + start -= 1 + if self.pointer - start > max_length / 2 - 1: + head = ' ... ' + start += 5 + break + tail = "" + end = self.pointer + while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029': + end += 1 + if end - self.pointer > max_length / 2 - 1: + tail = ' ... ' + end -= 5 + break + snippet = utf8(self.buffer[start:end]) + caret = '^' + caret = '^ (line: {})'.format(self.line + 1) + return ( + ' ' * indent + + head + + snippet + + tail + + '\n' + + ' ' * (indent + self.pointer - start + len(head)) + + caret + ) + + def __str__(self): + # type: () -> Any + snippet = self.get_snippet() + where = ' in "%s", line %d, column %d' % (self.name, self.line + 1, self.column + 1) + if snippet is not None: + where += ':\n' + snippet + return where + + +class CommentMark(object): + __slots__ = ('column',) + + def __init__(self, column): + # type: (Any) -> None + self.column = column + + +class YAMLError(Exception): + pass + + +class MarkedYAMLError(YAMLError): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + # warn is ignored + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + return '\n'.join(lines) + + +class YAMLStreamError(Exception): + pass + + +class YAMLWarning(Warning): + pass + + +class MarkedYAMLWarning(YAMLWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) + + +class ReusedAnchorWarning(YAMLWarning): + pass + + +class UnsafeLoaderWarning(YAMLWarning): + text = """ +The default 'Loader' for 'load(stream)' without further arguments can be unsafe. 
+Use 'load(stream, Loader=ruamel.yaml.Loader)' explicitly if that is OK. +Alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruamel.yaml.error.UnsafeLoaderWarning) + +In most other cases you should consider using 'safe_load(stream)'""" + pass + + +warnings.simplefilter('once', UnsafeLoaderWarning) + + +class MantissaNoDotYAML1_1Warning(YAMLWarning): + def __init__(self, node, flt_str): + # type: (Any, Any) -> None + self.node = node + self.flt = flt_str + + def __str__(self): + # type: () -> Any + line = self.node.start_mark.line + col = self.node.start_mark.column + return """ +In YAML 1.1 floating point values should have a dot ('.') in their mantissa. +See the Floating-Point Language-Independent Type for YAML™ Version 1.1 specification +( http://yaml.org/type/float.html ). This dot is not required for JSON nor for YAML 1.2 + +Correct your float: "{}" on line: {}, column: {} + +or alternatively include the following in your code: + + import warnings + warnings.simplefilter('ignore', ruamel.yaml.error.MantissaNoDotYAML1_1Warning) + +""".format( + self.flt, line, col + ) + + +warnings.simplefilter('once', MantissaNoDotYAML1_1Warning) + + +class YAMLFutureWarning(Warning): + pass + + +class MarkedYAMLFutureWarning(YAMLFutureWarning): + def __init__( + self, + context=None, + context_mark=None, + problem=None, + problem_mark=None, + note=None, + warn=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.context = context + self.context_mark = context_mark + self.problem = problem + self.problem_mark = problem_mark + self.note = note + self.warn = warn + + def __str__(self): + # type: () -> Any + lines = [] # type: List[str] + if self.context is not None: + lines.append(self.context) + + if self.context_mark is not None and ( + self.problem is None + or self.problem_mark is None + or self.context_mark.name != self.problem_mark.name + or self.context_mark.line != self.problem_mark.line + or self.context_mark.column != self.problem_mark.column + ): + lines.append(str(self.context_mark)) + if self.problem is not None: + lines.append(self.problem) + if self.problem_mark is not None: + lines.append(str(self.problem_mark)) + if self.note is not None and self.note: + note = textwrap.dedent(self.note) + lines.append(note) + if self.warn is not None and self.warn: + warn = textwrap.dedent(self.warn) + lines.append(warn) + return '\n'.join(lines) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py new file mode 100644 index 0000000000..58b212190a --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/events.py @@ -0,0 +1,157 @@ +# coding: utf-8 + +# Abstract classes. 
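+
+# Note (illustrative annotation): these event classes mirror the YAML event
+# stream that the parser produces and the emitter above consumes.  Roughly,
+# serializing the mapping {'a': 1} yields the sequence StreamStartEvent,
+# DocumentStartEvent, MappingStartEvent, ScalarEvent(value='a'),
+# ScalarEvent(value='1'), MappingEndEvent, DocumentEndEvent, StreamEndEvent.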
+ +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + + +def CommentCheck(): + # type: () -> None + pass + + +class Event(object): + __slots__ = 'start_mark', 'end_mark', 'comment' + + def __init__(self, start_mark=None, end_mark=None, comment=CommentCheck): + # type: (Any, Any, Any) -> None + self.start_mark = start_mark + self.end_mark = end_mark + # assert comment is not CommentCheck + if comment is CommentCheck: + comment = None + self.comment = comment + + def __repr__(self): + # type: () -> Any + attributes = [ + key + for key in ['anchor', 'tag', 'implicit', 'value', 'flow_style', 'style'] + if hasattr(self, key) + ] + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) + if self.comment not in [None, CommentCheck]: + arguments += ', comment={!r}'.format(self.comment) + return '%s(%s)' % (self.__class__.__name__, arguments) + + +class NodeEvent(Event): + __slots__ = ('anchor',) + + def __init__(self, anchor, start_mark=None, end_mark=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.anchor = anchor + + +class CollectionStartEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'flow_style', 'nr_items' + + def __init__( + self, + anchor, + tag, + implicit, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + nr_items=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Optional[int]) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.flow_style = flow_style + self.nr_items = nr_items + + +class CollectionEndEvent(Event): + __slots__ = () + + +# Implementations. + + +class StreamStartEvent(Event): + __slots__ = ('encoding',) + + def __init__(self, start_mark=None, end_mark=None, encoding=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.encoding = encoding + + +class StreamEndEvent(Event): + __slots__ = () + + +class DocumentStartEvent(Event): + __slots__ = 'explicit', 'version', 'tags' + + def __init__( + self, + start_mark=None, + end_mark=None, + explicit=None, + version=None, + tags=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + self.version = version + self.tags = tags + + +class DocumentEndEvent(Event): + __slots__ = ('explicit',) + + def __init__(self, start_mark=None, end_mark=None, explicit=None, comment=None): + # type: (Any, Any, Any, Any) -> None + Event.__init__(self, start_mark, end_mark, comment) + self.explicit = explicit + + +class AliasEvent(NodeEvent): + __slots__ = () + + +class ScalarEvent(NodeEvent): + __slots__ = 'tag', 'implicit', 'value', 'style' + + def __init__( + self, + anchor, + tag, + implicit, + value, + start_mark=None, + end_mark=None, + style=None, + comment=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any, Any) -> None + NodeEvent.__init__(self, anchor, start_mark, end_mark, comment) + self.tag = tag + self.implicit = implicit + self.value = value + self.style = style + + +class SequenceStartEvent(CollectionStartEvent): + __slots__ = () + + +class SequenceEndEvent(CollectionEndEvent): + __slots__ = () + + +class MappingStartEvent(CollectionStartEvent): + __slots__ = () + + +class MappingEndEvent(CollectionEndEvent): + __slots__ = () diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py 
b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py new file mode 100644 index 0000000000..ae8c8b8c04 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/loader.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +from __future__ import absolute_import + + +from ...ruamel.yaml.reader import Reader +from ...ruamel.yaml.scanner import Scanner, RoundTripScanner +from ...ruamel.yaml.parser import Parser, RoundTripParser +from ...ruamel.yaml.composer import Composer +from ...ruamel.yaml.constructor import ( + BaseConstructor, + SafeConstructor, + Constructor, + RoundTripConstructor, +) +from ...ruamel.yaml.resolver import VersionedResolver + +if False: # MYPY + from typing import Any, Dict, List, Union, Optional # NOQA + from ...ruamel.yaml.compat import StreamTextType, VersionType # NOQA + +__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader'] + + +class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + BaseConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + SafeConstructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + Reader.__init__(self, stream, loader=self) + Scanner.__init__(self, loader=self) + Parser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + Constructor.__init__(self, loader=self) + VersionedResolver.__init__(self, version, loader=self) + + +class RoundTripLoader( + Reader, + RoundTripScanner, + RoundTripParser, + Composer, + RoundTripConstructor, + VersionedResolver, +): + def __init__(self, stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None + # self.reader = Reader.__init__(self, stream) + Reader.__init__(self, stream, loader=self) + RoundTripScanner.__init__(self, loader=self) + RoundTripParser.__init__(self, loader=self) + Composer.__init__(self, loader=self) + RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self) + VersionedResolver.__init__(self, version, loader=self) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py new file mode 100644 index 0000000000..2abc7c96f4 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/main.py @@ -0,0 +1,1534 @@ +# coding: utf-8 + +from __future__ import absolute_import, unicode_literals, print_function + +import sys +import os +import 
warnings +import glob +from importlib import import_module + + +from ... import ruamel +from ...ruamel.yaml.error import UnsafeLoaderWarning, YAMLError # NOQA + +from ...ruamel.yaml.tokens import * # NOQA +from ...ruamel.yaml.events import * # NOQA +from ...ruamel.yaml.nodes import * # NOQA + +from ...ruamel.yaml.loader import BaseLoader, SafeLoader, Loader, RoundTripLoader # NOQA +from ...ruamel.yaml.dumper import BaseDumper, SafeDumper, Dumper, RoundTripDumper # NOQA +from ...ruamel.yaml.compat import StringIO, BytesIO, with_metaclass, PY3, nprint +from ...ruamel.yaml.resolver import VersionedResolver, Resolver # NOQA +from ...ruamel.yaml.representer import ( + BaseRepresenter, + SafeRepresenter, + Representer, + RoundTripRepresenter, +) +from ...ruamel.yaml.constructor import ( + BaseConstructor, + SafeConstructor, + Constructor, + RoundTripConstructor, +) +from ...ruamel.yaml.loader import Loader as UnsafeLoader + +if False: # MYPY + from typing import List, Set, Dict, Union, Any, Callable, Optional, Text # NOQA + from ...ruamel.yaml.compat import StreamType, StreamTextType, VersionType # NOQA + + if PY3: + from pathlib import Path + else: + Path = Any + +try: + from _ruamel_yaml import CParser, CEmitter # type: ignore +except: # NOQA + CParser = CEmitter = None + +# import io + +enforce = object() + + +# YAML is an acronym, i.e. spoken: rhymes with "camel". And thus a +# subset of abbreviations, which should be all caps according to PEP8 + + +class YAML(object): + def __init__( + self, _kw=enforce, typ=None, pure=False, output=None, plug_ins=None # input=None, + ): + # type: (Any, Optional[Text], Any, Any, Any) -> None + """ + _kw: not used, forces keyword arguments in 2.7 (in 3 you can do (*, safe_load=..) + typ: 'rt'/None -> RoundTripLoader/RoundTripDumper, (default) + 'safe' -> SafeLoader/SafeDumper, + 'unsafe' -> normal/unsafe Loader/Dumper + 'base' -> baseloader + pure: if True only use Python modules + input/output: needed to work as context manager + plug_ins: a list of plug-in files + """ + if _kw is not enforce: + raise TypeError( + '{}.__init__() takes no positional argument but at least ' + 'one was given ({!r})'.format(self.__class__.__name__, _kw) + ) + + self.typ = ['rt'] if typ is None else (typ if isinstance(typ, list) else [typ]) + self.pure = pure + + # self._input = input + self._output = output + self._context_manager = None # type: Any + + self.plug_ins = [] # type: List[Any] + for pu in ([] if plug_ins is None else plug_ins) + self.official_plug_ins(): + file_name = pu.replace(os.sep, '.') + self.plug_ins.append(import_module(file_name)) + self.Resolver = ruamel.yaml.resolver.VersionedResolver # type: Any + self.allow_unicode = True + self.Reader = None # type: Any + self.Representer = None # type: Any + self.Constructor = None # type: Any + self.Scanner = None # type: Any + self.Serializer = None # type: Any + self.default_flow_style = None # type: Any + typ_found = 1 + setup_rt = False + if 'rt' in self.typ: + setup_rt = True + elif 'safe' in self.typ: + self.Emitter = ( + ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruamel.yaml.representer.SafeRepresenter + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.SafeConstructor + elif 'base' in self.typ: + self.Emitter = ruamel.yaml.emitter.Emitter + self.Representer = ruamel.yaml.representer.BaseRepresenter + self.Parser = 
ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.BaseConstructor + elif 'unsafe' in self.typ: + self.Emitter = ( + ruamel.yaml.emitter.Emitter if pure or CEmitter is None else CEmitter + ) + self.Representer = ruamel.yaml.representer.Representer + self.Parser = ruamel.yaml.parser.Parser if pure or CParser is None else CParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.Constructor + else: + setup_rt = True + typ_found = 0 + if setup_rt: + self.default_flow_style = False + # no optimized rt-dumper yet + self.Emitter = ruamel.yaml.emitter.Emitter + self.Serializer = ruamel.yaml.serializer.Serializer + self.Representer = ruamel.yaml.representer.RoundTripRepresenter + self.Scanner = ruamel.yaml.scanner.RoundTripScanner + # no optimized rt-parser yet + self.Parser = ruamel.yaml.parser.RoundTripParser + self.Composer = ruamel.yaml.composer.Composer + self.Constructor = ruamel.yaml.constructor.RoundTripConstructor + del setup_rt + self.stream = None + self.canonical = None + self.old_indent = None + self.width = None + self.line_break = None + + self.map_indent = None + self.sequence_indent = None + self.sequence_dash_offset = 0 + self.compact_seq_seq = None + self.compact_seq_map = None + self.sort_base_mapping_type_on_output = None # default: sort + + self.top_level_colon_align = None + self.prefix_colon = None + self.version = None + self.preserve_quotes = None + self.allow_duplicate_keys = False # duplicate keys in map, set + self.encoding = 'utf-8' + self.explicit_start = None + self.explicit_end = None + self.tags = None + self.default_style = None + self.top_level_block_style_scalar_no_indent_error_1_1 = False + # directives end indicator with single scalar document + self.scalar_after_indicator = None + # [a, b: 1, c: {d: 2}] vs. 
[a, {b: 1}, {c: {d: 2}}] + self.brace_single_entry_mapping_in_flow_sequence = False + for module in self.plug_ins: + if getattr(module, 'typ', None) in self.typ: + typ_found += 1 + module.init_typ(self) + break + if typ_found == 0: + raise NotImplementedError( + 'typ "{}"not recognised (need to install plug-in?)'.format(self.typ) + ) + + @property + def reader(self): + # type: () -> Any + try: + return self._reader # type: ignore + except AttributeError: + self._reader = self.Reader(None, loader=self) + return self._reader + + @property + def scanner(self): + # type: () -> Any + try: + return self._scanner # type: ignore + except AttributeError: + self._scanner = self.Scanner(loader=self) + return self._scanner + + @property + def parser(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Parser is not CParser: + setattr(self, attr, self.Parser(loader=self)) + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + else: + # if not hasattr(self._stream, 'read') and hasattr(self._stream, 'open'): + # # pathlib.Path() instance + # setattr(self, attr, CParser(self._stream)) + # else: + setattr(self, attr, CParser(self._stream)) + # self._parser = self._composer = self + # nprint('scanner', self.loader.scanner) + + return getattr(self, attr) + + @property + def composer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Composer(loader=self)) + return getattr(self, attr) + + @property + def constructor(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + cnst = self.Constructor(preserve_quotes=self.preserve_quotes, loader=self) + cnst.allow_duplicate_keys = self.allow_duplicate_keys + setattr(self, attr, cnst) + return getattr(self, attr) + + @property + def resolver(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr(self, attr, self.Resolver(version=self.version, loader=self)) + return getattr(self, attr) + + @property + def emitter(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + if self.Emitter is not CEmitter: + _emitter = self.Emitter( + None, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + prefix_colon=self.prefix_colon, + brace_single_entry_mapping_in_flow_sequence=self.brace_single_entry_mapping_in_flow_sequence, # NOQA + dumper=self, + ) + setattr(self, attr, _emitter) + if self.map_indent is not None: + _emitter.best_map_indent = self.map_indent + if self.sequence_indent is not None: + _emitter.best_sequence_indent = self.sequence_indent + if self.sequence_dash_offset is not None: + _emitter.sequence_dash_offset = self.sequence_dash_offset + # _emitter.block_seq_indent = self.sequence_dash_offset + if self.compact_seq_seq is not None: + _emitter.compact_seq_seq = self.compact_seq_seq + if self.compact_seq_map is not None: + _emitter.compact_seq_map = self.compact_seq_map + else: + if getattr(self, '_stream', None) is None: + # wait for the stream + return None + return None + return getattr(self, attr) + + @property + def serializer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + setattr( + self, + attr, + self.Serializer( + encoding=self.encoding, + explicit_start=self.explicit_start, + 
explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + dumper=self, + ), + ) + return getattr(self, attr) + + @property + def representer(self): + # type: () -> Any + attr = '_' + sys._getframe().f_code.co_name + if not hasattr(self, attr): + repres = self.Representer( + default_style=self.default_style, + default_flow_style=self.default_flow_style, + dumper=self, + ) + if self.sort_base_mapping_type_on_output is not None: + repres.sort_base_mapping_type_on_output = self.sort_base_mapping_type_on_output + setattr(self, attr, repres) + return getattr(self, attr) + + # separate output resolver? + + # def load(self, stream=None): + # if self._context_manager: + # if not self._input: + # raise TypeError("Missing input stream while dumping from context manager") + # for data in self._context_manager.load(): + # yield data + # return + # if stream is None: + # raise TypeError("Need a stream argument when not loading from context manager") + # return self.load_one(stream) + + def load(self, stream): + # type: (Union[Path, StreamTextType]) -> Any + """ + at this point you either have the non-pure Parser (which has its own reader and + scanner) or you have the pure Parser. + If the pure Parser is set, then set the Reader and Scanner, if not already set. + If either the Scanner or Reader are set, you cannot use the non-pure Parser, + so reset it to the pure parser and set the Reader resp. Scanner if necessary + """ + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('rb') as fp: + return self.load(fp) + constructor, parser = self.get_constructor_parser(stream) + try: + return constructor.get_single_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def load_all(self, stream, _kw=enforce): # , skip=None): + # type: (Union[Path, StreamTextType], Any) -> Any + if _kw is not enforce: + raise TypeError( + '{}.__init__() takes no positional argument but at least ' + 'one was given ({!r})'.format(self.__class__.__name__, _kw) + ) + if not hasattr(stream, 'read') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('r') as fp: + for d in self.load_all(fp, _kw=enforce): + yield d + return + # if skip is None: + # skip = [] + # elif isinstance(skip, int): + # skip = [skip] + constructor, parser = self.get_constructor_parser(stream) + try: + while constructor.check_data(): + yield constructor.get_data() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + def get_constructor_parser(self, stream): + # type: (StreamTextType) -> Any + """ + the old cyaml needs special setup, and therefore the stream + """ + if self.Parser is not CParser: + if self.Reader is None: + self.Reader = ruamel.yaml.reader.Reader + if self.Scanner is None: + self.Scanner = ruamel.yaml.scanner.Scanner + self.reader.stream = stream + else: + if self.Reader is not None: + if self.Scanner is None: + self.Scanner = ruamel.yaml.scanner.Scanner + self.Parser = ruamel.yaml.parser.Parser + self.reader.stream = stream + elif self.Scanner is not None: + if self.Reader is None: + self.Reader = ruamel.yaml.reader.Reader + self.Parser = ruamel.yaml.parser.Parser + self.reader.stream = stream + else: + # combined C level reader>scanner>parser + # does some calls to the resolver, e.g. 
BaseResolver.descend_resolver + # if you just initialise the CParser, to much of resolver.py + # is actually used + rslvr = self.Resolver + # if rslvr is ruamel.yaml.resolver.VersionedResolver: + # rslvr = ruamel.yaml.resolver.Resolver + + class XLoader(self.Parser, self.Constructor, rslvr): # type: ignore + def __init__(selfx, stream, version=self.version, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None # NOQA + CParser.__init__(selfx, stream) + selfx._parser = selfx._composer = selfx + self.Constructor.__init__(selfx, loader=selfx) + selfx.allow_duplicate_keys = self.allow_duplicate_keys + rslvr.__init__(selfx, version=version, loadumper=selfx) + + self._stream = stream + loader = XLoader(stream) + return loader, loader + return self.constructor, self.parser + + def dump(self, data, stream=None, _kw=enforce, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + if self._context_manager: + if not self._output: + raise TypeError('Missing output stream while dumping from context manager') + if _kw is not enforce: + raise TypeError( + '{}.dump() takes one positional argument but at least ' + 'two were given ({!r})'.format(self.__class__.__name__, _kw) + ) + if transform is not None: + raise TypeError( + '{}.dump() in the context manager cannot have transform keyword ' + ''.format(self.__class__.__name__) + ) + self._context_manager.dump(data) + else: # old style + if stream is None: + raise TypeError('Need a stream argument when not dumping from context manager') + return self.dump_all([data], stream, _kw, transform=transform) + + def dump_all(self, documents, stream, _kw=enforce, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + if self._context_manager: + raise NotImplementedError + if _kw is not enforce: + raise TypeError( + '{}.dump(_all) takes two positional argument but at least ' + 'three were given ({!r})'.format(self.__class__.__name__, _kw) + ) + self._output = stream + self._context_manager = YAMLContextManager(self, transform=transform) + for data in documents: + self._context_manager.dump(data) + self._context_manager.teardown_output() + self._output = None + self._context_manager = None + + def Xdump_all(self, documents, stream, _kw=enforce, transform=None): + # type: (Any, Union[Path, StreamType], Any, Any) -> Any + """ + Serialize a sequence of Python objects into a YAML stream. + """ + if not hasattr(stream, 'write') and hasattr(stream, 'open'): + # pathlib.Path() instance + with stream.open('w') as fp: + return self.dump_all(documents, fp, _kw, transform=transform) + if _kw is not enforce: + raise TypeError( + '{}.dump(_all) takes two positional argument but at least ' + 'three were given ({!r})'.format(self.__class__.__name__, _kw) + ) + # The stream should have the methods `write` and possibly `flush`. 
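+        # (for example an open text file, an io.StringIO(), or any object
+        # with a compatible write(); pathlib.Path inputs were already
+        # unwrapped at the top of this method)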
+ if self.top_level_colon_align is True: + tlca = max([len(str(x)) for x in documents[0]]) # type: Any + else: + tlca = self.top_level_colon_align + if transform is not None: + fstream = stream + if self.encoding is None: + stream = StringIO() + else: + stream = BytesIO() + serializer, representer, emitter = self.get_serializer_representer_emitter( + stream, tlca + ) + try: + self.serializer.open() + for data in documents: + try: + self.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + self.serializer.close() + finally: + try: + self.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + delattr(self, '_serializer') + delattr(self, '_emitter') + if transform: + val = stream.getvalue() + if self.encoding: + val = val.decode(self.encoding) + if fstream is None: + transform(val) + else: + fstream.write(transform(val)) + return None + + def get_serializer_representer_emitter(self, stream, tlca): + # type: (StreamType, Any) -> Any + # we have only .Serializer to deal with (vs .Reader & .Scanner), much simpler + if self.Emitter is not CEmitter: + if self.Serializer is None: + self.Serializer = ruamel.yaml.serializer.Serializer + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + if self.Serializer is not None: + # cannot set serializer with CEmitter + self.Emitter = ruamel.yaml.emitter.Emitter + self.emitter.stream = stream + self.emitter.top_level_colon_align = tlca + if self.scalar_after_indicator is not None: + self.emitter.scalar_after_indicator = self.scalar_after_indicator + return self.serializer, self.representer, self.emitter + # C routines + + rslvr = ( + ruamel.yaml.resolver.BaseResolver + if 'base' in self.typ + else ruamel.yaml.resolver.Resolver + ) + + class XDumper(CEmitter, self.Representer, rslvr): # type: ignore + def __init__( + selfx, + stream, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=None, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, + ): + # type: (StreamType, Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> None # NOQA + CEmitter.__init__( + selfx, + stream, + canonical=canonical, + indent=indent, + width=width, + encoding=encoding, + allow_unicode=allow_unicode, + line_break=line_break, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + ) + selfx._emitter = selfx._serializer = selfx._representer = selfx + self.Representer.__init__( + selfx, default_style=default_style, default_flow_style=default_flow_style + ) + rslvr.__init__(selfx) + + self._stream = stream + dumper = XDumper( + stream, + default_style=self.default_style, + default_flow_style=self.default_flow_style, + canonical=self.canonical, + indent=self.old_indent, + width=self.width, + allow_unicode=self.allow_unicode, + line_break=self.line_break, + explicit_start=self.explicit_start, + explicit_end=self.explicit_end, + version=self.version, + tags=self.tags, + ) + self._emitter = self._serializer = dumper + return dumper, dumper, dumper + + # basic types + def map(self, **kw): + # type: (Any) 
-> Any + if 'rt' in self.typ: + from ...ruamel.yaml.comments import CommentedMap + + return CommentedMap(**kw) + else: + return dict(**kw) + + def seq(self, *args): + # type: (Any) -> Any + if 'rt' in self.typ: + from ...ruamel.yaml.comments import CommentedSeq + + return CommentedSeq(*args) + else: + return list(*args) + + # helpers + def official_plug_ins(self): + # type: () -> Any + bd = os.path.dirname(__file__) + gpbd = os.path.dirname(os.path.dirname(bd)) + res = [x.replace(gpbd, "")[1:-3] for x in glob.glob(bd + '/*/__plug_in__.py')] + return res + + def register_class(self, cls): + # type:(Any) -> Any + """ + register a class for dumping loading + - if it has attribute yaml_tag use that to register, else use class name + - if it has methods to_yaml/from_yaml use those to dump/load else dump attributes + as mapping + """ + tag = getattr(cls, 'yaml_tag', '!' + cls.__name__) + try: + self.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + self.representer.add_representer(cls, t_y) + try: + self.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + self.constructor.add_constructor(tag, f_y) + return cls + + def parse(self, stream): + # type: (StreamTextType) -> Any + """ + Parse a YAML stream and produce parsing events. + """ + _, parser = self.get_constructor_parser(stream) + try: + while parser.check_event(): + yield parser.get_event() + finally: + parser.dispose() + try: + self._reader.reset_reader() + except AttributeError: + pass + try: + self._scanner.reset_scanner() + except AttributeError: + pass + + # ### context manager + + def __enter__(self): + # type: () -> Any + self._context_manager = YAMLContextManager(self) + return self + + def __exit__(self, typ, value, traceback): + # type: (Any, Any, Any) -> None + if typ: + nprint('typ', typ) + self._context_manager.teardown_output() + # self._context_manager.teardown_input() + self._context_manager = None + + # ### backwards compatibility + def _indent(self, mapping=None, sequence=None, offset=None): + # type: (Any, Any, Any) -> None + if mapping is not None: + self.map_indent = mapping + if sequence is not None: + self.sequence_indent = sequence + if offset is not None: + self.sequence_dash_offset = offset + + @property + def indent(self): + # type: () -> Any + return self._indent + + @indent.setter + def indent(self, val): + # type: (Any) -> None + self.old_indent = val + + @property + def block_seq_indent(self): + # type: () -> Any + return self.sequence_dash_offset + + @block_seq_indent.setter + def block_seq_indent(self, val): + # type: (Any) -> None + self.sequence_dash_offset = val + + def compact(self, seq_seq=None, seq_map=None): + # type: (Any, Any) -> None + self.compact_seq_seq = seq_seq + self.compact_seq_map = seq_map + + +class YAMLContextManager(object): + def __init__(self, yaml, transform=None): + # type: (Any, Any) -> None # used to be: (Any, Optional[Callable]) -> None + self._yaml = yaml + self._output_inited = False + self._output_path = None + self._output = self._yaml._output + self._transform = transform + + # self._input_inited = False + # self._input = input + # self._input_path = None + # self._transform = yaml.transform + # self._fstream = None + + if not hasattr(self._output, 
'write') and hasattr(self._output, 'open'): + # pathlib.Path() instance, open with the same mode + self._output_path = self._output + self._output = self._output_path.open('w') + + # if not hasattr(self._stream, 'write') and hasattr(stream, 'open'): + # if not hasattr(self._input, 'read') and hasattr(self._input, 'open'): + # # pathlib.Path() instance, open with the same mode + # self._input_path = self._input + # self._input = self._input_path.open('r') + + if self._transform is not None: + self._fstream = self._output + if self._yaml.encoding is None: + self._output = StringIO() + else: + self._output = BytesIO() + + def teardown_output(self): + # type: () -> None + if self._output_inited: + self._yaml.serializer.close() + else: + return + try: + self._yaml.emitter.dispose() + except AttributeError: + raise + # self.dumper.dispose() # cyaml + try: + delattr(self._yaml, '_serializer') + delattr(self._yaml, '_emitter') + except AttributeError: + raise + if self._transform: + val = self._output.getvalue() + if self._yaml.encoding: + val = val.decode(self._yaml.encoding) + if self._fstream is None: + self._transform(val) + else: + self._fstream.write(self._transform(val)) + self._fstream.flush() + self._output = self._fstream # maybe not necessary + if self._output_path is not None: + self._output.close() + + def init_output(self, first_data): + # type: (Any) -> None + if self._yaml.top_level_colon_align is True: + tlca = max([len(str(x)) for x in first_data]) # type: Any + else: + tlca = self._yaml.top_level_colon_align + self._yaml.get_serializer_representer_emitter(self._output, tlca) + self._yaml.serializer.open() + self._output_inited = True + + def dump(self, data): + # type: (Any) -> None + if not self._output_inited: + self.init_output(data) + try: + self._yaml.representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + + # def teardown_input(self): + # pass + # + # def init_input(self): + # # set the constructor and parser on YAML() instance + # self._yaml.get_constructor_parser(stream) + # + # def load(self): + # if not self._input_inited: + # self.init_input() + # try: + # while self._yaml.constructor.check_data(): + # yield self._yaml.constructor.get_data() + # finally: + # parser.dispose() + # try: + # self._reader.reset_reader() # type: ignore + # except AttributeError: + # pass + # try: + # self._scanner.reset_scanner() # type: ignore + # except AttributeError: + # pass + + +def yaml_object(yml): + # type: (Any) -> Any + """ decorator for classes that needs to dump/load objects + The tag for such objects is taken from the class attribute yaml_tag (or the + class name in lowercase in case unavailable) + If methods to_yaml and/or from_yaml are available, these are called for dumping resp. + loading, default routines (dumping a mapping of the attributes) used otherwise. + """ + + def yo_deco(cls): + # type: (Any) -> Any + tag = getattr(cls, 'yaml_tag', '!' 
+ cls.__name__) + try: + yml.representer.add_representer(cls, cls.to_yaml) + except AttributeError: + + def t_y(representer, data): + # type: (Any, Any) -> Any + return representer.represent_yaml_object( + tag, data, cls, flow_style=representer.default_flow_style + ) + + yml.representer.add_representer(cls, t_y) + try: + yml.constructor.add_constructor(tag, cls.from_yaml) + except AttributeError: + + def f_y(constructor, node): + # type: (Any, Any) -> Any + return constructor.construct_yaml_object(node, cls) + + yml.constructor.add_constructor(tag, f_y) + return cls + + return yo_deco + + +######################################################################################## + + +def scan(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Scan a YAML stream and produce scanning tokens. + """ + loader = Loader(stream) + try: + while loader.scanner.check_token(): + yield loader.scanner.get_token() + finally: + loader._parser.dispose() + + +def parse(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse a YAML stream and produce parsing events. + """ + loader = Loader(stream) + try: + while loader._parser.check_event(): + yield loader._parser.get_event() + finally: + loader._parser.dispose() + + +def compose(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding representation tree. + """ + loader = Loader(stream) + try: + return loader.get_single_node() + finally: + loader.dispose() + + +def compose_all(stream, Loader=Loader): + # type: (StreamTextType, Any) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding representation trees. + """ + loader = Loader(stream) + try: + while loader.check_node(): + yield loader._composer.get_node() + finally: + loader._parser.dispose() + + +def load(stream, Loader=None, version=None, preserve_quotes=None): + # type: (StreamTextType, Any, Optional[VersionType], Any) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + """ + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) + try: + return loader._constructor.get_single_data() + finally: + loader._parser.dispose() + try: + loader._reader.reset_reader() + except AttributeError: + pass + try: + loader._scanner.reset_scanner() + except AttributeError: + pass + + +def load_all(stream, Loader=None, version=None, preserve_quotes=None): + # type: (Optional[StreamTextType], Any, Optional[VersionType], Optional[bool]) -> Any # NOQA + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + """ + if Loader is None: + warnings.warn(UnsafeLoaderWarning.text, UnsafeLoaderWarning, stacklevel=2) + Loader = UnsafeLoader + loader = Loader(stream, version, preserve_quotes=preserve_quotes) + try: + while loader._constructor.check_data(): + yield loader._constructor.get_data() + finally: + loader._parser.dispose() + try: + loader._reader.reset_reader() + except AttributeError: + pass + try: + loader._scanner.reset_scanner() + except AttributeError: + pass + + +def safe_load(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. 
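+
+    A minimal usage sketch, added for illustration only (the plain
+    ``ruamel.yaml`` import path is assumed and may differ for a vendored
+    copy such as this one)::
+
+        from ruamel.yaml import safe_load
+
+        data = safe_load('a: 1\nb: [2, 3]\n')
+        assert data == {'a': 1, 'b': [2, 3]}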
+ """ + return load(stream, SafeLoader, version) + + +def safe_load_all(stream, version=None): + # type: (StreamTextType, Optional[VersionType]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, SafeLoader, version) + + +def round_trip_load(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse the first YAML document in a stream + and produce the corresponding Python object. + Resolve only basic YAML tags. + """ + return load(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def round_trip_load_all(stream, version=None, preserve_quotes=None): + # type: (StreamTextType, Optional[VersionType], Optional[bool]) -> Any + """ + Parse all YAML documents in a stream + and produce corresponding Python objects. + Resolve only basic YAML tags. + """ + return load_all(stream, RoundTripLoader, version, preserve_quotes=preserve_quotes) + + +def emit( + events, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, +): + # type: (Any, Optional[StreamType], Any, Optional[bool], Union[int, None], Optional[int], Optional[bool], Any) -> Any # NOQA + """ + Emit YAML parsing events into a stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + stream = StringIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + ) + try: + for event in events: + dumper.emit(event) + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +enc = None if PY3 else 'utf-8' + + +def serialize_all( + nodes, + stream=None, + Dumper=Dumper, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, +): + # type: (Any, Optional[StreamType], Any, Any, Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any) -> Any # NOQA + """ + Serialize a sequence of representation trees into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + version=version, + tags=tags, + explicit_start=explicit_start, + explicit_end=explicit_end, + ) + try: + dumper._serializer.open() + for node in nodes: + dumper.serialize(node) + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + + +def serialize(node, stream=None, Dumper=Dumper, **kwds): + # type: (Any, Optional[StreamType], Any, Any) -> Any + """ + Serialize a representation tree into a YAML stream. + If stream is None, return the produced string instead. 
+ """ + return serialize_all([node], stream, Dumper=Dumper, **kwds) + + +def dump_all( + documents, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Any, Any, Any, Any, Any) -> Optional[str] # NOQA + """ + Serialize a sequence of Python objects into a YAML stream. + If stream is None, return the produced string instead. + """ + getvalue = None + if top_level_colon_align is True: + top_level_colon_align = max([len(str(x)) for x in documents[0]]) + if stream is None: + if encoding is None: + stream = StringIO() + else: + stream = BytesIO() + getvalue = stream.getvalue + dumper = Dumper( + stream, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + try: + dumper._serializer.open() + for data in documents: + try: + dumper._representer.represent(data) + except AttributeError: + # nprint(dir(dumper._representer)) + raise + dumper._serializer.close() + finally: + try: + dumper._emitter.dispose() + except AttributeError: + raise + dumper.dispose() # cyaml + if getvalue is not None: + return getvalue() + return None + + +def dump( + data, + stream=None, + Dumper=Dumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> Optional[str] # NOQA + """ + Serialize a Python object into a YAML stream. + If stream is None, return the produced string instead. + + default_style ∈ None, '', '"', "'", '|', '>' + + """ + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + ) + + +def safe_dump_all(documents, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[str] + """ + Serialize a sequence of Python objects into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. + """ + return dump_all(documents, stream, Dumper=SafeDumper, **kwds) + + +def safe_dump(data, stream=None, **kwds): + # type: (Any, Optional[StreamType], Any) -> Optional[str] + """ + Serialize a Python object into a YAML stream. + Produce only basic YAML tags. + If stream is None, return the produced string instead. 
+ """ + return dump_all([data], stream, Dumper=SafeDumper, **kwds) + + +def round_trip_dump( + data, + stream=None, + Dumper=RoundTripDumper, + default_style=None, + default_flow_style=None, + canonical=None, + indent=None, + width=None, + allow_unicode=None, + line_break=None, + encoding=enc, + explicit_start=None, + explicit_end=None, + version=None, + tags=None, + block_seq_indent=None, + top_level_colon_align=None, + prefix_colon=None, +): + # type: (Any, Optional[StreamType], Any, Any, Any, Optional[bool], Optional[int], Optional[int], Optional[bool], Any, Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any, Any, Any) -> Optional[str] # NOQA + allow_unicode = True if allow_unicode is None else allow_unicode + return dump_all( + [data], + stream, + Dumper=Dumper, + default_style=default_style, + default_flow_style=default_flow_style, + canonical=canonical, + indent=indent, + width=width, + allow_unicode=allow_unicode, + line_break=line_break, + encoding=encoding, + explicit_start=explicit_start, + explicit_end=explicit_end, + version=version, + tags=tags, + block_seq_indent=block_seq_indent, + top_level_colon_align=top_level_colon_align, + prefix_colon=prefix_colon, + ) + + +# Loader/Dumper are no longer composites, to get to the associated +# Resolver()/Representer(), etc., you need to instantiate the class + + +def add_implicit_resolver( + tag, regexp, first=None, Loader=None, Dumper=None, resolver=Resolver +): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add an implicit scalar detector. + If an implicit scalar value matches the given regexp, + the corresponding tag is assigned to the scalar. + first is a sequence of possible initial characters or None. + """ + if Loader is None and Dumper is None: + resolver.add_implicit_resolver(tag, regexp, first) + return + if Loader: + if hasattr(Loader, 'add_implicit_resolver'): + Loader.add_implicit_resolver(tag, regexp, first) + elif issubclass( + Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader) + ): + Resolver.add_implicit_resolver(tag, regexp, first) + else: + raise NotImplementedError + if Dumper: + if hasattr(Dumper, 'add_implicit_resolver'): + Dumper.add_implicit_resolver(tag, regexp, first) + elif issubclass( + Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper) + ): + Resolver.add_implicit_resolver(tag, regexp, first) + else: + raise NotImplementedError + + +# this code currently not tested +def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=None, resolver=Resolver): + # type: (Any, Any, Any, Any, Any, Any) -> None + """ + Add a path based resolver for the given tag. + A path is a list of keys that forms a path + to a node in the representation tree. + Keys can be string values, integers, or None. 
+ """ + if Loader is None and Dumper is None: + resolver.add_path_resolver(tag, path, kind) + return + if Loader: + if hasattr(Loader, 'add_path_resolver'): + Loader.add_path_resolver(tag, path, kind) + elif issubclass( + Loader, (BaseLoader, SafeLoader, ruamel.yaml.loader.Loader, RoundTripLoader) + ): + Resolver.add_path_resolver(tag, path, kind) + else: + raise NotImplementedError + if Dumper: + if hasattr(Dumper, 'add_path_resolver'): + Dumper.add_path_resolver(tag, path, kind) + elif issubclass( + Dumper, (BaseDumper, SafeDumper, ruamel.yaml.dumper.Dumper, RoundTripDumper) + ): + Resolver.add_path_resolver(tag, path, kind) + else: + raise NotImplementedError + + +def add_constructor(tag, object_constructor, Loader=None, constructor=Constructor): + # type: (Any, Any, Any, Any) -> None + """ + Add an object constructor for the given tag. + object_onstructor is a function that accepts a Loader instance + and a node object and produces the corresponding Python object. + """ + if Loader is None: + constructor.add_constructor(tag, object_constructor) + else: + if hasattr(Loader, 'add_constructor'): + Loader.add_constructor(tag, object_constructor) + return + if issubclass(Loader, BaseLoader): + BaseConstructor.add_constructor(tag, object_constructor) + elif issubclass(Loader, SafeLoader): + SafeConstructor.add_constructor(tag, object_constructor) + elif issubclass(Loader, Loader): + Constructor.add_constructor(tag, object_constructor) + elif issubclass(Loader, RoundTripLoader): + RoundTripConstructor.add_constructor(tag, object_constructor) + else: + raise NotImplementedError + + +def add_multi_constructor(tag_prefix, multi_constructor, Loader=None, constructor=Constructor): + # type: (Any, Any, Any, Any) -> None + """ + Add a multi-constructor for the given tag prefix. + Multi-constructor is called for a node if its tag starts with tag_prefix. + Multi-constructor accepts a Loader instance, a tag suffix, + and a node object and produces the corresponding Python object. + """ + if Loader is None: + constructor.add_multi_constructor(tag_prefix, multi_constructor) + else: + if False and hasattr(Loader, 'add_multi_constructor'): + Loader.add_multi_constructor(tag_prefix, constructor) + return + if issubclass(Loader, BaseLoader): + BaseConstructor.add_multi_constructor(tag_prefix, multi_constructor) + elif issubclass(Loader, SafeLoader): + SafeConstructor.add_multi_constructor(tag_prefix, multi_constructor) + elif issubclass(Loader, ruamel.yaml.loader.Loader): + Constructor.add_multi_constructor(tag_prefix, multi_constructor) + elif issubclass(Loader, RoundTripLoader): + RoundTripConstructor.add_multi_constructor(tag_prefix, multi_constructor) + else: + raise NotImplementedError + + +def add_representer(data_type, object_representer, Dumper=None, representer=Representer): + # type: (Any, Any, Any, Any) -> None + """ + Add a representer for the given type. + object_representer is a function accepting a Dumper instance + and an instance of the given data type + and producing the corresponding representation node. 
+ """ + if Dumper is None: + representer.add_representer(data_type, object_representer) + else: + if hasattr(Dumper, 'add_representer'): + Dumper.add_representer(data_type, object_representer) + return + if issubclass(Dumper, BaseDumper): + BaseRepresenter.add_representer(data_type, object_representer) + elif issubclass(Dumper, SafeDumper): + SafeRepresenter.add_representer(data_type, object_representer) + elif issubclass(Dumper, Dumper): + Representer.add_representer(data_type, object_representer) + elif issubclass(Dumper, RoundTripDumper): + RoundTripRepresenter.add_representer(data_type, object_representer) + else: + raise NotImplementedError + + +# this code currently not tested +def add_multi_representer(data_type, multi_representer, Dumper=None, representer=Representer): + # type: (Any, Any, Any, Any) -> None + """ + Add a representer for the given type. + multi_representer is a function accepting a Dumper instance + and an instance of the given data type or subtype + and producing the corresponding representation node. + """ + if Dumper is None: + representer.add_multi_representer(data_type, multi_representer) + else: + if hasattr(Dumper, 'add_multi_representer'): + Dumper.add_multi_representer(data_type, multi_representer) + return + if issubclass(Dumper, BaseDumper): + BaseRepresenter.add_multi_representer(data_type, multi_representer) + elif issubclass(Dumper, SafeDumper): + SafeRepresenter.add_multi_representer(data_type, multi_representer) + elif issubclass(Dumper, Dumper): + Representer.add_multi_representer(data_type, multi_representer) + elif issubclass(Dumper, RoundTripDumper): + RoundTripRepresenter.add_multi_representer(data_type, multi_representer) + else: + raise NotImplementedError + + +class YAMLObjectMetaclass(type): + """ + The metaclass for YAMLObject. + """ + + def __init__(cls, name, bases, kwds): + # type: (Any, Any, Any) -> None + super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) + if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: + cls.yaml_constructor.add_constructor(cls.yaml_tag, cls.from_yaml) # type: ignore + cls.yaml_representer.add_representer(cls, cls.to_yaml) # type: ignore + + +class YAMLObject(with_metaclass(YAMLObjectMetaclass)): # type: ignore + """ + An object that can dump itself to a YAML stream + and load itself from a YAML stream. + """ + + __slots__ = () # no direct instantiation, so allow immutable subclasses + + yaml_constructor = Constructor + yaml_representer = Representer + + yaml_tag = None # type: Any + yaml_flow_style = None # type: Any + + @classmethod + def from_yaml(cls, constructor, node): + # type: (Any, Any) -> Any + """ + Convert a representation node to a Python object. + """ + return constructor.construct_yaml_object(node, cls) + + @classmethod + def to_yaml(cls, representer, data): + # type: (Any, Any) -> Any + """ + Convert a Python object to a representation node. 
+ """ + return representer.represent_yaml_object( + cls.yaml_tag, data, cls, flow_style=cls.yaml_flow_style + ) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py new file mode 100644 index 0000000000..da86e9c857 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/nodes.py @@ -0,0 +1,131 @@ +# coding: utf-8 + +from __future__ import print_function + +import sys +from .compat import string_types + +if False: # MYPY + from typing import Dict, Any, Text # NOQA + + +class Node(object): + __slots__ = 'tag', 'value', 'start_mark', 'end_mark', 'comment', 'anchor' + + def __init__(self, tag, value, start_mark, end_mark, comment=None, anchor=None): + # type: (Any, Any, Any, Any, Any, Any) -> None + self.tag = tag + self.value = value + self.start_mark = start_mark + self.end_mark = end_mark + self.comment = comment + self.anchor = anchor + + def __repr__(self): + # type: () -> str + value = self.value + # if isinstance(value, list): + # if len(value) == 0: + # value = '' + # elif len(value) == 1: + # value = '<1 item>' + # else: + # value = '<%d items>' % len(value) + # else: + # if len(value) > 75: + # value = repr(value[:70]+u' ... ') + # else: + # value = repr(value) + value = repr(value) + return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value) + + def dump(self, indent=0): + # type: (int) -> None + if isinstance(self.value, string_types): + sys.stdout.write( + '{}{}(tag={!r}, value={!r})\n'.format( + ' ' * indent, self.__class__.__name__, self.tag, self.value + ) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + return + sys.stdout.write( + '{}{}(tag={!r})\n'.format(' ' * indent, self.__class__.__name__, self.tag) + ) + if self.comment: + sys.stdout.write(' {}comment: {})\n'.format(' ' * indent, self.comment)) + for v in self.value: + if isinstance(v, tuple): + for v1 in v: + v1.dump(indent + 1) + elif isinstance(v, Node): + v.dump(indent + 1) + else: + sys.stdout.write('Node value type? {}\n'.format(type(v))) + + +class ScalarNode(Node): + """ + styles: + ? -> set() ? 
key, no value + " -> double quoted + ' -> single quoted + | -> literal style + > -> folding style + """ + + __slots__ = ('style',) + id = 'scalar' + + def __init__( + self, tag, value, start_mark=None, end_mark=None, style=None, comment=None, anchor=None + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment, anchor=anchor) + self.style = style + + +class CollectionNode(Node): + __slots__ = ('flow_style',) + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + Node.__init__(self, tag, value, start_mark, end_mark, comment=comment) + self.flow_style = flow_style + self.anchor = anchor + + +class SequenceNode(CollectionNode): + __slots__ = () + id = 'sequence' + + +class MappingNode(CollectionNode): + __slots__ = ('merge',) + id = 'mapping' + + def __init__( + self, + tag, + value, + start_mark=None, + end_mark=None, + flow_style=None, + comment=None, + anchor=None, + ): + # type: (Any, Any, Any, Any, Any, Any, Any) -> None + CollectionNode.__init__( + self, tag, value, start_mark, end_mark, flow_style, comment, anchor + ) + self.merge = None diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py new file mode 100644 index 0000000000..49bfbc09dd --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/parser.py @@ -0,0 +1,802 @@ +# coding: utf-8 + +from __future__ import absolute_import + +# The following YAML grammar is LL(1) and is parsed by a recursive descent +# parser. +# +# stream ::= STREAM-START implicit_document? explicit_document* +# STREAM-END +# implicit_document ::= block_node DOCUMENT-END* +# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +# block_node_or_indentless_sequence ::= +# ALIAS +# | properties (block_content | +# indentless_block_sequence)? +# | block_content +# | indentless_block_sequence +# block_node ::= ALIAS +# | properties block_content? +# | block_content +# flow_node ::= ALIAS +# | properties flow_content? +# | flow_content +# properties ::= TAG ANCHOR? | ANCHOR TAG? +# block_content ::= block_collection | flow_collection | SCALAR +# flow_content ::= flow_collection | SCALAR +# block_collection ::= block_sequence | block_mapping +# flow_collection ::= flow_sequence | flow_mapping +# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* +# BLOCK-END +# indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +# block_mapping ::= BLOCK-MAPPING_START +# ((KEY block_node_or_indentless_sequence?)? +# (VALUE block_node_or_indentless_sequence?)?)* +# BLOCK-END +# flow_sequence ::= FLOW-SEQUENCE-START +# (flow_sequence_entry FLOW-ENTRY)* +# flow_sequence_entry? +# FLOW-SEQUENCE-END +# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +# flow_mapping ::= FLOW-MAPPING-START +# (flow_mapping_entry FLOW-ENTRY)* +# flow_mapping_entry? +# FLOW-MAPPING-END +# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+# +# FIRST sets: +# +# stream: { STREAM-START } +# explicit_document: { DIRECTIVE DOCUMENT-START } +# implicit_document: FIRST(block_node) +# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START +# FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR } +# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# block_sequence: { BLOCK-SEQUENCE-START } +# block_mapping: { BLOCK-MAPPING-START } +# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR +# BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START +# FLOW-MAPPING-START BLOCK-ENTRY } +# indentless_sequence: { ENTRY } +# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START } +# flow_sequence: { FLOW-SEQUENCE-START } +# flow_mapping: { FLOW-MAPPING-START } +# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START +# FLOW-MAPPING-START KEY } +# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START +# FLOW-MAPPING-START KEY } + +# need to have full path with import, as pkg_resources tries to load parser.py in __init__.py +# only to not do anything with the package afterwards +# and for Jython too + + +from ...ruamel.yaml.error import MarkedYAMLError +from ...ruamel.yaml.tokens import * # NOQA +from ...ruamel.yaml.events import * # NOQA +from ...ruamel.yaml.scanner import Scanner, RoundTripScanner, ScannerError # NOQA +from ...ruamel.yaml.compat import utf8, nprint, nprintf # NOQA + +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + +__all__ = ['Parser', 'RoundTripParser', 'ParserError'] + + +class ParserError(MarkedYAMLError): + pass + + +class Parser(object): + # Since writing a recursive-descendant parser is a straightforward task, we + # do not give many comments here. + + DEFAULT_TAGS = {u'!': u'!', u'!!': u'tag:yaml.org,2002:'} + + def __init__(self, loader): + # type: (Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_parser', None) is None: + self.loader._parser = self + self.reset_parser() + + def reset_parser(self): + # type: () -> None + # Reset the state attributes (to clear self-references) + self.current_event = None + self.tag_handles = {} # type: Dict[Any, Any] + self.states = [] # type: List[Any] + self.marks = [] # type: List[Any] + self.state = self.parse_stream_start # type: Any + + def dispose(self): + # type: () -> None + self.reset_parser() + + @property + def scanner(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.scanner + return self.loader._scanner + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver + return self.loader._resolver + + def check_event(self, *choices): + # type: (Any) -> bool + # Check the type of the next event. + if self.current_event is None: + if self.state: + self.current_event = self.state() + if self.current_event is not None: + if not choices: + return True + for choice in choices: + if isinstance(self.current_event, choice): + return True + return False + + def peek_event(self): + # type: () -> Any + # Get the next event. 
+ if self.current_event is None: + if self.state: + self.current_event = self.state() + return self.current_event + + def get_event(self): + # type: () -> Any + # Get the next event and proceed further. + if self.current_event is None: + if self.state: + self.current_event = self.state() + value = self.current_event + self.current_event = None + return value + + # stream ::= STREAM-START implicit_document? explicit_document* + # STREAM-END + # implicit_document ::= block_node DOCUMENT-END* + # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* + + def parse_stream_start(self): + # type: () -> Any + # Parse the stream start. + token = self.scanner.get_token() + token.move_comment(self.scanner.peek_token()) + event = StreamStartEvent(token.start_mark, token.end_mark, encoding=token.encoding) + + # Prepare the next state. + self.state = self.parse_implicit_document_start + + return event + + def parse_implicit_document_start(self): + # type: () -> Any + # Parse an implicit document. + if not self.scanner.check_token(DirectiveToken, DocumentStartToken, StreamEndToken): + self.tag_handles = self.DEFAULT_TAGS + token = self.scanner.peek_token() + start_mark = end_mark = token.start_mark + event = DocumentStartEvent(start_mark, end_mark, explicit=False) + + # Prepare the next state. + self.states.append(self.parse_document_end) + self.state = self.parse_block_node + + return event + + else: + return self.parse_document_start() + + def parse_document_start(self): + # type: () -> Any + # Parse any extra document end indicators. + while self.scanner.check_token(DocumentEndToken): + self.scanner.get_token() + # Parse an explicit document. + if not self.scanner.check_token(StreamEndToken): + token = self.scanner.peek_token() + start_mark = token.start_mark + version, tags = self.process_directives() + if not self.scanner.check_token(DocumentStartToken): + raise ParserError( + None, + None, + "expected '', but found %r" % self.scanner.peek_token().id, + self.scanner.peek_token().start_mark, + ) + token = self.scanner.get_token() + end_mark = token.end_mark + # if self.loader is not None and \ + # end_mark.line != self.scanner.peek_token().start_mark.line: + # self.loader.scalar_after_indicator = False + event = DocumentStartEvent( + start_mark, end_mark, explicit=True, version=version, tags=tags + ) # type: Any + self.states.append(self.parse_document_end) + self.state = self.parse_document_content + else: + # Parse the end of the stream. + token = self.scanner.get_token() + event = StreamEndEvent(token.start_mark, token.end_mark, comment=token.comment) + assert not self.states + assert not self.marks + self.state = None + return event + + def parse_document_end(self): + # type: () -> Any + # Parse the document end. + token = self.scanner.peek_token() + start_mark = end_mark = token.start_mark + explicit = False + if self.scanner.check_token(DocumentEndToken): + token = self.scanner.get_token() + end_mark = token.end_mark + explicit = True + event = DocumentEndEvent(start_mark, end_mark, explicit=explicit) + + # Prepare the next state. 
+ if self.resolver.processing_version == (1, 1): + self.state = self.parse_document_start + else: + self.state = self.parse_implicit_document_start + + return event + + def parse_document_content(self): + # type: () -> Any + if self.scanner.check_token( + DirectiveToken, DocumentStartToken, DocumentEndToken, StreamEndToken + ): + event = self.process_empty_scalar(self.scanner.peek_token().start_mark) + self.state = self.states.pop() + return event + else: + return self.parse_block_node() + + def process_directives(self): + # type: () -> Any + yaml_version = None + self.tag_handles = {} + while self.scanner.check_token(DirectiveToken): + token = self.scanner.get_token() + if token.name == u'YAML': + if yaml_version is not None: + raise ParserError( + None, None, 'found duplicate YAML directive', token.start_mark + ) + major, minor = token.value + if major != 1: + raise ParserError( + None, + None, + 'found incompatible YAML document (version 1.* is ' 'required)', + token.start_mark, + ) + yaml_version = token.value + elif token.name == u'TAG': + handle, prefix = token.value + if handle in self.tag_handles: + raise ParserError( + None, None, 'duplicate tag handle %r' % utf8(handle), token.start_mark + ) + self.tag_handles[handle] = prefix + if bool(self.tag_handles): + value = yaml_version, self.tag_handles.copy() # type: Any + else: + value = yaml_version, None + if self.loader is not None and hasattr(self.loader, 'tags'): + self.loader.version = yaml_version + if self.loader.tags is None: + self.loader.tags = {} + for k in self.tag_handles: + self.loader.tags[k] = self.tag_handles[k] + for key in self.DEFAULT_TAGS: + if key not in self.tag_handles: + self.tag_handles[key] = self.DEFAULT_TAGS[key] + return value + + # block_node_or_indentless_sequence ::= ALIAS + # | properties (block_content | indentless_block_sequence)? + # | block_content + # | indentless_block_sequence + # block_node ::= ALIAS + # | properties block_content? + # | block_content + # flow_node ::= ALIAS + # | properties flow_content? + # | flow_content + # properties ::= TAG ANCHOR? | ANCHOR TAG? 
+ # block_content ::= block_collection | flow_collection | SCALAR + # flow_content ::= flow_collection | SCALAR + # block_collection ::= block_sequence | block_mapping + # flow_collection ::= flow_sequence | flow_mapping + + def parse_block_node(self): + # type: () -> Any + return self.parse_node(block=True) + + def parse_flow_node(self): + # type: () -> Any + return self.parse_node() + + def parse_block_node_or_indentless_sequence(self): + # type: () -> Any + return self.parse_node(block=True, indentless_sequence=True) + + def transform_tag(self, handle, suffix): + # type: (Any, Any) -> Any + return self.tag_handles[handle] + suffix + + def parse_node(self, block=False, indentless_sequence=False): + # type: (bool, bool) -> Any + if self.scanner.check_token(AliasToken): + token = self.scanner.get_token() + event = AliasEvent(token.value, token.start_mark, token.end_mark) # type: Any + self.state = self.states.pop() + return event + + anchor = None + tag = None + start_mark = end_mark = tag_mark = None + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + start_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if self.scanner.check_token(TagToken): + token = self.scanner.get_token() + tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + elif self.scanner.check_token(TagToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + tag = token.value + if self.scanner.check_token(AnchorToken): + token = self.scanner.get_token() + start_mark = tag_mark = token.start_mark + end_mark = token.end_mark + anchor = token.value + if tag is not None: + handle, suffix = tag + if handle is not None: + if handle not in self.tag_handles: + raise ParserError( + 'while parsing a node', + start_mark, + 'found undefined tag handle %r' % utf8(handle), + tag_mark, + ) + tag = self.transform_tag(handle, suffix) + else: + tag = suffix + # if tag == u'!': + # raise ParserError("while parsing a node", start_mark, + # "found non-specific tag '!'", tag_mark, + # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' + # and share your opinion.") + if start_mark is None: + start_mark = end_mark = self.scanner.peek_token().start_mark + event = None + implicit = tag is None or tag == u'!' 
+ if indentless_sequence and self.scanner.check_token(BlockEntryToken): + comment = None + pt = self.scanner.peek_token() + if pt.comment and pt.comment[0]: + comment = [pt.comment[0], []] + pt.comment[0] = None + end_mark = self.scanner.peek_token().end_mark + event = SequenceStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_indentless_sequence_entry + return event + + if self.scanner.check_token(ScalarToken): + token = self.scanner.get_token() + # self.scanner.peek_token_same_line_comment(token) + end_mark = token.end_mark + if (token.plain and tag is None) or tag == u'!': + implicit = (True, False) + elif tag is None: + implicit = (False, True) + else: + implicit = (False, False) + # nprint('se', token.value, token.comment) + event = ScalarEvent( + anchor, + tag, + implicit, + token.value, + start_mark, + end_mark, + style=token.style, + comment=token.comment, + ) + self.state = self.states.pop() + elif self.scanner.check_token(FlowSequenceStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = SequenceStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_sequence_first_entry + elif self.scanner.check_token(FlowMappingStartToken): + pt = self.scanner.peek_token() + end_mark = pt.end_mark + event = MappingStartEvent( + anchor, + tag, + implicit, + start_mark, + end_mark, + flow_style=True, + comment=pt.comment, + ) + self.state = self.parse_flow_mapping_first_key + elif block and self.scanner.check_token(BlockSequenceStartToken): + end_mark = self.scanner.peek_token().start_mark + # should inserting the comment be dependent on the + # indentation? + pt = self.scanner.peek_token() + comment = pt.comment + # nprint('pt0', type(pt)) + if comment is None or comment[1] is None: + comment = pt.split_comment() + # nprint('pt1', comment) + event = SequenceStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_block_sequence_first_entry + elif block and self.scanner.check_token(BlockMappingStartToken): + end_mark = self.scanner.peek_token().start_mark + comment = self.scanner.peek_token().comment + event = MappingStartEvent( + anchor, tag, implicit, start_mark, end_mark, flow_style=False, comment=comment + ) + self.state = self.parse_block_mapping_first_key + elif anchor is not None or tag is not None: + # Empty scalars are allowed even if a tag or an anchor is + # specified. 
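+            # (for example an input line like "key: !!str" or "key: &a"
+            # produces an empty scalar that still carries the tag/anchor)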
+ event = ScalarEvent(anchor, tag, (implicit, False), "", start_mark, end_mark) + self.state = self.states.pop() + else: + if block: + node = 'block' + else: + node = 'flow' + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a %s node' % node, + start_mark, + 'expected the node content, but found %r' % token.id, + token.start_mark, + ) + return event + + # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* + # BLOCK-END + + def parse_block_sequence_first_entry(self): + # type: () -> Any + token = self.scanner.get_token() + # move any comment from start token + # token.move_comment(self.scanner.peek_token()) + self.marks.append(token.start_mark) + return self.parse_block_sequence_entry() + + def parse_block_sequence_entry(self): + # type: () -> Any + if self.scanner.check_token(BlockEntryToken): + token = self.scanner.get_token() + token.move_comment(self.scanner.peek_token()) + if not self.scanner.check_token(BlockEntryToken, BlockEndToken): + self.states.append(self.parse_block_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_block_sequence_entry + return self.process_empty_scalar(token.end_mark) + if not self.scanner.check_token(BlockEndToken): + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a block collection', + self.marks[-1], + 'expected , but found %r' % token.id, + token.start_mark, + ) + token = self.scanner.get_token() # BlockEndToken + event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + # indentless_sequence ::= (BLOCK-ENTRY block_node?)+ + + # indentless_sequence? + # sequence: + # - entry + # - nested + + def parse_indentless_sequence_entry(self): + # type: () -> Any + if self.scanner.check_token(BlockEntryToken): + token = self.scanner.get_token() + token.move_comment(self.scanner.peek_token()) + if not self.scanner.check_token( + BlockEntryToken, KeyToken, ValueToken, BlockEndToken + ): + self.states.append(self.parse_indentless_sequence_entry) + return self.parse_block_node() + else: + self.state = self.parse_indentless_sequence_entry + return self.process_empty_scalar(token.end_mark) + token = self.scanner.peek_token() + event = SequenceEndEvent(token.start_mark, token.start_mark, comment=token.comment) + self.state = self.states.pop() + return event + + # block_mapping ::= BLOCK-MAPPING_START + # ((KEY block_node_or_indentless_sequence?)? 
+ # (VALUE block_node_or_indentless_sequence?)?)* + # BLOCK-END + + def parse_block_mapping_first_key(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_block_mapping_key() + + def parse_block_mapping_key(self): + # type: () -> Any + if self.scanner.check_token(KeyToken): + token = self.scanner.get_token() + token.move_comment(self.scanner.peek_token()) + if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_value) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(token.end_mark) + if self.resolver.processing_version > (1, 1) and self.scanner.check_token(ValueToken): + self.state = self.parse_block_mapping_value + return self.process_empty_scalar(self.scanner.peek_token().start_mark) + if not self.scanner.check_token(BlockEndToken): + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a block mapping', + self.marks[-1], + 'expected , but found %r' % token.id, + token.start_mark, + ) + token = self.scanner.get_token() + token.move_comment(self.scanner.peek_token()) + event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_block_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + # value token might have post comment move it to e.g. block + if self.scanner.check_token(ValueToken): + token.move_comment(self.scanner.peek_token()) + else: + if not self.scanner.check_token(KeyToken): + token.move_comment(self.scanner.peek_token(), empty=True) + # else: empty value for this key cannot move token.comment + if not self.scanner.check_token(KeyToken, ValueToken, BlockEndToken): + self.states.append(self.parse_block_mapping_key) + return self.parse_block_node_or_indentless_sequence() + else: + self.state = self.parse_block_mapping_key + comment = token.comment + if comment is None: + token = self.scanner.peek_token() + comment = token.comment + if comment: + token._comment = [None, comment[1]] + comment = [comment[0], None] + return self.process_empty_scalar(token.end_mark, comment=comment) + else: + self.state = self.parse_block_mapping_key + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + # flow_sequence ::= FLOW-SEQUENCE-START + # (flow_sequence_entry FLOW-ENTRY)* + # flow_sequence_entry? + # FLOW-SEQUENCE-END + # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + # + # Note that while production rules for both flow_sequence_entry and + # flow_mapping_entry are equal, their interpretations are different. + # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?` + # generate an inline mapping (set syntax). 
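+    # For instance, in the flow sequence "[a, b: 1, c]" the entry "b: 1"
+    # goes through that KEY/VALUE branch and becomes the single-pair
+    # mapping {b: 1} nested inside the resulting sequence.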
+ + def parse_flow_sequence_first_entry(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_sequence_entry(first=True) + + def parse_flow_sequence_entry(self, first=False): + # type: (bool) -> Any + if not self.scanner.check_token(FlowSequenceEndToken): + if not first: + if self.scanner.check_token(FlowEntryToken): + self.scanner.get_token() + else: + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a flow sequence', + self.marks[-1], + "expected ',' or ']', but got %r" % token.id, + token.start_mark, + ) + + if self.scanner.check_token(KeyToken): + token = self.scanner.peek_token() + event = MappingStartEvent( + None, None, True, token.start_mark, token.end_mark, flow_style=True + ) # type: Any + self.state = self.parse_flow_sequence_entry_mapping_key + return event + elif not self.scanner.check_token(FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry) + return self.parse_flow_node() + token = self.scanner.get_token() + event = SequenceEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_sequence_entry_mapping_key(self): + # type: () -> Any + token = self.scanner.get_token() + if not self.scanner.check_token(ValueToken, FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_value + return self.process_empty_scalar(token.end_mark) + + def parse_flow_sequence_entry_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + if not self.scanner.check_token(FlowEntryToken, FlowSequenceEndToken): + self.states.append(self.parse_flow_sequence_entry_mapping_end) + return self.parse_flow_node() + else: + self.state = self.parse_flow_sequence_entry_mapping_end + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_sequence_entry_mapping_end + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_sequence_entry_mapping_end(self): + # type: () -> Any + self.state = self.parse_flow_sequence_entry + token = self.scanner.peek_token() + return MappingEndEvent(token.start_mark, token.start_mark) + + # flow_mapping ::= FLOW-MAPPING-START + # (flow_mapping_entry FLOW-ENTRY)* + # flow_mapping_entry? + # FLOW-MAPPING-END + # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+ + def parse_flow_mapping_first_key(self): + # type: () -> Any + token = self.scanner.get_token() + self.marks.append(token.start_mark) + return self.parse_flow_mapping_key(first=True) + + def parse_flow_mapping_key(self, first=False): + # type: (Any) -> Any + if not self.scanner.check_token(FlowMappingEndToken): + if not first: + if self.scanner.check_token(FlowEntryToken): + self.scanner.get_token() + else: + token = self.scanner.peek_token() + raise ParserError( + 'while parsing a flow mapping', + self.marks[-1], + "expected ',' or '}', but got %r" % token.id, + token.start_mark, + ) + if self.scanner.check_token(KeyToken): + token = self.scanner.get_token() + if not self.scanner.check_token( + ValueToken, FlowEntryToken, FlowMappingEndToken + ): + self.states.append(self.parse_flow_mapping_value) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(token.end_mark) + elif self.resolver.processing_version > (1, 1) and self.scanner.check_token( + ValueToken + ): + self.state = self.parse_flow_mapping_value + return self.process_empty_scalar(self.scanner.peek_token().end_mark) + elif not self.scanner.check_token(FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_empty_value) + return self.parse_flow_node() + token = self.scanner.get_token() + event = MappingEndEvent(token.start_mark, token.end_mark, comment=token.comment) + self.state = self.states.pop() + self.marks.pop() + return event + + def parse_flow_mapping_value(self): + # type: () -> Any + if self.scanner.check_token(ValueToken): + token = self.scanner.get_token() + if not self.scanner.check_token(FlowEntryToken, FlowMappingEndToken): + self.states.append(self.parse_flow_mapping_key) + return self.parse_flow_node() + else: + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(token.end_mark) + else: + self.state = self.parse_flow_mapping_key + token = self.scanner.peek_token() + return self.process_empty_scalar(token.start_mark) + + def parse_flow_mapping_empty_value(self): + # type: () -> Any + self.state = self.parse_flow_mapping_key + return self.process_empty_scalar(self.scanner.peek_token().start_mark) + + def process_empty_scalar(self, mark, comment=None): + # type: (Any, Any) -> Any + return ScalarEvent(None, None, (True, False), "", mark, mark, comment=comment) + + +class RoundTripParser(Parser): + """roundtrip is a safe loader, that wants to see the unmangled tag""" + + def transform_tag(self, handle, suffix): + # type: (Any, Any) -> Any + # return self.tag_handles[handle]+suffix + if handle == '!!' 
and suffix in ( + u'null', + u'bool', + u'int', + u'float', + u'binary', + u'timestamp', + u'omap', + u'pairs', + u'set', + u'str', + u'seq', + u'map', + ): + return Parser.transform_tag(self, handle, suffix) + return handle + suffix diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/py.typed b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/py.typed new file mode 100644 index 0000000000..e69de29bb2 diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py new file mode 100644 index 0000000000..62c7c475b3 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/reader.py @@ -0,0 +1,311 @@ +# coding: utf-8 + +from __future__ import absolute_import + +# This module contains abstractions for the input stream. You don't have to +# looks further, there are no pretty code. +# +# We define two classes here. +# +# Mark(source, line, column) +# It's just a record and its only use is producing nice error messages. +# Parser does not use it for any other purposes. +# +# Reader(source, data) +# Reader determines the encoding of `data` and converts it to unicode. +# Reader provides the following methods and attributes: +# reader.peek(length=1) - return the next `length` characters +# reader.forward(length=1) - move the current position to `length` +# characters. +# reader.index - the number of the current character. +# reader.line, stream.column - the line and the column of the current +# character. + +import codecs + +from ...ruamel.yaml.error import YAMLError, FileMark, StringMark, YAMLStreamError +from ...ruamel.yaml.compat import text_type, binary_type, PY3, UNICODE_SIZE +from ...ruamel.yaml.util import RegExp + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Text, Tuple, Optional # NOQA +# from ruamel.yaml.compat import StreamTextType # NOQA + +__all__ = ['Reader', 'ReaderError'] + + +class ReaderError(YAMLError): + def __init__(self, name, position, character, encoding, reason): + # type: (Any, Any, Any, Any, Any) -> None + self.name = name + self.character = character + self.position = position + self.encoding = encoding + self.reason = reason + + def __str__(self): + # type: () -> str + if isinstance(self.character, binary_type): + return "'%s' codec can't decode byte #x%02x: %s\n" ' in "%s", position %d' % ( + self.encoding, + ord(self.character), + self.reason, + self.name, + self.position, + ) + else: + return 'unacceptable character #x%04x: %s\n' ' in "%s", position %d' % ( + self.character, + self.reason, + self.name, + self.position, + ) + + +class Reader(object): + # Reader: + # - determines the data encoding and converts it to a unicode string, + # - checks if characters are in allowed range, + # - adds '\0' to the end. + + # Reader accepts + # - a `str` object (PY2) / a `bytes` object (PY3), + # - a `unicode` object (PY2) / a `str` object (PY3), + # - a file-like object with its `read` method returning `str`, + # - a file-like object with its `read` method returning `unicode`. + + # Yeah, it's ugly and slow. 
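+    # Rough usage sketch (illustration only, based on the description above):
+    #     r = Reader(u'abc')
+    #     r.peek()       # -> 'a'   (does not advance)
+    #     r.forward(2)   # advance two characters
+    #     r.peek()       # -> 'c'
+    #     r.index, r.line, r.column   # -> (2, 0, 2)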
+ + def __init__(self, stream, loader=None): + # type: (Any, Any) -> None + self.loader = loader + if self.loader is not None and getattr(self.loader, '_reader', None) is None: + self.loader._reader = self + self.reset_reader() + self.stream = stream # type: Any # as .read is called + + def reset_reader(self): + # type: () -> None + self.name = None # type: Any + self.stream_pointer = 0 + self.eof = True + self.buffer = "" + self.pointer = 0 + self.raw_buffer = None # type: Any + self.raw_decode = None + self.encoding = None # type: Optional[Text] + self.index = 0 + self.line = 0 + self.column = 0 + + @property + def stream(self): + # type: () -> Any + try: + return self._stream + except AttributeError: + raise YAMLStreamError('input stream needs to specified') + + @stream.setter + def stream(self, val): + # type: (Any) -> None + if val is None: + return + self._stream = None + if isinstance(val, text_type): + self.name = '' + self.check_printable(val) + self.buffer = val + u'\0' # type: ignore + elif isinstance(val, binary_type): + self.name = '' + self.raw_buffer = val + self.determine_encoding() + else: + if not hasattr(val, 'read'): + raise YAMLStreamError('stream argument needs to have a read() method') + self._stream = val + self.name = getattr(self.stream, 'name', '') + self.eof = False + self.raw_buffer = None + self.determine_encoding() + + def peek(self, index=0): + # type: (int) -> Text + try: + return self.buffer[self.pointer + index] + except IndexError: + self.update(index + 1) + return self.buffer[self.pointer + index] + + def prefix(self, length=1): + # type: (int) -> Any + if self.pointer + length >= len(self.buffer): + self.update(length) + return self.buffer[self.pointer : self.pointer + length] + + def forward_1_1(self, length=1): + # type: (int) -> None + if self.pointer + length + 1 >= len(self.buffer): + self.update(length + 1) + while length != 0: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch in u'\n\x85\u2028\u2029' or ( + ch == u'\r' and self.buffer[self.pointer] != u'\n' + ): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def forward(self, length=1): + # type: (int) -> None + if self.pointer + length + 1 >= len(self.buffer): + self.update(length + 1) + while length != 0: + ch = self.buffer[self.pointer] + self.pointer += 1 + self.index += 1 + if ch == u'\n' or (ch == u'\r' and self.buffer[self.pointer] != u'\n'): + self.line += 1 + self.column = 0 + elif ch != u'\uFEFF': + self.column += 1 + length -= 1 + + def get_mark(self): + # type: () -> Any + if self.stream is None: + return StringMark( + self.name, self.index, self.line, self.column, self.buffer, self.pointer + ) + else: + return FileMark(self.name, self.index, self.line, self.column) + + def determine_encoding(self): + # type: () -> None + while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2): + self.update_raw() + if isinstance(self.raw_buffer, binary_type): + if self.raw_buffer.startswith(codecs.BOM_UTF16_LE): + self.raw_decode = codecs.utf_16_le_decode # type: ignore + self.encoding = 'utf-16-le' + elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE): + self.raw_decode = codecs.utf_16_be_decode # type: ignore + self.encoding = 'utf-16-be' + else: + self.raw_decode = codecs.utf_8_decode # type: ignore + self.encoding = 'utf-8' + self.update(1) + + if UNICODE_SIZE == 2: + NON_PRINTABLE = RegExp( + u'[^\x09\x0A\x0D\x20-\x7E\x85' u'\xA0-\uD7FF' u'\uE000-\uFFFD' u']' + ) + else: + NON_PRINTABLE = RegExp( 
+ u'[^\x09\x0A\x0D\x20-\x7E\x85' + u'\xA0-\uD7FF' + u'\uE000-\uFFFD' + u'\U00010000-\U0010FFFF' + u']' + ) + + _printable_ascii = ('\x09\x0A\x0D' + "".join(map(chr, range(0x20, 0x7F)))).encode('ascii') + + @classmethod + def _get_non_printable_ascii(cls, data): # type: ignore + # type: (Text, bytes) -> Optional[Tuple[int, Text]] + ascii_bytes = data.encode('ascii') + non_printables = ascii_bytes.translate(None, cls._printable_ascii) # type: ignore + if not non_printables: + return None + non_printable = non_printables[:1] + return ascii_bytes.index(non_printable), non_printable.decode('ascii') + + @classmethod + def _get_non_printable_regex(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + match = cls.NON_PRINTABLE.search(data) + if not bool(match): + return None + return match.start(), match.group() + + @classmethod + def _get_non_printable(cls, data): + # type: (Text) -> Optional[Tuple[int, Text]] + try: + return cls._get_non_printable_ascii(data) # type: ignore + except UnicodeEncodeError: + return cls._get_non_printable_regex(data) + + def check_printable(self, data): + # type: (Any) -> None + non_printable_match = self._get_non_printable(data) + if non_printable_match is not None: + start, character = non_printable_match + position = self.index + (len(self.buffer) - self.pointer) + start + raise ReaderError( + self.name, + position, + ord(character), + 'unicode', + 'special characters are not allowed', + ) + + def update(self, length): + # type: (int) -> None + if self.raw_buffer is None: + return + self.buffer = self.buffer[self.pointer :] + self.pointer = 0 + while len(self.buffer) < length: + if not self.eof: + self.update_raw() + if self.raw_decode is not None: + try: + data, converted = self.raw_decode(self.raw_buffer, 'strict', self.eof) + except UnicodeDecodeError as exc: + if PY3: + character = self.raw_buffer[exc.start] + else: + character = exc.object[exc.start] + if self.stream is not None: + position = self.stream_pointer - len(self.raw_buffer) + exc.start + elif self.stream is not None: + position = self.stream_pointer - len(self.raw_buffer) + exc.start + else: + position = exc.start + raise ReaderError(self.name, position, character, exc.encoding, exc.reason) + else: + data = self.raw_buffer + converted = len(data) + self.check_printable(data) + self.buffer += data + self.raw_buffer = self.raw_buffer[converted:] + if self.eof: + self.buffer += '\0' + self.raw_buffer = None + break + + def update_raw(self, size=None): + # type: (Optional[int]) -> None + if size is None: + size = 4096 if PY3 else 1024 + data = self.stream.read(size) + if self.raw_buffer is None: + self.raw_buffer = data + else: + self.raw_buffer += data + self.stream_pointer += len(data) + if not data: + self.eof = True + + +# try: +# import psyco +# psyco.bind(Reader) +# except ImportError: +# pass diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py new file mode 100644 index 0000000000..880f4f74ec --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/representer.py @@ -0,0 +1,1282 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division + + +from ...ruamel.yaml.error import * # NOQA +from ...ruamel.yaml.nodes import * # NOQA +from ...ruamel.yaml.compat import text_type, binary_type, to_unicode, PY2, PY3 +from ...ruamel.yaml.compat import ordereddict # type: ignore +from 
...ruamel.yaml.compat import nprint, nprintf # NOQA +from ...ruamel.yaml.scalarstring import ( + LiteralScalarString, + FoldedScalarString, + SingleQuotedScalarString, + DoubleQuotedScalarString, + PlainScalarString, +) +from ...ruamel.yaml.comments import ( + CommentedMap, + CommentedOrderedMap, + CommentedSeq, + CommentedKeySeq, + CommentedKeyMap, + CommentedSet, + comment_attrib, + merge_attrib, + TaggedScalar, +) +from ...ruamel.yaml.scalarint import ScalarInt, BinaryInt, OctalInt, HexInt, HexCapsInt +from ...ruamel.yaml.scalarfloat import ScalarFloat +from ...ruamel.yaml.scalarbool import ScalarBoolean +from ...ruamel.yaml.timestamp import TimeStamp + +import datetime +import sys +import types + +if PY3: + import copyreg + import base64 +else: + import copy_reg as copyreg # type: ignore + +if False: # MYPY + from typing import Dict, List, Any, Union, Text, Optional # NOQA + +# fmt: off +__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer', + 'RepresenterError', 'RoundTripRepresenter'] +# fmt: on + + +class RepresenterError(YAMLError): + pass + + +if PY2: + + def get_classobj_bases(cls): + # type: (Any) -> Any + bases = [cls] + for base in cls.__bases__: + bases.extend(get_classobj_bases(base)) + return bases + + +class BaseRepresenter(object): + + yaml_representers = {} # type: Dict[Any, Any] + yaml_multi_representers = {} # type: Dict[Any, Any] + + def __init__(self, default_style=None, default_flow_style=None, dumper=None): + # type: (Any, Any, Any, Any) -> None + self.dumper = dumper + if self.dumper is not None: + self.dumper._representer = self + self.default_style = default_style + self.default_flow_style = default_flow_style + self.represented_objects = {} # type: Dict[Any, Any] + self.object_keeper = [] # type: List[Any] + self.alias_key = None # type: Optional[int] + self.sort_base_mapping_type_on_output = True + + @property + def serializer(self): + # type: () -> Any + try: + if hasattr(self.dumper, 'typ'): + return self.dumper.serializer + return self.dumper._serializer + except AttributeError: + return self # cyaml + + def represent(self, data): + # type: (Any) -> None + node = self.represent_data(data) + self.serializer.serialize(node) + self.represented_objects = {} + self.object_keeper = [] + self.alias_key = None + + def represent_data(self, data): + # type: (Any) -> Any + if self.ignore_aliases(data): + self.alias_key = None + else: + self.alias_key = id(data) + if self.alias_key is not None: + if self.alias_key in self.represented_objects: + node = self.represented_objects[self.alias_key] + # if node is None: + # raise RepresenterError( + # "recursive objects are not allowed: %r" % data) + return node + # self.represented_objects[alias_key] = None + self.object_keeper.append(data) + data_types = type(data).__mro__ + if PY2: + # if type(data) is types.InstanceType: + if isinstance(data, types.InstanceType): + data_types = get_classobj_bases(data.__class__) + list(data_types) + if data_types[0] in self.yaml_representers: + node = self.yaml_representers[data_types[0]](self, data) + else: + for data_type in data_types: + if data_type in self.yaml_multi_representers: + node = self.yaml_multi_representers[data_type](self, data) + break + else: + if None in self.yaml_multi_representers: + node = self.yaml_multi_representers[None](self, data) + elif None in self.yaml_representers: + node = self.yaml_representers[None](self, data) + else: + node = ScalarNode(None, text_type(data)) + # if alias_key is not None: + # self.represented_objects[alias_key] = node + return 
node + + def represent_key(self, data): + # type: (Any) -> Any + """ + David Fraser: Extract a method to represent keys in mappings, so that + a subclass can choose not to quote them (for example) + used in represent_mapping + https://bitbucket.org/davidfraser/pyyaml/commits/d81df6eb95f20cac4a79eed95ae553b5c6f77b8c + """ + return self.represent_data(data) + + @classmethod + def add_representer(cls, data_type, representer): + # type: (Any, Any) -> None + if 'yaml_representers' not in cls.__dict__: + cls.yaml_representers = cls.yaml_representers.copy() + cls.yaml_representers[data_type] = representer + + @classmethod + def add_multi_representer(cls, data_type, representer): + # type: (Any, Any) -> None + if 'yaml_multi_representers' not in cls.__dict__: + cls.yaml_multi_representers = cls.yaml_multi_representers.copy() + cls.yaml_multi_representers[data_type] = representer + + def represent_scalar(self, tag, value, style=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + if style is None: + style = self.default_style + comment = None + if style and style[0] in '|>': + comment = getattr(value, 'comment', None) + if comment: + comment = [None, [comment]] + node = ScalarNode(tag, value, style=style, comment=comment, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + return node + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item in sequence: + node_item = self.represent_data(item) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = SequenceNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # if not (isinstance(node_item, ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + node = MappingNode(tag, value, flow_style=flow_style) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + if hasattr(mapping, 'items'): + mapping = list(mapping.items()) + if self.sort_base_mapping_type_on_output: + try: + mapping = sorted(mapping) + except TypeError: + pass + for item_key, item_value in mapping: + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if self.default_flow_style is not None: + 
node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def ignore_aliases(self, data): + # type: (Any) -> bool + return False + + +class SafeRepresenter(BaseRepresenter): + def ignore_aliases(self, data): + # type: (Any) -> bool + # https://docs.python.org/3/reference/expressions.html#parenthesized-forms : + # "i.e. two occurrences of the empty tuple may or may not yield the same object" + # so "data is ()" should not be used + if data is None or (isinstance(data, tuple) and data == ()): + return True + if isinstance(data, (binary_type, text_type, bool, int, float)): + return True + return False + + def represent_none(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') + + if PY3: + + def represent_str(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_binary(self, data): + # type: (Any) -> Any + if hasattr(base64, 'encodebytes'): + data = base64.encodebytes(data).decode('ascii') + else: + data = base64.encodestring(data).decode('ascii') + return self.represent_scalar(u'tag:yaml.org,2002:binary', data, style='|') + + else: + + def represent_str(self, data): + # type: (Any) -> Any + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:str', data) + + def represent_bool(self, data, anchor=None): + # type: (Any, Optional[Any]) -> Any + try: + value = self.dumper.boolean_representation[bool(data)] + except AttributeError: + if data: + value = u'true' + else: + value = u'false' + return self.represent_scalar(u'tag:yaml.org,2002:bool', value, anchor=anchor) + + def represent_int(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data)) + + if PY2: + + def represent_long(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:int', text_type(data)) + + inf_value = 1e300 + while repr(inf_value) != repr(inf_value * inf_value): + inf_value *= inf_value + + def represent_float(self, data): + # type: (Any) -> Any + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + else: + value = to_unicode(repr(data)).lower() + if getattr(self.serializer, 'use_version', None) == (1, 1): + if u'.' not in value and u'e' in value: + # Note that in some cases `repr(data)` represents a float number + # without the decimal parts. For instance: + # >>> repr(1e17) + # '1e17' + # Unfortunately, this is not a valid float representation according + # to the definition of the `!!float` tag in YAML 1.1. We fix + # this by adding '.0' before the 'e' symbol. 
+ value = value.replace(u'e', u'.0e', 1) + return self.represent_scalar(u'tag:yaml.org,2002:float', value) + + def represent_list(self, data): + # type: (Any) -> Any + # pairs = (len(data) > 0 and isinstance(data, list)) + # if pairs: + # for item in data: + # if not isinstance(item, tuple) or len(item) != 2: + # pairs = False + # break + # if not pairs: + return self.represent_sequence(u'tag:yaml.org,2002:seq', data) + + # value = [] + # for item_key, item_value in data: + # value.append(self.represent_mapping(u'tag:yaml.org,2002:map', + # [(item_key, item_value)])) + # return SequenceNode(u'tag:yaml.org,2002:pairs', value) + + def represent_dict(self, data): + # type: (Any) -> Any + return self.represent_mapping(u'tag:yaml.org,2002:map', data) + + def represent_ordereddict(self, data): + # type: (Any) -> Any + return self.represent_omap(u'tag:yaml.org,2002:omap', data) + + def represent_set(self, data): + # type: (Any) -> Any + value = {} # type: Dict[Any, None] + for key in data: + value[key] = None + return self.represent_mapping(u'tag:yaml.org,2002:set', value) + + def represent_date(self, data): + # type: (Any) -> Any + value = to_unicode(data.isoformat()) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_datetime(self, data): + # type: (Any) -> Any + value = to_unicode(data.isoformat(' ')) + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value) + + def represent_yaml_object(self, tag, data, cls, flow_style=None): + # type: (Any, Any, Any, Any) -> Any + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__.copy() + return self.represent_mapping(tag, state, flow_style=flow_style) + + def represent_undefined(self, data): + # type: (Any) -> None + raise RepresenterError('cannot represent an object: %s' % (data,)) + + +SafeRepresenter.add_representer(type(None), SafeRepresenter.represent_none) + +SafeRepresenter.add_representer(str, SafeRepresenter.represent_str) + +if PY2: + SafeRepresenter.add_representer(unicode, SafeRepresenter.represent_unicode) +else: + SafeRepresenter.add_representer(bytes, SafeRepresenter.represent_binary) + +SafeRepresenter.add_representer(bool, SafeRepresenter.represent_bool) + +SafeRepresenter.add_representer(int, SafeRepresenter.represent_int) + +if PY2: + SafeRepresenter.add_representer(long, SafeRepresenter.represent_long) + +SafeRepresenter.add_representer(float, SafeRepresenter.represent_float) + +SafeRepresenter.add_representer(list, SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(tuple, SafeRepresenter.represent_list) + +SafeRepresenter.add_representer(dict, SafeRepresenter.represent_dict) + +SafeRepresenter.add_representer(set, SafeRepresenter.represent_set) + +SafeRepresenter.add_representer(ordereddict, SafeRepresenter.represent_ordereddict) + +if sys.version_info >= (2, 7): + import collections + + SafeRepresenter.add_representer( + collections.OrderedDict, SafeRepresenter.represent_ordereddict + ) + +SafeRepresenter.add_representer(datetime.date, SafeRepresenter.represent_date) + +SafeRepresenter.add_representer(datetime.datetime, SafeRepresenter.represent_datetime) + +SafeRepresenter.add_representer(None, SafeRepresenter.represent_undefined) + + +class Representer(SafeRepresenter): + if PY2: + + def represent_str(self, data): + # type: (Any) -> Any + tag = None + style = None + try: + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + except UnicodeDecodeError: + try: + data = unicode(data, 'utf-8') + tag = 
u'tag:yaml.org,2002:python/str' + except UnicodeDecodeError: + data = data.encode('base64') + tag = u'tag:yaml.org,2002:binary' + style = '|' + return self.represent_scalar(tag, data, style=style) + + def represent_unicode(self, data): + # type: (Any) -> Any + tag = None + try: + data.encode('ascii') + tag = u'tag:yaml.org,2002:python/unicode' + except UnicodeEncodeError: + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data) + + def represent_long(self, data): + # type: (Any) -> Any + tag = u'tag:yaml.org,2002:int' + if int(data) is not data: + tag = u'tag:yaml.org,2002:python/long' + return self.represent_scalar(tag, to_unicode(data)) + + def represent_complex(self, data): + # type: (Any) -> Any + if data.imag == 0.0: + data = u'%r' % data.real + elif data.real == 0.0: + data = u'%rj' % data.imag + elif data.imag > 0: + data = u'%r+%rj' % (data.real, data.imag) + else: + data = u'%r%rj' % (data.real, data.imag) + return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data) + + def represent_tuple(self, data): + # type: (Any) -> Any + return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data) + + def represent_name(self, data): + # type: (Any) -> Any + try: + name = u'%s.%s' % (data.__module__, data.__qualname__) + except AttributeError: + # probably PY2 + name = u'%s.%s' % (data.__module__, data.__name__) + return self.represent_scalar(u'tag:yaml.org,2002:python/name:' + name, "") + + def represent_module(self, data): + # type: (Any) -> Any + return self.represent_scalar(u'tag:yaml.org,2002:python/module:' + data.__name__, "") + + if PY2: + + def represent_instance(self, data): + # type: (Any) -> Any + # For instances of classic classes, we use __getinitargs__ and + # __getstate__ to serialize the data. + + # If data.__getinitargs__ exists, the object must be reconstructed + # by calling cls(**args), where args is a tuple returned by + # __getinitargs__. Otherwise, the cls.__init__ method should never + # be called and the class instance is created by instantiating a + # trivial class and assigning to the instance's __class__ variable. + + # If data.__getstate__ exists, it returns the state of the object. + # Otherwise, the state of the object is data.__dict__. + + # We produce either a !!python/object or !!python/object/new node. + # If data.__getinitargs__ does not exist and state is a dictionary, + # we produce a !!python/object node . Otherwise we produce a + # !!python/object/new node. + + cls = data.__class__ + class_name = u'%s.%s' % (cls.__module__, cls.__name__) + args = None + state = None + if hasattr(data, '__getinitargs__'): + args = list(data.__getinitargs__()) + if hasattr(data, '__getstate__'): + state = data.__getstate__() + else: + state = data.__dict__ + if args is None and isinstance(state, dict): + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:' + class_name, state + ) + if isinstance(state, dict) and not state: + return self.represent_sequence( + u'tag:yaml.org,2002:python/object/new:' + class_name, args + ) + value = {} + if bool(args): + value['args'] = args + value['state'] = state # type: ignore + return self.represent_mapping( + u'tag:yaml.org,2002:python/object/new:' + class_name, value + ) + + def represent_object(self, data): + # type: (Any) -> Any + # We use __reduce__ API to save the data. 
data.__reduce__ returns + # a tuple of length 2-5: + # (function, args, state, listitems, dictitems) + + # For reconstructing, we calls function(*args), then set its state, + # listitems, and dictitems if they are not None. + + # A special case is when function.__name__ == '__newobj__'. In this + # case we create the object with args[0].__new__(*args). + + # Another special case is when __reduce__ returns a string - we don't + # support it. + + # We produce a !!python/object, !!python/object/new or + # !!python/object/apply node. + + cls = type(data) + if cls in copyreg.dispatch_table: + reduce = copyreg.dispatch_table[cls](data) + elif hasattr(data, '__reduce_ex__'): + reduce = data.__reduce_ex__(2) + elif hasattr(data, '__reduce__'): + reduce = data.__reduce__() + else: + raise RepresenterError('cannot represent object: %r' % (data,)) + reduce = (list(reduce) + [None] * 5)[:5] + function, args, state, listitems, dictitems = reduce + args = list(args) + if state is None: + state = {} + if listitems is not None: + listitems = list(listitems) + if dictitems is not None: + dictitems = dict(dictitems) + if function.__name__ == '__newobj__': + function = args[0] + args = args[1:] + tag = u'tag:yaml.org,2002:python/object/new:' + newobj = True + else: + tag = u'tag:yaml.org,2002:python/object/apply:' + newobj = False + try: + function_name = u'%s.%s' % (function.__module__, function.__qualname__) + except AttributeError: + # probably PY2 + function_name = u'%s.%s' % (function.__module__, function.__name__) + if not args and not listitems and not dictitems and isinstance(state, dict) and newobj: + return self.represent_mapping( + u'tag:yaml.org,2002:python/object:' + function_name, state + ) + if not listitems and not dictitems and isinstance(state, dict) and not state: + return self.represent_sequence(tag + function_name, args) + value = {} + if args: + value['args'] = args + if state or not isinstance(state, dict): + value['state'] = state + if listitems: + value['listitems'] = listitems + if dictitems: + value['dictitems'] = dictitems + return self.represent_mapping(tag + function_name, value) + + +if PY2: + Representer.add_representer(str, Representer.represent_str) + + Representer.add_representer(unicode, Representer.represent_unicode) + + Representer.add_representer(long, Representer.represent_long) + +Representer.add_representer(complex, Representer.represent_complex) + +Representer.add_representer(tuple, Representer.represent_tuple) + +Representer.add_representer(type, Representer.represent_name) + +if PY2: + Representer.add_representer(types.ClassType, Representer.represent_name) + +Representer.add_representer(types.FunctionType, Representer.represent_name) + +Representer.add_representer(types.BuiltinFunctionType, Representer.represent_name) + +Representer.add_representer(types.ModuleType, Representer.represent_module) + +if PY2: + Representer.add_multi_representer(types.InstanceType, Representer.represent_instance) + +Representer.add_multi_representer(object, Representer.represent_object) + +Representer.add_multi_representer(type, Representer.represent_name) + + +class RoundTripRepresenter(SafeRepresenter): + # need to add type here and write out the .comment + # in serializer and emitter + + def __init__(self, default_style=None, default_flow_style=None, dumper=None): + # type: (Any, Any, Any) -> None + if not hasattr(dumper, 'typ') and default_flow_style is None: + default_flow_style = False + SafeRepresenter.__init__( + self, + default_style=default_style, + 
default_flow_style=default_flow_style, + dumper=dumper, + ) + + def ignore_aliases(self, data): + # type: (Any) -> bool + try: + if data.anchor is not None and data.anchor.value is not None: + return False + except AttributeError: + pass + return SafeRepresenter.ignore_aliases(self, data) + + def represent_none(self, data): + # type: (Any) -> Any + if len(self.represented_objects) == 0 and not self.serializer.use_explicit_start: + # this will be open ended (although it is not yet) + return self.represent_scalar(u'tag:yaml.org,2002:null', u'null') + return self.represent_scalar(u'tag:yaml.org,2002:null', "") + + def represent_literal_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '|' + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + represent_preserved_scalarstring = represent_literal_scalarstring + + def represent_folded_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '>' + anchor = data.yaml_anchor(any=True) + for fold_pos in reversed(getattr(data, 'fold_pos', [])): + if ( + data[fold_pos] == ' ' + and (fold_pos > 0 and not data[fold_pos - 1].isspace()) + and (fold_pos < len(data) and not data[fold_pos + 1].isspace()) + ): + data = data[:fold_pos] + '\a' + data[fold_pos:] + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_single_quoted_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = "'" + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_double_quoted_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '"' + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def represent_plain_scalarstring(self, data): + # type: (Any) -> Any + tag = None + style = '' + anchor = data.yaml_anchor(any=True) + if PY2 and not isinstance(data, unicode): + data = unicode(data, 'ascii') + tag = u'tag:yaml.org,2002:str' + return self.represent_scalar(tag, data, style=style, anchor=anchor) + + def insert_underscore(self, prefix, s, underscore, anchor=None): + # type: (Any, Any, Any, Any) -> Any + if underscore is None: + return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor) + if underscore[0]: + sl = list(s) + pos = len(s) - underscore[0] + while pos > 0: + sl.insert(pos, '_') + pos -= underscore[0] + s = "".join(sl) + if underscore[1]: + s = '_' + s + if underscore[2]: + s += '_' + return self.represent_scalar(u'tag:yaml.org,2002:int', prefix + s, anchor=anchor) + + def represent_scalar_int(self, data): + # type: (Any) -> Any + if data._width is not None: + s = '{:0{}d}'.format(data, data._width) + else: + s = format(data, 'd') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore("", s, data._underscore, anchor=anchor) + + def represent_binary_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}b}', that strips the zeros + s = '{:0{}b}'.format(data, data._width) + else: + s = 
format(data, 'b') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0b', s, data._underscore, anchor=anchor) + + def represent_octal_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}o}', that strips the zeros + s = '{:0{}o}'.format(data, data._width) + else: + s = format(data, 'o') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0o', s, data._underscore, anchor=anchor) + + def represent_hex_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}x}', that strips the zeros + s = '{:0{}x}'.format(data, data._width) + else: + s = format(data, 'x') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0x', s, data._underscore, anchor=anchor) + + def represent_hex_caps_int(self, data): + # type: (Any) -> Any + if data._width is not None: + # cannot use '{:#0{}X}', that strips the zeros + s = '{:0{}X}'.format(data, data._width) + else: + s = format(data, 'X') + anchor = data.yaml_anchor(any=True) + return self.insert_underscore('0x', s, data._underscore, anchor=anchor) + + def represent_scalar_float(self, data): + # type: (Any) -> Any + """ this is way more complicated """ + value = None + anchor = data.yaml_anchor(any=True) + if data != data or (data == 0.0 and data == 1.0): + value = u'.nan' + elif data == self.inf_value: + value = u'.inf' + elif data == -self.inf_value: + value = u'-.inf' + if value: + return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor) + if data._exp is None and data._prec > 0 and data._prec == data._width - 1: + # no exponent, but trailing dot + value = u'{}{:d}.'.format(data._m_sign if data._m_sign else "", abs(int(data))) + elif data._exp is None: + # no exponent, "normal" dot + prec = data._prec + ms = data._m_sign if data._m_sign else "" + # -1 for the dot + value = u'{}{:0{}.{}f}'.format( + ms, abs(data), data._width - len(ms), data._width - prec - 1 + ) + if prec == 0 or (prec == 1 and ms != ""): + value = value.replace(u'0.', u'.') + while len(value) < data._width: + value += u'0' + else: + # exponent + m, es = u'{:{}.{}e}'.format( + # data, data._width, data._width - data._prec + (1 if data._m_sign else 0) + data, + data._width, + data._width + (1 if data._m_sign else 0), + ).split('e') + w = data._width if data._prec > 0 else (data._width + 1) + if data < 0: + w += 1 + m = m[:w] + e = int(es) + m1, m2 = m.split('.') # always second? + while len(m1) + len(m2) < data._width - (1 if data._prec >= 0 else 0): + m2 += u'0' + if data._m_sign and data > 0: + m1 = '+' + m1 + esgn = u'+' if data._e_sign else "" + if data._prec < 0: # mantissa without dot + if m2 != u'0': + e -= len(m2) + else: + m2 = "" + while (len(m1) + len(m2) - (1 if data._m_sign else 0)) < data._width: + m2 += u'0' + e -= 1 + value = m1 + m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width) + elif data._prec == 0: # mantissa with trailing dot + e -= len(m2) + value = ( + m1 + m2 + u'.' + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width) + ) + else: + if data._m_lead0 > 0: + m2 = u'0' * (data._m_lead0 - 1) + m1 + m2 + m1 = u'0' + m2 = m2[: -data._m_lead0] # these should be zeros + e += data._m_lead0 + while len(m1) < data._prec: + m1 += m2[0] + m2 = m2[1:] + e -= 1 + value = ( + m1 + u'.' 
+ m2 + data._exp + u'{:{}0{}d}'.format(e, esgn, data._e_width) + ) + + if value is None: + value = to_unicode(repr(data)).lower() + return self.represent_scalar(u'tag:yaml.org,2002:float', value, anchor=anchor) + + def represent_sequence(self, tag, sequence, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + # if the flow_style is None, the flow style tacked on to the object + # explicitly will be taken. If that is None as well the default flow + # style rules + try: + flow_style = sequence.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = sequence.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(sequence, comment_attrib) + node.comment = comment.comment + # reset any comment already printed information + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + item_comments = comment.items + node.comment = comment.comment + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for idx, item in enumerate(sequence): + node_item = self.represent_data(item) + self.merge_comments(node_item, item_comments.get(idx)) + if not (isinstance(node_item, ScalarNode) and not node_item.style): + best_style = False + value.append(node_item) + if flow_style is None: + if len(sequence) != 0 and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def merge_comments(self, node, comments): + # type: (Any, Any) -> Any + if comments is None: + assert hasattr(node, 'comment') + return node + if getattr(node, 'comment', None) is not None: + for idx, val in enumerate(comments): + if idx >= len(node.comment): + continue + nc = node.comment[idx] + if nc is not None: + assert val is None or val == nc + comments[idx] = nc + node.comment = comments + return node + + def represent_key(self, data): + # type: (Any) -> Any + if isinstance(data, CommentedKeySeq): + self.alias_key = None + return self.represent_sequence(u'tag:yaml.org,2002:seq', data, flow_style=True) + if isinstance(data, CommentedKeyMap): + self.alias_key = None + return self.represent_mapping(u'tag:yaml.org,2002:map', data, flow_style=True) + return SafeRepresenter.represent_key(self, data) + + def represent_mapping(self, tag, mapping, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = mapping.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = mapping.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! 
+ try: + comment = getattr(mapping, comment_attrib) + node.comment = comment.comment + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + merge_list = [m[1] for m in getattr(mapping, merge_attrib, [])] + try: + merge_pos = getattr(mapping, merge_attrib, [[0]])[0][0] + except IndexError: + merge_pos = 0 + item_count = 0 + if bool(merge_list): + items = mapping.non_merged_items() + else: + items = mapping.items() + for item_key, item_value in items: + item_count += 1 + node_key = self.represent_key(item_key) + node_value = self.represent_data(item_value) + item_comment = item_comments.get(item_key) + if item_comment: + assert getattr(node_key, 'comment', None) is None + node_key.comment = item_comment[:2] + nvc = getattr(node_value, 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_value.comment = item_comment[2:] + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + if flow_style is None: + if ((item_count != 0) or bool(merge_list)) and self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + if bool(merge_list): + # because of the call to represent_data here, the anchors + # are marked as being used and thereby created + if len(merge_list) == 1: + arg = self.represent_data(merge_list[0]) + else: + arg = self.represent_data(merge_list) + arg.flow_style = True + value.insert(merge_pos, (ScalarNode(u'tag:yaml.org,2002:merge', '<<'), arg)) + return node + + def represent_omap(self, tag, omap, flow_style=None): + # type: (Any, Any, Any) -> Any + value = [] # type: List[Any] + try: + flow_style = omap.fa.flow_style(flow_style) + except AttributeError: + flow_style = flow_style + try: + anchor = omap.yaml_anchor() + except AttributeError: + anchor = None + node = SequenceNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + try: + comment = getattr(omap, comment_attrib) + node.comment = comment.comment + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in omap: + item_val = omap[item_key] + node_item = self.represent_data({item_key: item_val}) + # node_item.flow_style = False + # node item has two scalars in value: node_key and node_value + item_comment = item_comments.get(item_key) + if item_comment: + if item_comment[1]: + node_item.comment = [None, item_comment[1]] + assert getattr(node_item.value[0][0], 'comment', None) is None + node_item.value[0][0].comment = [item_comment[0], None] + nvc = getattr(node_item.value[0][1], 'comment', None) + if nvc is not None: # end comment already there + nvc[0] = item_comment[2] + nvc[1] = item_comment[3] + else: + node_item.value[0][1].comment = item_comment[2:] + # if not (isinstance(node_item, 
ScalarNode) \ + # and not node_item.style): + # best_style = False + value.append(node_item) + if flow_style is None: + if self.default_flow_style is not None: + node.flow_style = self.default_flow_style + else: + node.flow_style = best_style + return node + + def represent_set(self, setting): + # type: (Any) -> Any + flow_style = False + tag = u'tag:yaml.org,2002:set' + # return self.represent_mapping(tag, value) + value = [] # type: List[Any] + flow_style = setting.fa.flow_style(flow_style) + try: + anchor = setting.yaml_anchor() + except AttributeError: + anchor = None + node = MappingNode(tag, value, flow_style=flow_style, anchor=anchor) + if self.alias_key is not None: + self.represented_objects[self.alias_key] = node + best_style = True + # no sorting! !! + try: + comment = getattr(setting, comment_attrib) + node.comment = comment.comment + if node.comment and node.comment[1]: + for ct in node.comment[1]: + ct.reset() + item_comments = comment.items + for v in item_comments.values(): + if v and v[1]: + for ct in v[1]: + ct.reset() + try: + node.comment.append(comment.end) + except AttributeError: + pass + except AttributeError: + item_comments = {} + for item_key in setting.odict: + node_key = self.represent_key(item_key) + node_value = self.represent_data(None) + item_comment = item_comments.get(item_key) + if item_comment: + assert getattr(node_key, 'comment', None) is None + node_key.comment = item_comment[:2] + node_key.style = node_value.style = '?' + if not (isinstance(node_key, ScalarNode) and not node_key.style): + best_style = False + if not (isinstance(node_value, ScalarNode) and not node_value.style): + best_style = False + value.append((node_key, node_value)) + best_style = best_style + return node + + def represent_dict(self, data): + # type: (Any) -> Any + """write out tag if saved on loading""" + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = u'tag:yaml.org,2002:map' + return self.represent_mapping(tag, data) + + def represent_list(self, data): + # type: (Any) -> Any + try: + t = data.tag.value + except AttributeError: + t = None + if t: + if t.startswith('!!'): + tag = 'tag:yaml.org,2002:' + t[2:] + else: + tag = t + else: + tag = u'tag:yaml.org,2002:seq' + return self.represent_sequence(tag, data) + + def represent_datetime(self, data): + # type: (Any) -> Any + inter = 'T' if data._yaml['t'] else ' ' + _yaml = data._yaml + if _yaml['delta']: + data += _yaml['delta'] + value = data.isoformat(inter) + else: + value = data.isoformat(inter) + if _yaml['tz']: + value += _yaml['tz'] + return self.represent_scalar(u'tag:yaml.org,2002:timestamp', to_unicode(value)) + + def represent_tagged_scalar(self, data): + # type: (Any) -> Any + try: + tag = data.tag.value + except AttributeError: + tag = None + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return self.represent_scalar(tag, data.value, style=data.style, anchor=anchor) + + def represent_scalar_bool(self, data): + # type: (Any) -> Any + try: + anchor = data.yaml_anchor() + except AttributeError: + anchor = None + return SafeRepresenter.represent_bool(self, data, anchor=anchor) + + +RoundTripRepresenter.add_representer(type(None), RoundTripRepresenter.represent_none) + +RoundTripRepresenter.add_representer( + LiteralScalarString, RoundTripRepresenter.represent_literal_scalarstring +) + +RoundTripRepresenter.add_representer( + FoldedScalarString, 
RoundTripRepresenter.represent_folded_scalarstring +) + +RoundTripRepresenter.add_representer( + SingleQuotedScalarString, RoundTripRepresenter.represent_single_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + DoubleQuotedScalarString, RoundTripRepresenter.represent_double_quoted_scalarstring +) + +RoundTripRepresenter.add_representer( + PlainScalarString, RoundTripRepresenter.represent_plain_scalarstring +) + +# RoundTripRepresenter.add_representer(tuple, Representer.represent_tuple) + +RoundTripRepresenter.add_representer(ScalarInt, RoundTripRepresenter.represent_scalar_int) + +RoundTripRepresenter.add_representer(BinaryInt, RoundTripRepresenter.represent_binary_int) + +RoundTripRepresenter.add_representer(OctalInt, RoundTripRepresenter.represent_octal_int) + +RoundTripRepresenter.add_representer(HexInt, RoundTripRepresenter.represent_hex_int) + +RoundTripRepresenter.add_representer(HexCapsInt, RoundTripRepresenter.represent_hex_caps_int) + +RoundTripRepresenter.add_representer(ScalarFloat, RoundTripRepresenter.represent_scalar_float) + +RoundTripRepresenter.add_representer(ScalarBoolean, RoundTripRepresenter.represent_scalar_bool) + +RoundTripRepresenter.add_representer(CommentedSeq, RoundTripRepresenter.represent_list) + +RoundTripRepresenter.add_representer(CommentedMap, RoundTripRepresenter.represent_dict) + +RoundTripRepresenter.add_representer( + CommentedOrderedMap, RoundTripRepresenter.represent_ordereddict +) + +if sys.version_info >= (2, 7): + import collections + + RoundTripRepresenter.add_representer( + collections.OrderedDict, RoundTripRepresenter.represent_ordereddict + ) + +RoundTripRepresenter.add_representer(CommentedSet, RoundTripRepresenter.represent_set) + +RoundTripRepresenter.add_representer( + TaggedScalar, RoundTripRepresenter.represent_tagged_scalar +) + +RoundTripRepresenter.add_representer(TimeStamp, RoundTripRepresenter.represent_datetime) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py new file mode 100644 index 0000000000..28b7767b02 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/resolver.py @@ -0,0 +1,399 @@ +# coding: utf-8 + +from __future__ import absolute_import + +import re + +if False: # MYPY + from typing import Any, Dict, List, Union, Text, Optional # NOQA + from ...ruamel.yaml.compat import VersionType # NOQA + +from ...ruamel.yaml.compat import string_types, _DEFAULT_YAML_VERSION # NOQA +from ...ruamel.yaml.error import * # NOQA +from ...ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode # NOQA +from ...ruamel.yaml.util import RegExp # NOQA + +__all__ = ['BaseResolver', 'Resolver', 'VersionedResolver'] + + +# fmt: off +# resolvers consist of +# - a list of applicable version +# - a tag +# - a regexp +# - a list of first characters to match +implicit_resolvers = [ + ([(1, 2)], + u'tag:yaml.org,2002:bool', + RegExp(u'''^(?:true|True|TRUE|false|False|FALSE)$''', re.X), + list(u'tTfF')), + ([(1, 1)], + u'tag:yaml.org,2002:bool', + RegExp(u'''^(?:y|Y|yes|Yes|YES|n|N|no|No|NO + |true|True|TRUE|false|False|FALSE + |on|On|ON|off|Off|OFF)$''', re.X), + list(u'yYnNtTfFoO')), + ([(1, 2)], + u'tag:yaml.org,2002:float', + RegExp(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |[-+]?\\.[0-9_]+(?:[eE][-+][0-9]+)? 
+ |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')), + ([(1, 1)], + u'tag:yaml.org,2002:float', + RegExp(u'''^(?: + [-+]?(?:[0-9][0-9_]*)\\.[0-9_]*(?:[eE][-+]?[0-9]+)? + |[-+]?(?:[0-9][0-9_]*)(?:[eE][-+]?[0-9]+) + |\\.[0-9_]+(?:[eE][-+][0-9]+)? + |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\\.[0-9_]* # sexagesimal float + |[-+]?\\.(?:inf|Inf|INF) + |\\.(?:nan|NaN|NAN))$''', re.X), + list(u'-+0123456789.')), + ([(1, 2)], + u'tag:yaml.org,2002:int', + RegExp(u'''^(?:[-+]?0b[0-1_]+ + |[-+]?0o?[0-7_]+ + |[-+]?[0-9_]+ + |[-+]?0x[0-9a-fA-F_]+)$''', re.X), + list(u'-+0123456789')), + ([(1, 1)], + u'tag:yaml.org,2002:int', + RegExp(u'''^(?:[-+]?0b[0-1_]+ + |[-+]?0?[0-7_]+ + |[-+]?(?:0|[1-9][0-9_]*) + |[-+]?0x[0-9a-fA-F_]+ + |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X), # sexagesimal int + list(u'-+0123456789')), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:merge', + RegExp(u'^(?:<<)$'), + [u'<']), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:null', + RegExp(u'''^(?: ~ + |null|Null|NULL + | )$''', re.X), + [u'~', u'n', u'N', u'']), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:timestamp', + RegExp(u'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] + |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]? + (?:[Tt]|[ \\t]+)[0-9][0-9]? + :[0-9][0-9] :[0-9][0-9] (?:\\.[0-9]*)? + (?:[ \\t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X), + list(u'0123456789')), + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:value', + RegExp(u'^(?:=)$'), + [u'=']), + # The following resolver is only for documentation purposes. It cannot work + # because plain scalars cannot start with '!', '&', or '*'. + ([(1, 2), (1, 1)], + u'tag:yaml.org,2002:yaml', + RegExp(u'^(?:!|&|\\*)$'), + list(u'!&*')), +] +# fmt: on + + +class ResolverError(YAMLError): + pass + + +class BaseResolver(object): + + DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str' + DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq' + DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map' + + yaml_implicit_resolvers = {} # type: Dict[Any, Any] + yaml_path_resolvers = {} # type: Dict[Any, Any] + + def __init__(self, loadumper=None): + # type: (Any, Any) -> None + self.loadumper = loadumper + if self.loadumper is not None and getattr(self.loadumper, '_resolver', None) is None: + self.loadumper._resolver = self.loadumper + self._loader_version = None # type: Any + self.resolver_exact_paths = [] # type: List[Any] + self.resolver_prefix_paths = [] # type: List[Any] + + @property + def parser(self): + # type: () -> Any + if self.loadumper is not None: + if hasattr(self.loadumper, 'typ'): + return self.loadumper.parser + return self.loadumper._parser + return None + + @classmethod + def add_implicit_resolver_base(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + + @classmethod + def add_implicit_resolver(cls, tag, regexp, first): + # type: (Any, Any, Any) -> None + if 'yaml_implicit_resolvers' not in cls.__dict__: + # deepcopy doesn't work here + cls.yaml_implicit_resolvers = dict( + (k, cls.yaml_implicit_resolvers[k][:]) for k in cls.yaml_implicit_resolvers + ) + if first is None: + first = [None] + for ch in first: + cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp)) + implicit_resolvers.append(([(1, 2), (1, 1)], 
tag, regexp, first)) + + # @classmethod + # def add_implicit_resolver(cls, tag, regexp, first): + + @classmethod + def add_path_resolver(cls, tag, path, kind=None): + # type: (Any, Any, Any) -> None + # Note: `add_path_resolver` is experimental. The API could be changed. + # `new_path` is a pattern that is matched against the path from the + # root to the node that is being considered. `node_path` elements are + # tuples `(node_check, index_check)`. `node_check` is a node class: + # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None` + # matches any kind of a node. `index_check` could be `None`, a boolean + # value, a string value, or a number. `None` and `False` match against + # any _value_ of sequence and mapping nodes. `True` matches against + # any _key_ of a mapping node. A string `index_check` matches against + # a mapping value that corresponds to a scalar key which content is + # equal to the `index_check` value. An integer `index_check` matches + # against a sequence value with the index equal to `index_check`. + if 'yaml_path_resolvers' not in cls.__dict__: + cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy() + new_path = [] # type: List[Any] + for element in path: + if isinstance(element, (list, tuple)): + if len(element) == 2: + node_check, index_check = element + elif len(element) == 1: + node_check = element[0] + index_check = True + else: + raise ResolverError('Invalid path element: %s' % (element,)) + else: + node_check = None + index_check = element + if node_check is str: + node_check = ScalarNode + elif node_check is list: + node_check = SequenceNode + elif node_check is dict: + node_check = MappingNode + elif ( + node_check not in [ScalarNode, SequenceNode, MappingNode] + and not isinstance(node_check, string_types) + and node_check is not None + ): + raise ResolverError('Invalid node checker: %s' % (node_check,)) + if not isinstance(index_check, (string_types, int)) and index_check is not None: + raise ResolverError('Invalid index checker: %s' % (index_check,)) + new_path.append((node_check, index_check)) + if kind is str: + kind = ScalarNode + elif kind is list: + kind = SequenceNode + elif kind is dict: + kind = MappingNode + elif kind not in [ScalarNode, SequenceNode, MappingNode] and kind is not None: + raise ResolverError('Invalid node kind: %s' % (kind,)) + cls.yaml_path_resolvers[tuple(new_path), kind] = tag + + def descend_resolver(self, current_node, current_index): + # type: (Any, Any) -> None + if not self.yaml_path_resolvers: + return + exact_paths = {} + prefix_paths = [] + if current_node: + depth = len(self.resolver_prefix_paths) + for path, kind in self.resolver_prefix_paths[-1]: + if self.check_resolver_prefix(depth, path, kind, current_node, current_index): + if len(path) > depth: + prefix_paths.append((path, kind)) + else: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + for path, kind in self.yaml_path_resolvers: + if not path: + exact_paths[kind] = self.yaml_path_resolvers[path, kind] + else: + prefix_paths.append((path, kind)) + self.resolver_exact_paths.append(exact_paths) + self.resolver_prefix_paths.append(prefix_paths) + + def ascend_resolver(self): + # type: () -> None + if not self.yaml_path_resolvers: + return + self.resolver_exact_paths.pop() + self.resolver_prefix_paths.pop() + + def check_resolver_prefix(self, depth, path, kind, current_node, current_index): + # type: (int, Text, Any, Any, Any) -> bool + node_check, index_check = path[depth - 1] + if isinstance(node_check, string_types): + if 
current_node.tag != node_check: + return False + elif node_check is not None: + if not isinstance(current_node, node_check): + return False + if index_check is True and current_index is not None: + return False + if (index_check is False or index_check is None) and current_index is None: + return False + if isinstance(index_check, string_types): + if not ( + isinstance(current_index, ScalarNode) and index_check == current_index.value + ): + return False + elif isinstance(index_check, int) and not isinstance(index_check, bool): + if index_check != current_index: + return False + return True + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.yaml_implicit_resolvers.get("", []) + else: + resolvers = self.yaml_implicit_resolvers.get(value[0], []) + resolvers += self.yaml_implicit_resolvers.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + return None + + +class Resolver(BaseResolver): + pass + + +for ir in implicit_resolvers: + if (1, 2) in ir[0]: + Resolver.add_implicit_resolver_base(*ir[1:]) + + +class VersionedResolver(BaseResolver): + """ + contrary to the "normal" resolver, the smart resolver delays loading + the pattern matching rules. That way it can decide to load 1.1 rules + or the (default) 1.2 rules, that no longer support octal without 0o, sexagesimals + and Yes/No/On/Off booleans. 
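    Editorial illustration (added by the editor, not part of the upstream
    docstring), assuming YAML is importable from this vendored copy the same
    way it is from upstream ruamel.yaml and behaves identically:

        yaml = YAML()                    # default resolver rules are YAML 1.2
        yaml.load('a: yes\nb: 0o17\n')   # -> {'a': 'yes', 'b': 15}
        yaml.version = (1, 1)
        yaml.load('a: yes\nb: 017\n')    # -> {'a': True, 'b': 15}  (1.1 booleans and octals)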
+ """ + + def __init__(self, version=None, loader=None, loadumper=None): + # type: (Optional[VersionType], Any, Any) -> None + if loader is None and loadumper is not None: + loader = loadumper + BaseResolver.__init__(self, loader) + self._loader_version = self.get_loader_version(version) + self._version_implicit_resolver = {} # type: Dict[Any, Any] + + def add_version_implicit_resolver(self, version, tag, regexp, first): + # type: (VersionType, Any, Any, Any) -> None + if first is None: + first = [None] + impl_resolver = self._version_implicit_resolver.setdefault(version, {}) + for ch in first: + impl_resolver.setdefault(ch, []).append((tag, regexp)) + + def get_loader_version(self, version): + # type: (Optional[VersionType]) -> Any + if version is None or isinstance(version, tuple): + return version + if isinstance(version, list): + return tuple(version) + # assume string + return tuple(map(int, version.split(u'.'))) + + @property + def versioned_resolver(self): + # type: () -> Any + """ + select the resolver based on the version we are parsing + """ + version = self.processing_version + if version not in self._version_implicit_resolver: + for x in implicit_resolvers: + if version in x[0]: + self.add_version_implicit_resolver(version, x[1], x[2], x[3]) + return self._version_implicit_resolver[version] + + def resolve(self, kind, value, implicit): + # type: (Any, Any, Any) -> Any + if kind is ScalarNode and implicit[0]: + if value == "": + resolvers = self.versioned_resolver.get("", []) + else: + resolvers = self.versioned_resolver.get(value[0], []) + resolvers += self.versioned_resolver.get(None, []) + for tag, regexp in resolvers: + if regexp.match(value): + return tag + implicit = implicit[1] + if bool(self.yaml_path_resolvers): + exact_paths = self.resolver_exact_paths[-1] + if kind in exact_paths: + return exact_paths[kind] + if None in exact_paths: + return exact_paths[None] + if kind is ScalarNode: + return self.DEFAULT_SCALAR_TAG + elif kind is SequenceNode: + return self.DEFAULT_SEQUENCE_TAG + elif kind is MappingNode: + return self.DEFAULT_MAPPING_TAG + + @property + def processing_version(self): + # type: () -> Any + try: + version = self.loadumper._scanner.yaml_version + except AttributeError: + try: + if hasattr(self.loadumper, 'typ'): + version = self.loadumper.version + else: + version = self.loadumper._serializer.use_version # dumping + except AttributeError: + version = None + if version is None: + version = self._loader_version + if version is None: + version = _DEFAULT_YAML_VERSION + return version diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py new file mode 100644 index 0000000000..627d01dad0 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarbool.py @@ -0,0 +1,51 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +""" +You cannot subclass bool, and this is necessary for round-tripping anchored +bool values (and also if you want to preserve the original way of writing) + +bool.__bases__ is type 'int', so that is what is used as the basis for ScalarBoolean as well. 
+ +You can use these in an if statement, but not when testing equivalence +""" + +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarBoolean'] + +# no need for no_limit_int -> int + + +class ScalarBoolean(int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + anchor = kw.pop('anchor', None) # type: ignore + b = int.__new__(cls, *args, **kw) # type: ignore + if anchor is not None: + b.yaml_set_anchor(anchor, always_dump=True) + return b + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py new file mode 100644 index 0000000000..456fdeb34d --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarfloat.py @@ -0,0 +1,127 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +import sys +from .compat import no_limit_int # NOQA +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat'] + + +class ScalarFloat(float): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) # type: ignore + prec = kw.pop('prec', None) # type: ignore + m_sign = kw.pop('m_sign', None) # type: ignore + m_lead0 = kw.pop('m_lead0', 0) # type: ignore + exp = kw.pop('exp', None) # type: ignore + e_width = kw.pop('e_width', None) # type: ignore + e_sign = kw.pop('e_sign', None) # type: ignore + underscore = kw.pop('underscore', None) # type: ignore + anchor = kw.pop('anchor', None) # type: ignore + v = float.__new__(cls, *args, **kw) # type: ignore + v._width = width + v._prec = prec + v._m_sign = m_sign + v._m_lead0 = m_lead0 + v._exp = exp + v._e_width = e_width + v._e_sign = e_sign + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) + a + x = type(self)(self + a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) // a + x = type(self)(self // a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) * a + x = type(self)(self * a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + x._prec = self._prec # check for others + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) ** a + x = type(self)(self ** a) + x._width = self._width + x._underscore = 
self._underscore[:] if self._underscore is not None else None # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + return float(self) - a + x = type(self)(self - a) + x._width = self._width + x._underscore = self._underscore[:] if self._underscore is not None else None # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + def dump(self, out=sys.stdout): + # type: (Any) -> Any + out.write( + 'ScalarFloat({}| w:{}, p:{}, s:{}, lz:{}, _:{}|{}, w:{}, s:{})\n'.format( + self, + self._width, # type: ignore + self._prec, # type: ignore + self._m_sign, # type: ignore + self._m_lead0, # type: ignore + self._underscore, # type: ignore + self._exp, # type: ignore + self._e_width, # type: ignore + self._e_sign, # type: ignore + ) + ) + + +class ExponentialFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) + + +class ExponentialCapsFloat(ScalarFloat): + def __new__(cls, value, width=None, underscore=None): + # type: (Any, Any, Any) -> Any + return ScalarFloat.__new__(cls, value, width=width, underscore=underscore) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py new file mode 100644 index 0000000000..01567be89c --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarint.py @@ -0,0 +1,130 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +from .compat import no_limit_int # NOQA +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt'] + + +class ScalarInt(no_limit_int): + def __new__(cls, *args, **kw): + # type: (Any, Any, Any) -> Any + width = kw.pop('width', None) # type: ignore + underscore = kw.pop('underscore', None) # type: ignore + anchor = kw.pop('anchor', None) # type: ignore + v = no_limit_int.__new__(cls, *args, **kw) # type: ignore + v._width = width + v._underscore = underscore + if anchor is not None: + v.yaml_set_anchor(anchor, always_dump=True) + return v + + def __iadd__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self + a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ifloordiv__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self // a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __imul__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self * a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + 
self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __ipow__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self ** a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + def __isub__(self, a): # type: ignore + # type: (Any) -> Any + x = type(self)(self - a) + x._width = self._width # type: ignore + x._underscore = ( # type: ignore + self._underscore[:] if self._underscore is not None else None # type: ignore + ) # NOQA + return x + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class BinaryInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class OctalInt(ScalarInt): + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +# mixed casing of A-F is not supported, when loading the first non digit +# determines the case + + +class HexInt(ScalarInt): + """uses lower case (a-f)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class HexCapsInt(ScalarInt): + """uses upper case (A-F)""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) + + +class DecimalInt(ScalarInt): + """needed if anchor""" + + def __new__(cls, value, width=None, underscore=None, anchor=None): + # type: (Any, Any, Any, Any) -> Any + return ScalarInt.__new__(cls, value, width=width, underscore=underscore, anchor=anchor) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py new file mode 100644 index 0000000000..33ddf5e55e --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scalarstring.py @@ -0,0 +1,156 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +from ...ruamel.yaml.compat import text_type +from ...ruamel.yaml.anchor import Anchor + +if False: # MYPY + from typing import Text, Any, Dict, List # NOQA + +__all__ = [ + 'ScalarString', + 'LiteralScalarString', + 'FoldedScalarString', + 'SingleQuotedScalarString', + 'DoubleQuotedScalarString', + 'PlainScalarString', + # PreservedScalarString is the old name, as it was the first to be preserved on rt, + # use LiteralScalarString instead + 'PreservedScalarString', +] + + +class ScalarString(text_type): + __slots__ = Anchor.attrib + + def __new__(cls, *args, 
**kw): + # type: (Any, Any) -> Any + anchor = kw.pop('anchor', None) # type: ignore + ret_val = text_type.__new__(cls, *args, **kw) # type: ignore + if anchor is not None: + ret_val.yaml_set_anchor(anchor, always_dump=True) + return ret_val + + def replace(self, old, new, maxreplace=-1): + # type: (Any, Any, int) -> Any + return type(self)((text_type.replace(self, old, new, maxreplace))) + + @property + def anchor(self): + # type: () -> Any + if not hasattr(self, Anchor.attrib): + setattr(self, Anchor.attrib, Anchor()) + return getattr(self, Anchor.attrib) + + def yaml_anchor(self, any=False): + # type: (bool) -> Any + if not hasattr(self, Anchor.attrib): + return None + if any or self.anchor.always_dump: + return self.anchor + return None + + def yaml_set_anchor(self, value, always_dump=False): + # type: (Any, bool) -> None + self.anchor.value = value + self.anchor.always_dump = always_dump + + +class LiteralScalarString(ScalarString): + __slots__ = 'comment' # the comment after the | on the first line + + style = '|' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +PreservedScalarString = LiteralScalarString + + +class FoldedScalarString(ScalarString): + __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line + + style = '>' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class SingleQuotedScalarString(ScalarString): + __slots__ = () + + style = "'" + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class DoubleQuotedScalarString(ScalarString): + __slots__ = () + + style = '"' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +class PlainScalarString(ScalarString): + __slots__ = () + + style = '' + + def __new__(cls, value, anchor=None): + # type: (Text, Any) -> Any + return ScalarString.__new__(cls, value, anchor=anchor) + + +def preserve_literal(s): + # type: (Text) -> Text + return LiteralScalarString(s.replace('\r\n', '\n').replace('\r', '\n')) + + +def walk_tree(base, map=None): + # type: (Any, Any) -> None + """ + the routine here walks over a simple yaml tree (recursing in + dict values and list items) and converts strings that + have multiple lines to literal scalars + + You can also provide an explicit (ordered) mapping for multiple transforms + (first of which is executed): + map = ruamel.yaml.compat.ordereddict + map['\n'] = preserve_literal + map[':'] = SingleQuotedScalarString + walk_tree(data, map=map) + """ + from ...ruamel.yaml.compat import string_types + from ...ruamel.yaml.compat import MutableMapping, MutableSequence # type: ignore + + if map is None: + map = {'\n': preserve_literal} + + if isinstance(base, MutableMapping): + for k in base: + v = base[k] # type: Text + if isinstance(v, string_types): + for ch in map: + if ch in v: + base[k] = map[ch](v) + break + else: + walk_tree(v, map=map) + elif isinstance(base, MutableSequence): + for idx, elem in enumerate(base): + if isinstance(elem, string_types): + for ch in map: + if ch in elem: # type: ignore + base[idx] = map[ch](elem) + break + else: + walk_tree(elem, map=map) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py new 
file mode 100644 index 0000000000..084bca4e02 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/scanner.py @@ -0,0 +1,1980 @@ +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +# Scanner produces tokens of the following types: +# STREAM-START +# STREAM-END +# DIRECTIVE(name, value) +# DOCUMENT-START +# DOCUMENT-END +# BLOCK-SEQUENCE-START +# BLOCK-MAPPING-START +# BLOCK-END +# FLOW-SEQUENCE-START +# FLOW-MAPPING-START +# FLOW-SEQUENCE-END +# FLOW-MAPPING-END +# BLOCK-ENTRY +# FLOW-ENTRY +# KEY +# VALUE +# ALIAS(value) +# ANCHOR(value) +# TAG(value) +# SCALAR(value, plain, style) +# +# RoundTripScanner +# COMMENT(value) +# +# Read comments in the Scanner code for more details. +# + +from ...ruamel.yaml.error import MarkedYAMLError +from ...ruamel.yaml.tokens import * # NOQA +from ...ruamel.yaml.compat import utf8, unichr, PY3, check_anchorname_char, nprint # NOQA + +if False: # MYPY + from typing import Any, Dict, Optional, List, Union, Text # NOQA + from ...ruamel.yaml.compat import VersionType # NOQA + +__all__ = ['Scanner', 'RoundTripScanner', 'ScannerError'] + + +_THE_END = '\n\0\r\x85\u2028\u2029' +_THE_END_SPACE_TAB = ' \n\0\t\r\x85\u2028\u2029' +_SPACE_TAB = ' \t' + + +class ScannerError(MarkedYAMLError): + pass + + +class SimpleKey(object): + # See below simple keys treatment. + + def __init__(self, token_number, required, index, line, column, mark): + # type: (Any, Any, int, int, int, Any) -> None + self.token_number = token_number + self.required = required + self.index = index + self.line = line + self.column = column + self.mark = mark + + +class Scanner(object): + def __init__(self, loader=None): + # type: (Any) -> None + """Initialize the scanner.""" + # It is assumed that Scanner and Reader will have a common descendant. + # Reader do the dirty work of checking for BOM and converting the + # input data to Unicode. It also adds NUL to the end. + # + # Reader supports the following methods + # self.peek(i=0) # peek the next i-th character + # self.prefix(l=1) # peek the next l characters + # self.forward(l=1) # read the next l characters and move the pointer + + self.loader = loader + if self.loader is not None and getattr(self.loader, '_scanner', None) is None: + self.loader._scanner = self + self.reset_scanner() + self.first_time = False + self.yaml_version = None # type: Any + + @property + def flow_level(self): + # type: () -> int + return len(self.flow_context) + + def reset_scanner(self): + # type: () -> None + # Had we reached the end of the stream? + self.done = False + + # flow_context is an expanding/shrinking list consisting of '{' and '[' + # for each unclosed flow context. If empty list that means block context + self.flow_context = [] # type: List[Text] + + # List of processed tokens that are not yet emitted. + self.tokens = [] # type: List[Any] + + # Add the STREAM-START token. + self.fetch_stream_start() + + # Number of tokens that were emitted through the `get_token` method. + self.tokens_taken = 0 + + # The current indentation level. + self.indent = -1 + + # Past indentation levels. + self.indents = [] # type: List[int] + + # Variables related to simple keys treatment. + + # A simple key is a key that is not denoted by the '?' indicator. + # Example of simple keys: + # --- + # block simple key: value + # ? 
not a simple key: + # : { flow simple key: value } + # We emit the KEY token before all keys, so when we find a potential + # simple key, we try to locate the corresponding ':' indicator. + # Simple keys should be limited to a single line and 1024 characters. + + # Can a simple key start at the current position? A simple key may + # start: + # - at the beginning of the line, not counting indentation spaces + # (in block context), + # - after '{', '[', ',' (in the flow context), + # - after '?', ':', '-' (in the block context). + # In the block context, this flag also signifies if a block collection + # may start at the current position. + self.allow_simple_key = True + + # Keep track of possible simple keys. This is a dictionary. The key + # is `flow_level`; there can be no more that one possible simple key + # for each level. The value is a SimpleKey record: + # (token_number, required, index, line, column, mark) + # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow), + # '[', or '{' tokens. + self.possible_simple_keys = {} # type: Dict[Any, Any] + + @property + def reader(self): + # type: () -> Any + try: + return self._scanner_reader # type: ignore + except AttributeError: + if hasattr(self.loader, 'typ'): + self._scanner_reader = self.loader.reader + else: + self._scanner_reader = self.loader._reader + return self._scanner_reader + + @property + def scanner_processing_version(self): # prefix until un-composited + # type: () -> Any + if hasattr(self.loader, 'typ'): + return self.loader.resolver.processing_version + return self.loader.processing_version + + # Public methods. + + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + if bool(self.tokens): + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + if bool(self.tokens): + return self.tokens[0] + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + if bool(self.tokens): + self.tokens_taken += 1 + return self.tokens.pop(0) + + # Private methods. + + def need_more_tokens(self): + # type: () -> bool + if self.done: + return False + if not self.tokens: + return True + # The current token may be a potential simple key, so we + # need to look further. + self.stale_possible_simple_keys() + if self.next_possible_simple_key() == self.tokens_taken: + return True + return False + + def fetch_comment(self, comment): + # type: (Any) -> None + raise NotImplementedError + + def fetch_more_tokens(self): + # type: () -> Any + # Eat whitespaces and comments until we reach the next token. + comment = self.scan_to_next_token() + if comment is not None: # never happens for base scanner + return self.fetch_comment(comment) + # Remove obsolete possible simple keys. + self.stale_possible_simple_keys() + + # Compare the current indentation and column. It may add some tokens + # and decrease the current indentation level. + self.unwind_indent(self.reader.column) + + # Peek the next character. + ch = self.reader.peek() + + # Is it the end of stream? + if ch == '\0': + return self.fetch_stream_end() + + # Is it a directive? 
+ if ch == '%' and self.check_directive(): + return self.fetch_directive() + + # Is it the document start? + if ch == '-' and self.check_document_start(): + return self.fetch_document_start() + + # Is it the document end? + if ch == '.' and self.check_document_end(): + return self.fetch_document_end() + + # TODO: support for BOM within a stream. + # if ch == u'\uFEFF': + # return self.fetch_bom() <-- issue BOMToken + + # Note: the order of the following checks is NOT significant. + + # Is it the flow sequence start indicator? + if ch == '[': + return self.fetch_flow_sequence_start() + + # Is it the flow mapping start indicator? + if ch == '{': + return self.fetch_flow_mapping_start() + + # Is it the flow sequence end indicator? + if ch == ']': + return self.fetch_flow_sequence_end() + + # Is it the flow mapping end indicator? + if ch == '}': + return self.fetch_flow_mapping_end() + + # Is it the flow entry indicator? + if ch == ',': + return self.fetch_flow_entry() + + # Is it the block entry indicator? + if ch == '-' and self.check_block_entry(): + return self.fetch_block_entry() + + # Is it the key indicator? + if ch == '?' and self.check_key(): + return self.fetch_key() + + # Is it the value indicator? + if ch == ':' and self.check_value(): + return self.fetch_value() + + # Is it an alias? + if ch == '*': + return self.fetch_alias() + + # Is it an anchor? + if ch == '&': + return self.fetch_anchor() + + # Is it a tag? + if ch == '!': + return self.fetch_tag() + + # Is it a literal scalar? + if ch == '|' and not self.flow_level: + return self.fetch_literal() + + # Is it a folded scalar? + if ch == '>' and not self.flow_level: + return self.fetch_folded() + + # Is it a single quoted scalar? + if ch == "'": + return self.fetch_single() + + # Is it a double quoted scalar? + if ch == '"': + return self.fetch_double() + + # It must be a plain scalar then. + if self.check_plain(): + return self.fetch_plain() + + # No? It's an error. Let's produce a nice error message. + raise ScannerError( + 'while scanning for the next token', + None, + 'found character %r that cannot start any token' % utf8(ch), + self.reader.get_mark(), + ) + + # Simple keys treatment. + + def next_possible_simple_key(self): + # type: () -> Any + # Return the number of the nearest possible simple key. Actually we + # don't need to loop through the whole dictionary. We may replace it + # with the following code: + # if not self.possible_simple_keys: + # return None + # return self.possible_simple_keys[ + # min(self.possible_simple_keys.keys())].token_number + min_token_number = None + for level in self.possible_simple_keys: + key = self.possible_simple_keys[level] + if min_token_number is None or key.token_number < min_token_number: + min_token_number = key.token_number + return min_token_number + + def stale_possible_simple_keys(self): + # type: () -> None + # Remove entries that are no longer possible simple keys. According to + # the YAML specification, simple keys + # - should be limited to a single line, + # - should be no longer than 1024 characters. + # Disabling this procedure will allow simple keys of any length and + # height (may cause problems if indentation is broken though). 
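        # Editorial note (illustration, not part of the vendored source): a
        # "simple key" is any key written without the '?' indicator, so the
        # scanner only learns it was a key when it later reaches the ':', e.g.
        #
        #     plain key: value        <- simple key; KEY token inserted retroactively
        #     ? explicit key          <- not simple; '?' announces the key up front
        #     : value
        #
        # That retroactive insertion is why candidate positions are recorded in
        # possible_simple_keys and discarded below once the line ends or more
        # than 1024 characters have gone by.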
+ for level in list(self.possible_simple_keys): + key = self.possible_simple_keys[level] + if key.line != self.reader.line or self.reader.index - key.index > 1024: + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + del self.possible_simple_keys[level] + + def save_possible_simple_key(self): + # type: () -> None + # The next token may start a simple key. We check if it's possible + # and save its position. This function is called for + # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'. + + # Check if a simple key is required at the current position. + required = not self.flow_level and self.indent == self.reader.column + + # The next token might be a simple key. Let's save it's number and + # position. + if self.allow_simple_key: + self.remove_possible_simple_key() + token_number = self.tokens_taken + len(self.tokens) + key = SimpleKey( + token_number, + required, + self.reader.index, + self.reader.line, + self.reader.column, + self.reader.get_mark(), + ) + self.possible_simple_keys[self.flow_level] = key + + def remove_possible_simple_key(self): + # type: () -> None + # Remove the saved possible key position at the current flow level. + if self.flow_level in self.possible_simple_keys: + key = self.possible_simple_keys[self.flow_level] + + if key.required: + raise ScannerError( + 'while scanning a simple key', + key.mark, + "could not find expected ':'", + self.reader.get_mark(), + ) + + del self.possible_simple_keys[self.flow_level] + + # Indentation functions. + + def unwind_indent(self, column): + # type: (Any) -> None + # In flow context, tokens should respect indentation. + # Actually the condition should be `self.indent >= column` according to + # the spec. But this condition will prohibit intuitively correct + # constructions such as + # key : { + # } + # #### + # if self.flow_level and self.indent > column: + # raise ScannerError(None, None, + # "invalid intendation or unclosed '[' or '{'", + # self.reader.get_mark()) + + # In the flow context, indentation is ignored. We make the scanner less + # restrictive then specification requires. + if bool(self.flow_level): + return + + # In block context, we may need to issue the BLOCK-END tokens. + while self.indent > column: + mark = self.reader.get_mark() + self.indent = self.indents.pop() + self.tokens.append(BlockEndToken(mark, mark)) + + def add_indent(self, column): + # type: (int) -> bool + # Check if we need to increase indentation. + if self.indent < column: + self.indents.append(self.indent) + self.indent = column + return True + return False + + # Fetchers. + + def fetch_stream_start(self): + # type: () -> None + # We always add STREAM-START as the first token and STREAM-END as the + # last token. + # Read the token. + mark = self.reader.get_mark() + # Add STREAM-START. + self.tokens.append(StreamStartToken(mark, mark, encoding=self.reader.encoding)) + + def fetch_stream_end(self): + # type: () -> None + # Set the current intendation to -1. + self.unwind_indent(-1) + # Reset simple keys. + self.remove_possible_simple_key() + self.allow_simple_key = False + self.possible_simple_keys = {} + # Read the token. + mark = self.reader.get_mark() + # Add STREAM-END. + self.tokens.append(StreamEndToken(mark, mark)) + # The steam is finished. + self.done = True + + def fetch_directive(self): + # type: () -> None + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. 
+ self.remove_possible_simple_key() + self.allow_simple_key = False + + # Scan and add DIRECTIVE. + self.tokens.append(self.scan_directive()) + + def fetch_document_start(self): + # type: () -> None + self.fetch_document_indicator(DocumentStartToken) + + def fetch_document_end(self): + # type: () -> None + self.fetch_document_indicator(DocumentEndToken) + + def fetch_document_indicator(self, TokenClass): + # type: (Any) -> None + # Set the current intendation to -1. + self.unwind_indent(-1) + + # Reset simple keys. Note that there could not be a block collection + # after '---'. + self.remove_possible_simple_key() + self.allow_simple_key = False + + # Add DOCUMENT-START or DOCUMENT-END. + start_mark = self.reader.get_mark() + self.reader.forward(3) + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_start(self): + # type: () -> None + self.fetch_flow_collection_start(FlowSequenceStartToken, to_push='[') + + def fetch_flow_mapping_start(self): + # type: () -> None + self.fetch_flow_collection_start(FlowMappingStartToken, to_push='{') + + def fetch_flow_collection_start(self, TokenClass, to_push): + # type: (Any, Text) -> None + # '[' and '{' may start a simple key. + self.save_possible_simple_key() + # Increase the flow level. + self.flow_context.append(to_push) + # Simple keys are allowed after '[' and '{'. + self.allow_simple_key = True + # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_sequence_end(self): + # type: () -> None + self.fetch_flow_collection_end(FlowSequenceEndToken) + + def fetch_flow_mapping_end(self): + # type: () -> None + self.fetch_flow_collection_end(FlowMappingEndToken) + + def fetch_flow_collection_end(self, TokenClass): + # type: (Any) -> None + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Decrease the flow level. + try: + popped = self.flow_context.pop() # NOQA + except IndexError: + # We must not be in a list or object. + # Defer error handling to the parser. + pass + # No simple keys after ']' or '}'. + self.allow_simple_key = False + # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(TokenClass(start_mark, end_mark)) + + def fetch_flow_entry(self): + # type: () -> None + # Simple keys are allowed after ','. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Add FLOW-ENTRY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(FlowEntryToken(start_mark, end_mark)) + + def fetch_block_entry(self): + # type: () -> None + # Block context needs additional checks. + if not self.flow_level: + # Are we allowed to start a new entry? + if not self.allow_simple_key: + raise ScannerError( + None, None, 'sequence entries are not allowed here', self.reader.get_mark() + ) + # We may need to add BLOCK-SEQUENCE-START. + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockSequenceStartToken(mark, mark)) + # It's an error for the block entry to occur in the flow context, + # but we let the parser detect this. + else: + pass + # Simple keys are allowed after '-'. 
+ self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add BLOCK-ENTRY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(BlockEntryToken(start_mark, end_mark)) + + def fetch_key(self): + # type: () -> None + # Block context needs additional checks. + if not self.flow_level: + + # Are we allowed to start a key (not nessesary a simple)? + if not self.allow_simple_key: + raise ScannerError( + None, None, 'mapping keys are not allowed here', self.reader.get_mark() + ) + + # We may need to add BLOCK-MAPPING-START. + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after '?' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add KEY. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(KeyToken(start_mark, end_mark)) + + def fetch_value(self): + # type: () -> None + # Do we determine a simple key? + if self.flow_level in self.possible_simple_keys: + # Add KEY. + key = self.possible_simple_keys[self.flow_level] + del self.possible_simple_keys[self.flow_level] + self.tokens.insert( + key.token_number - self.tokens_taken, KeyToken(key.mark, key.mark) + ) + + # If this key starts a new block mapping, we need to add + # BLOCK-MAPPING-START. + if not self.flow_level: + if self.add_indent(key.column): + self.tokens.insert( + key.token_number - self.tokens_taken, + BlockMappingStartToken(key.mark, key.mark), + ) + + # There cannot be two simple keys one after another. + self.allow_simple_key = False + + # It must be a part of a complex key. + else: + + # Block context needs additional checks. + # (Do we really need them? They will be caught by the parser + # anyway.) + if not self.flow_level: + + # We are allowed to start a complex value if and only if + # we can start a simple key. + if not self.allow_simple_key: + raise ScannerError( + None, + None, + 'mapping values are not allowed here', + self.reader.get_mark(), + ) + + # If this value starts a new block mapping, we need to add + # BLOCK-MAPPING-START. It will be detected as an error later by + # the parser. + if not self.flow_level: + if self.add_indent(self.reader.column): + mark = self.reader.get_mark() + self.tokens.append(BlockMappingStartToken(mark, mark)) + + # Simple keys are allowed after ':' in the block context. + self.allow_simple_key = not self.flow_level + + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + + # Add VALUE. + start_mark = self.reader.get_mark() + self.reader.forward() + end_mark = self.reader.get_mark() + self.tokens.append(ValueToken(start_mark, end_mark)) + + def fetch_alias(self): + # type: () -> None + # ALIAS could be a simple key. + self.save_possible_simple_key() + # No simple keys after ALIAS. + self.allow_simple_key = False + # Scan and add ALIAS. + self.tokens.append(self.scan_anchor(AliasToken)) + + def fetch_anchor(self): + # type: () -> None + # ANCHOR could start a simple key. + self.save_possible_simple_key() + # No simple keys after ANCHOR. + self.allow_simple_key = False + # Scan and add ANCHOR. + self.tokens.append(self.scan_anchor(AnchorToken)) + + def fetch_tag(self): + # type: () -> None + # TAG could start a simple key. 
+ self.save_possible_simple_key() + # No simple keys after TAG. + self.allow_simple_key = False + # Scan and add TAG. + self.tokens.append(self.scan_tag()) + + def fetch_literal(self): + # type: () -> None + self.fetch_block_scalar(style='|') + + def fetch_folded(self): + # type: () -> None + self.fetch_block_scalar(style='>') + + def fetch_block_scalar(self, style): + # type: (Any) -> None + # A simple key may follow a block scalar. + self.allow_simple_key = True + # Reset possible simple key on the current level. + self.remove_possible_simple_key() + # Scan and add SCALAR. + self.tokens.append(self.scan_block_scalar(style)) + + def fetch_single(self): + # type: () -> None + self.fetch_flow_scalar(style="'") + + def fetch_double(self): + # type: () -> None + self.fetch_flow_scalar(style='"') + + def fetch_flow_scalar(self, style): + # type: (Any) -> None + # A flow scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after flow scalars. + self.allow_simple_key = False + # Scan and add SCALAR. + self.tokens.append(self.scan_flow_scalar(style)) + + def fetch_plain(self): + # type: () -> None + # A plain scalar could be a simple key. + self.save_possible_simple_key() + # No simple keys after plain scalars. But note that `scan_plain` will + # change this flag if the scan is finished at the beginning of the + # line. + self.allow_simple_key = False + # Scan and add SCALAR. May change `allow_simple_key`. + self.tokens.append(self.scan_plain()) + + # Checkers. + + def check_directive(self): + # type: () -> Any + # DIRECTIVE: ^ '%' ... + # The '%' indicator is already checked. + if self.reader.column == 0: + return True + return None + + def check_document_start(self): + # type: () -> Any + # DOCUMENT-START: ^ '---' (' '|'\n') + if self.reader.column == 0: + if self.reader.prefix(3) == '---' and self.reader.peek(3) in _THE_END_SPACE_TAB: + return True + return None + + def check_document_end(self): + # type: () -> Any + # DOCUMENT-END: ^ '...' (' '|'\n') + if self.reader.column == 0: + if self.reader.prefix(3) == '...' and self.reader.peek(3) in _THE_END_SPACE_TAB: + return True + return None + + def check_block_entry(self): + # type: () -> Any + # BLOCK-ENTRY: '-' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_key(self): + # type: () -> Any + # KEY(flow context): '?' + if bool(self.flow_level): + return True + # KEY(block context): '?' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_value(self): + # type: () -> Any + # VALUE(flow context): ':' + if self.scanner_processing_version == (1, 1): + if bool(self.flow_level): + return True + else: + if bool(self.flow_level): + if self.flow_context[-1] == '[': + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + elif self.tokens and isinstance(self.tokens[-1], ValueToken): + # mapping flow context scanning a value token + if self.reader.peek(1) not in _THE_END_SPACE_TAB: + return False + return True + # VALUE(block context): ':' (' '|'\n') + return self.reader.peek(1) in _THE_END_SPACE_TAB + + def check_plain(self): + # type: () -> Any + # A plain scalar may start with any non-space character except: + # '-', '?', ':', ',', '[', ']', '{', '}', + # '#', '&', '*', '!', '|', '>', '\'', '\"', + # '%', '@', '`'. + # + # It may also start with + # '-', '?', ':' + # if it is followed by a non-space character. 
+ # + # Note that we limit the last rule to the block context (except the + # '-' character) because we want the flow context to be space + # independent. + srp = self.reader.peek + ch = srp() + if self.scanner_processing_version == (1, 1): + return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`' or ( + srp(1) not in _THE_END_SPACE_TAB + and (ch == '-' or (not self.flow_level and ch in '?:')) + ) + # YAML 1.2 + if ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'"%@`': + # ################### ^ ??? + return True + ch1 = srp(1) + if ch == '-' and ch1 not in _THE_END_SPACE_TAB: + return True + if ch == ':' and bool(self.flow_level) and ch1 not in _SPACE_TAB: + return True + + return srp(1) not in _THE_END_SPACE_TAB and ( + ch == '-' or (not self.flow_level and ch in '?:') + ) + + # Scanners. + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + found = False + _the_end = _THE_END + while not found: + while srp() == ' ': + srf() + if srp() == '#': + while srp() not in _the_end: + srf() + if self.scan_line_break(): + if not self.flow_level: + self.allow_simple_key = True + else: + found = True + return None + + def scan_directive(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + start_mark = self.reader.get_mark() + srf() + name = self.scan_directive_name(start_mark) + value = None + if name == 'YAML': + value = self.scan_yaml_directive_value(start_mark) + end_mark = self.reader.get_mark() + elif name == 'TAG': + value = self.scan_tag_directive_value(start_mark) + end_mark = self.reader.get_mark() + else: + end_mark = self.reader.get_mark() + while srp() not in _THE_END: + srf() + self.scan_directive_ignored_line(start_mark) + return DirectiveToken(name, value, start_mark, end_mark) + + def scan_directive_name(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
+ length = 0 + srp = self.reader.peek + ch = srp(length) + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_:.': + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + return value + + def scan_yaml_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + major = self.scan_yaml_directive_number(start_mark) + if srp() != '.': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected a digit or '.', but found %r" % utf8(srp()), + self.reader.get_mark(), + ) + srf() + minor = self.scan_yaml_directive_number(start_mark) + if srp() not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected a digit or ' ', but found %r" % utf8(srp()), + self.reader.get_mark(), + ) + self.yaml_version = (major, minor) + return self.yaml_version + + def scan_yaml_directive_number(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + ch = srp() + if not ('0' <= ch <= '9'): + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected a digit, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + length = 0 + while '0' <= srp(length) <= '9': + length += 1 + value = int(self.reader.prefix(length)) + srf(length) + return value + + def scan_tag_directive_value(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + handle = self.scan_tag_directive_handle(start_mark) + while srp() == ' ': + srf() + prefix = self.scan_tag_directive_prefix(start_mark) + return (handle, prefix) + + def scan_tag_directive_handle(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_handle('directive', start_mark) + ch = self.reader.peek() + if ch != ' ': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected ' ', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + return value + + def scan_tag_directive_prefix(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + value = self.scan_tag_uri('directive', start_mark) + ch = self.reader.peek() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a directive', + start_mark, + "expected ' ', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + return value + + def scan_directive_ignored_line(self, start_mark): + # type: (Any) -> None + # See the specification for details. 
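        # Editorial illustration (not part of the vendored source): directives
        # sit before the '---' that starts a document, roughly
        #
        #     %YAML 1.1
        #     %TAG !e! tag:example.com,2019:
        #     ---
        #     !e!thing {a: 1}
        #
        # for which scan_yaml_directive_value() returns (1, 1) and
        # scan_tag_directive_value() returns ('!e!', 'tag:example.com,2019:').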
+ srp = self.reader.peek + srf = self.reader.forward + while srp() == ' ': + srf() + if srp() == '#': + while srp() not in _THE_END: + srf() + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a directive', + start_mark, + 'expected a comment or a line break, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + self.scan_line_break() + + def scan_anchor(self, TokenClass): + # type: (Any) -> Any + # The specification does not restrict characters for anchors and + # aliases. This may lead to problems, for instance, the document: + # [ *alias, value ] + # can be interpteted in two ways, as + # [ "value" ] + # and + # [ *alias , "value" ] + # Therefore we restrict aliases to numbers and ASCII letters. + srp = self.reader.peek + start_mark = self.reader.get_mark() + indicator = srp() + if indicator == '*': + name = 'alias' + else: + name = 'anchor' + self.reader.forward() + length = 0 + ch = srp(length) + # while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \ + # or ch in u'-_': + while check_anchorname_char(ch): + length += 1 + ch = srp(length) + if not length: + raise ScannerError( + 'while scanning an %s' % (name,), + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + value = self.reader.prefix(length) + self.reader.forward(length) + # ch1 = ch + # ch = srp() # no need to peek, ch is already set + # assert ch1 == ch + if ch not in '\0 \t\r\n\x85\u2028\u2029?:,[]{}%@`': + raise ScannerError( + 'while scanning an %s' % (name,), + start_mark, + 'expected alphabetic or numeric character, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + end_mark = self.reader.get_mark() + return TokenClass(value, start_mark, end_mark) + + def scan_tag(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + start_mark = self.reader.get_mark() + ch = srp(1) + if ch == '<': + handle = None + self.reader.forward(2) + suffix = self.scan_tag_uri('tag', start_mark) + if srp() != '>': + raise ScannerError( + 'while parsing a tag', + start_mark, + "expected '>', but found %r" % utf8(srp()), + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in _THE_END_SPACE_TAB: + handle = None + suffix = '!' + self.reader.forward() + else: + length = 1 + use_handle = False + while ch not in '\0 \r\n\x85\u2028\u2029': + if ch == '!': + use_handle = True + break + length += 1 + ch = srp(length) + handle = '!' + if use_handle: + handle = self.scan_tag_handle('tag', start_mark) + else: + handle = '!' + self.reader.forward() + suffix = self.scan_tag_uri('tag', start_mark) + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a tag', + start_mark, + "expected ' ', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + value = (handle, suffix) + end_mark = self.reader.get_mark() + return TagToken(value, start_mark, end_mark) + + def scan_block_scalar(self, style, rt=False): + # type: (Any, Optional[bool]) -> Any + # See the specification for details. + srp = self.reader.peek + if style == '>': + folded = True + else: + folded = False + + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + + # Scan the header. + self.reader.forward() + chomping, increment = self.scan_block_scalar_indicators(start_mark) + # block scalar comment e.g. : |+ # comment text + block_scalar_comment = self.scan_block_scalar_ignored_line(start_mark) + + # Determine the indentation level and go to the first non-empty line. 
+ min_indent = self.indent + 1 + if increment is None: + # no increment and top level, min_indent could be 0 + if min_indent < 1 and ( + style not in '|>' + or (self.scanner_processing_version == (1, 1)) + and getattr( + self.loader, 'top_level_block_style_scalar_no_indent_error_1_1', False + ) + ): + min_indent = 1 + breaks, max_indent, end_mark = self.scan_block_scalar_indentation() + indent = max(min_indent, max_indent) + else: + if min_indent < 1: + min_indent = 1 + indent = min_indent + increment - 1 + breaks, end_mark = self.scan_block_scalar_breaks(indent) + line_break = "" + + # Scan the inner part of the block scalar. + while self.reader.column == indent and srp() != '\0': + chunks.extend(breaks) + leading_non_space = srp() not in ' \t' + length = 0 + while srp(length) not in _THE_END: + length += 1 + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + line_break = self.scan_line_break() + breaks, end_mark = self.scan_block_scalar_breaks(indent) + if style in '|>' and min_indent == 0: + # at the beginning of a line, if in block style see if + # end of document/start_new_document + if self.check_document_start() or self.check_document_end(): + break + if self.reader.column == indent and srp() != '\0': + + # Unfortunately, folding rules are ambiguous. + # + # This is the folding according to the specification: + + if rt and folded and line_break == '\n': + chunks.append('\a') + if folded and line_break == '\n' and leading_non_space and srp() not in ' \t': + if not breaks: + chunks.append(' ') + else: + chunks.append(line_break) + + # This is Clark Evans's interpretation (also in the spec + # examples): + # + # if folded and line_break == u'\n': + # if not breaks: + # if srp() not in ' \t': + # chunks.append(u' ') + # else: + # chunks.append(line_break) + # else: + # chunks.append(line_break) + else: + break + + # Process trailing line breaks. The 'chomping' setting determines + # whether they are included in the value. + trailing = [] # type: List[Any] + if chomping in [None, True]: + chunks.append(line_break) + if chomping is True: + chunks.extend(breaks) + elif chomping in [None, False]: + trailing.extend(breaks) + + # We are done. + token = ScalarToken("".join(chunks), False, start_mark, end_mark, style) + if block_scalar_comment is not None: + token.add_pre_comments([block_scalar_comment]) + if len(trailing) > 0: + # nprint('trailing 1', trailing) # XXXXX + # Eat whitespaces and comments until we reach the next token. + comment = self.scan_to_next_token() + while comment: + trailing.append(' ' * comment[1].column + comment[0]) + comment = self.scan_to_next_token() + + # Keep track of the trailing whitespace and following comments + # as a comment token, if isn't all included in the actual value. + comment_end_mark = self.reader.get_mark() + comment = CommentToken("".join(trailing), end_mark, comment_end_mark) + token.add_post_comment(comment) + return token + + def scan_block_scalar_indicators(self, start_mark): + # type: (Any) -> Any + # See the specification for details. 
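        # Editorial illustration (not part of the vendored source): the header
        # after '|' or '>' may combine a chomping indicator and a 1-9
        # indentation indicator, in either order, e.g.
        #
        #     key: |+2    -> literal, keep trailing newlines, content indented by 2
        #     key: >-     -> folded, strip the final line break
        #     key: |      -> literal, default "clip" chomping, indentation auto-detected
        #
        # returned here as (chomping, increment): (True, 2), (False, None), (None, None).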
+ srp = self.reader.peek + chomping = None + increment = None + ch = srp() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.reader.forward() + ch = srp() + if ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected indentation indicator in the range 1-9, ' 'but found 0', + self.reader.get_mark(), + ) + self.reader.forward() + elif ch in '0123456789': + increment = int(ch) + if increment == 0: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected indentation indicator in the range 1-9, ' 'but found 0', + self.reader.get_mark(), + ) + self.reader.forward() + ch = srp() + if ch in '+-': + if ch == '+': + chomping = True + else: + chomping = False + self.reader.forward() + ch = srp() + if ch not in '\0 \r\n\x85\u2028\u2029': + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected chomping or indentation indicators, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + return chomping, increment + + def scan_block_scalar_ignored_line(self, start_mark): + # type: (Any) -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + prefix = '' + comment = None + while srp() == ' ': + prefix += srp() + srf() + if srp() == '#': + comment = prefix + while srp() not in _THE_END: + comment += srp() + srf() + ch = srp() + if ch not in _THE_END: + raise ScannerError( + 'while scanning a block scalar', + start_mark, + 'expected a comment or a line break, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + self.scan_line_break() + return comment + + def scan_block_scalar_indentation(self): + # type: () -> Any + # See the specification for details. + srp = self.reader.peek + srf = self.reader.forward + chunks = [] + max_indent = 0 + end_mark = self.reader.get_mark() + while srp() in ' \r\n\x85\u2028\u2029': + if srp() != ' ': + chunks.append(self.scan_line_break()) + end_mark = self.reader.get_mark() + else: + srf() + if self.reader.column > max_indent: + max_indent = self.reader.column + return chunks, max_indent, end_mark + + def scan_block_scalar_breaks(self, indent): + # type: (int) -> Any + # See the specification for details. + chunks = [] + srp = self.reader.peek + srf = self.reader.forward + end_mark = self.reader.get_mark() + while self.reader.column < indent and srp() == ' ': + srf() + while srp() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + end_mark = self.reader.get_mark() + while self.reader.column < indent and srp() == ' ': + srf() + return chunks, end_mark + + def scan_flow_scalar(self, style): + # type: (Any) -> Any + # See the specification for details. + # Note that we loose indentation rules for quoted scalars. Quoted + # scalars don't need to adhere indentation because " and ' clearly + # mark the beginning and the end of them. Therefore we are less + # restrictive then the specification requires. We only need to check + # that document separators are not included in scalars. 
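As the note above says, quoting rather than indentation delimits these scalars; the practical difference between the two quote styles handled below (escape expansion in double quotes, only '' collapsing in single quotes) is easy to show. A sketch, again assuming the upstream ruamel.yaml package:

    # Double-quoted scalars expand the escape tables defined below
    # (ESCAPE_REPLACEMENTS / ESCAPE_CODES); single-quoted scalars keep
    # backslashes literal and only turn '' into a single quote.
    import ruamel.yaml

    yaml = ruamel.yaml.YAML(typ='safe')
    data = yaml.load(
        'double: "tab:\\t unicode:\\u00e9"\n'
        "single: 'it''s a literal \\n'\n"
    )
    assert data['double'] == 'tab:\t unicode:\xe9'
    assert data['single'] == "it's a literal \\n"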
+ if style == '"': + double = True + else: + double = False + srp = self.reader.peek + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + quote = srp() + self.reader.forward() + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + while srp() != quote: + chunks.extend(self.scan_flow_scalar_spaces(double, start_mark)) + chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark)) + self.reader.forward() + end_mark = self.reader.get_mark() + return ScalarToken("".join(chunks), False, start_mark, end_mark, style) + + ESCAPE_REPLACEMENTS = { + '0': '\0', + 'a': '\x07', + 'b': '\x08', + 't': '\x09', + '\t': '\x09', + 'n': '\x0A', + 'v': '\x0B', + 'f': '\x0C', + 'r': '\x0D', + 'e': '\x1B', + ' ': '\x20', + '"': '"', + '/': '/', # as per http://www.json.org/ + '\\': '\\', + 'N': '\x85', + '_': '\xA0', + 'L': '\u2028', + 'P': '\u2029', + } + + ESCAPE_CODES = {'x': 2, 'u': 4, 'U': 8} + + def scan_flow_scalar_non_spaces(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + chunks = [] # type: List[Any] + srp = self.reader.peek + srf = self.reader.forward + while True: + length = 0 + while srp(length) not in ' \n\'"\\\0\t\r\x85\u2028\u2029': + length += 1 + if length != 0: + chunks.append(self.reader.prefix(length)) + srf(length) + ch = srp() + if not double and ch == "'" and srp(1) == "'": + chunks.append("'") + srf(2) + elif (double and ch == "'") or (not double and ch in '"\\'): + chunks.append(ch) + srf() + elif double and ch == '\\': + srf() + ch = srp() + if ch in self.ESCAPE_REPLACEMENTS: + chunks.append(self.ESCAPE_REPLACEMENTS[ch]) + srf() + elif ch in self.ESCAPE_CODES: + length = self.ESCAPE_CODES[ch] + srf() + for k in range(length): + if srp(k) not in '0123456789ABCDEFabcdef': + raise ScannerError( + 'while scanning a double-quoted scalar', + start_mark, + 'expected escape sequence of %d hexdecimal ' + 'numbers, but found %r' % (length, utf8(srp(k))), + self.reader.get_mark(), + ) + code = int(self.reader.prefix(length), 16) + chunks.append(unichr(code)) + srf(length) + elif ch in '\n\r\x85\u2028\u2029': + self.scan_line_break() + chunks.extend(self.scan_flow_scalar_breaks(double, start_mark)) + else: + raise ScannerError( + 'while scanning a double-quoted scalar', + start_mark, + 'found unknown escape character %r' % utf8(ch), + self.reader.get_mark(), + ) + else: + return chunks + + def scan_flow_scalar_spaces(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + srp = self.reader.peek + chunks = [] + length = 0 + while srp(length) in ' \t': + length += 1 + whitespaces = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch == '\0': + raise ScannerError( + 'while scanning a quoted scalar', + start_mark, + 'found unexpected end of stream', + self.reader.get_mark(), + ) + elif ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + breaks = self.scan_flow_scalar_breaks(double, start_mark) + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + else: + chunks.append(whitespaces) + return chunks + + def scan_flow_scalar_breaks(self, double, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + chunks = [] # type: List[Any] + srp = self.reader.peek + srf = self.reader.forward + while True: + # Instead of checking indentation, we check for document + # separators. 
+ prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + raise ScannerError( + 'while scanning a quoted scalar', + start_mark, + 'found unexpected document separator', + self.reader.get_mark(), + ) + while srp() in ' \t': + srf() + if srp() in '\r\n\x85\u2028\u2029': + chunks.append(self.scan_line_break()) + else: + return chunks + + def scan_plain(self): + # type: () -> Any + # See the specification for details. + # We add an additional restriction for the flow context: + # plain scalars in the flow context cannot contain ',', ': ' and '?'. + # We also keep track of the `allow_simple_key` flag here. + # Indentation rules are loosed for the flow context. + srp = self.reader.peek + srf = self.reader.forward + chunks = [] # type: List[Any] + start_mark = self.reader.get_mark() + end_mark = start_mark + indent = self.indent + 1 + # We allow zero indentation for scalars, but then we need to check for + # document separators at the beginning of the line. + # if indent == 0: + # indent = 1 + spaces = [] # type: List[Any] + while True: + length = 0 + if srp() == '#': + break + while True: + ch = srp(length) + if ch == ':' and srp(length + 1) not in _THE_END_SPACE_TAB: + pass + elif ch == '?' and self.scanner_processing_version != (1, 1): + pass + elif ( + ch in _THE_END_SPACE_TAB + or ( + not self.flow_level + and ch == ':' + and srp(length + 1) in _THE_END_SPACE_TAB + ) + or (self.flow_level and ch in ',:?[]{}') + ): + break + length += 1 + # It's not clear what we should do with ':' in the flow context. + if ( + self.flow_level + and ch == ':' + and srp(length + 1) not in '\0 \t\r\n\x85\u2028\u2029,[]{}' + ): + srf(length) + raise ScannerError( + 'while scanning a plain scalar', + start_mark, + "found unexpected ':'", + self.reader.get_mark(), + 'Please check ' + 'http://pyyaml.org/wiki/YAMLColonInFlowContext ' + 'for details.', + ) + if length == 0: + break + self.allow_simple_key = False + chunks.extend(spaces) + chunks.append(self.reader.prefix(length)) + srf(length) + end_mark = self.reader.get_mark() + spaces = self.scan_plain_spaces(indent, start_mark) + if ( + not spaces + or srp() == '#' + or (not self.flow_level and self.reader.column < indent) + ): + break + + token = ScalarToken("".join(chunks), True, start_mark, end_mark) + if spaces and spaces[0] == '\n': + # Create a comment token to preserve the trailing line breaks. + comment = CommentToken("".join(spaces) + '\n', start_mark, end_mark) + token.add_post_comment(comment) + return token + + def scan_plain_spaces(self, indent, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # The specification is really confusing about tabs in plain scalars. + # We just forbid them completely. Do not use tabs in YAML! 
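The flow-context restriction described in scan_plain above, where ',' and ':' end a plain scalar inside '[]' or '{}', is what makes the same characters parse differently in flow and block style. A short sketch, assuming upstream ruamel.yaml:

    # In flow context the comma terminates the plain scalar, so this is a
    # two-element sequence; in block context it is just part of the value.
    import ruamel.yaml

    yaml = ruamel.yaml.YAML(typ='safe')
    assert yaml.load('[a, b]') == ['a', 'b']
    assert yaml.load('key: a, b') == {'key': 'a, b'}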
+ srp = self.reader.peek + srf = self.reader.forward + chunks = [] + length = 0 + while srp(length) in ' ': + length += 1 + whitespaces = self.reader.prefix(length) + self.reader.forward(length) + ch = srp() + if ch in '\r\n\x85\u2028\u2029': + line_break = self.scan_line_break() + self.allow_simple_key = True + prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + return + breaks = [] + while srp() in ' \r\n\x85\u2028\u2029': + if srp() == ' ': + srf() + else: + breaks.append(self.scan_line_break()) + prefix = self.reader.prefix(3) + if (prefix == '---' or prefix == '...') and srp(3) in _THE_END_SPACE_TAB: + return + if line_break != '\n': + chunks.append(line_break) + elif not breaks: + chunks.append(' ') + chunks.extend(breaks) + elif whitespaces: + chunks.append(whitespaces) + return chunks + + def scan_tag_handle(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # For some strange reasons, the specification does not allow '_' in + # tag handles. I have allowed it anyway. + srp = self.reader.peek + ch = srp() + if ch != '!': + raise ScannerError( + 'while scanning a %s' % (name,), + start_mark, + "expected '!', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + length = 1 + ch = srp(length) + if ch != ' ': + while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' or ch in '-_': + length += 1 + ch = srp(length) + if ch != '!': + self.reader.forward(length) + raise ScannerError( + 'while scanning a %s' % (name,), + start_mark, + "expected '!', but found %r" % utf8(ch), + self.reader.get_mark(), + ) + length += 1 + value = self.reader.prefix(length) + self.reader.forward(length) + return value + + def scan_tag_uri(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. + # Note: we do not check if URI is well-formed. + srp = self.reader.peek + chunks = [] + length = 0 + ch = srp(length) + while ( + '0' <= ch <= '9' + or 'A' <= ch <= 'Z' + or 'a' <= ch <= 'z' + or ch in "-;/?:@&=+$,_.!~*'()[]%" + or ((self.scanner_processing_version > (1, 1)) and ch == '#') + ): + if ch == '%': + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + length = 0 + chunks.append(self.scan_uri_escapes(name, start_mark)) + else: + length += 1 + ch = srp(length) + if length != 0: + chunks.append(self.reader.prefix(length)) + self.reader.forward(length) + length = 0 + if not chunks: + raise ScannerError( + 'while parsing a %s' % (name,), + start_mark, + 'expected URI, but found %r' % utf8(ch), + self.reader.get_mark(), + ) + return "".join(chunks) + + def scan_uri_escapes(self, name, start_mark): + # type: (Any, Any) -> Any + # See the specification for details. 
+ srp = self.reader.peek + srf = self.reader.forward + code_bytes = [] # type: List[Any] + mark = self.reader.get_mark() + while srp() == '%': + srf() + for k in range(2): + if srp(k) not in '0123456789ABCDEFabcdef': + raise ScannerError( + 'while scanning a %s' % (name,), + start_mark, + 'expected URI escape sequence of 2 hexdecimal numbers,' + ' but found %r' % utf8(srp(k)), + self.reader.get_mark(), + ) + if PY3: + code_bytes.append(int(self.reader.prefix(2), 16)) + else: + code_bytes.append(chr(int(self.reader.prefix(2), 16))) + srf(2) + try: + if PY3: + value = bytes(code_bytes).decode('utf-8') + else: + value = unicode(b"".join(code_bytes), 'utf-8') + except UnicodeDecodeError as exc: + raise ScannerError('while scanning a %s' % (name,), start_mark, str(exc), mark) + return value + + def scan_line_break(self): + # type: () -> Any + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.reader.peek() + if ch in '\r\n\x85': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + return '\n' + elif ch in '\u2028\u2029': + self.reader.forward() + return ch + return "" + + +class RoundTripScanner(Scanner): + def check_token(self, *choices): + # type: (Any) -> bool + # Check if the next token is one of the given types. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if bool(self.tokens): + if not choices: + return True + for choice in choices: + if isinstance(self.tokens[0], choice): + return True + return False + + def peek_token(self): + # type: () -> Any + # Return the next token, but do not delete if from the queue. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if bool(self.tokens): + return self.tokens[0] + return None + + def _gather_comments(self): + # type: () -> Any + """combine multiple comment lines""" + comments = [] # type: List[Any] + if not self.tokens: + return comments + if isinstance(self.tokens[0], CommentToken): + comment = self.tokens.pop(0) + self.tokens_taken += 1 + comments.append(comment) + while self.need_more_tokens(): + self.fetch_more_tokens() + if not self.tokens: + return comments + if isinstance(self.tokens[0], CommentToken): + self.tokens_taken += 1 + comment = self.tokens.pop(0) + # nprint('dropping2', comment) + comments.append(comment) + if len(comments) >= 1: + self.tokens[0].add_pre_comments(comments) + # pull in post comment on e.g. ':' + if not self.done and len(self.tokens) < 2: + self.fetch_more_tokens() + + def get_token(self): + # type: () -> Any + # Return the next token. + while self.need_more_tokens(): + self.fetch_more_tokens() + self._gather_comments() + if bool(self.tokens): + # nprint('tk', self.tokens) + # only add post comment to single line tokens: + # scalar, value token. 
FlowXEndToken, otherwise + # hidden streamtokens could get them (leave them and they will be + # pre comments for the next map/seq + if ( + len(self.tokens) > 1 + and isinstance( + self.tokens[0], + (ScalarToken, ValueToken, FlowSequenceEndToken, FlowMappingEndToken), + ) + and isinstance(self.tokens[1], CommentToken) + and self.tokens[0].end_mark.line == self.tokens[1].start_mark.line + ): + self.tokens_taken += 1 + c = self.tokens.pop(1) + self.fetch_more_tokens() + while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken): + self.tokens_taken += 1 + c1 = self.tokens.pop(1) + c.value = c.value + (' ' * c1.start_mark.column) + c1.value + self.fetch_more_tokens() + self.tokens[0].add_post_comment(c) + elif ( + len(self.tokens) > 1 + and isinstance(self.tokens[0], ScalarToken) + and isinstance(self.tokens[1], CommentToken) + and self.tokens[0].end_mark.line != self.tokens[1].start_mark.line + ): + self.tokens_taken += 1 + c = self.tokens.pop(1) + c.value = ( + '\n' * (c.start_mark.line - self.tokens[0].end_mark.line) + + (' ' * c.start_mark.column) + + c.value + ) + self.tokens[0].add_post_comment(c) + self.fetch_more_tokens() + while len(self.tokens) > 1 and isinstance(self.tokens[1], CommentToken): + self.tokens_taken += 1 + c1 = self.tokens.pop(1) + c.value = c.value + (' ' * c1.start_mark.column) + c1.value + self.fetch_more_tokens() + self.tokens_taken += 1 + return self.tokens.pop(0) + return None + + def fetch_comment(self, comment): + # type: (Any) -> None + value, start_mark, end_mark = comment + while value and value[-1] == ' ': + # empty line within indented key context + # no need to update end-mark, that is not used + value = value[:-1] + self.tokens.append(CommentToken(value, start_mark, end_mark)) + + # scanner + + def scan_to_next_token(self): + # type: () -> Any + # We ignore spaces, line breaks and comments. + # If we find a line break in the block context, we set the flag + # `allow_simple_key` on. + # The byte order mark is stripped if it's the first character in the + # stream. We do not yet support BOM inside the stream as the + # specification requires. Any such mark will be considered as a part + # of the document. + # + # TODO: We need to make tab handling rules more sane. A good rule is + # Tabs cannot precede tokens + # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END, + # KEY(block), VALUE(block), BLOCK-ENTRY + # So the checking code is + # if : + # self.allow_simple_keys = False + # We also need to add the check for `allow_simple_keys == True` to + # `unwind_indent` before issuing BLOCK-END. + # Scanners for block, flow, and plain scalars need to be modified. 
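The comment shuffling above is what gives the round-trip loader its headline feature: comments ride along on the tokens they followed and come back out on dump. A minimal sketch of the observable effect, assuming the upstream ruamel.yaml package:

    # CommentTokens gathered by the RoundTripScanner are re-emitted when the
    # (possibly modified) data is dumped again.
    import sys
    import ruamel.yaml

    yaml = ruamel.yaml.YAML()  # round-trip mode is the default
    data = yaml.load(
        "key: value  # inline comment\n"
        "# standalone comment\n"
        "other: 1\n"
    )
    data['other'] = 2
    yaml.dump(data, sys.stdout)  # both comments appear in the output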
+ + srp = self.reader.peek + srf = self.reader.forward + if self.reader.index == 0 and srp() == '\uFEFF': + srf() + found = False + while not found: + while srp() == ' ': + srf() + ch = srp() + if ch == '#': + start_mark = self.reader.get_mark() + comment = ch + srf() + while ch not in _THE_END: + ch = srp() + if ch == '\0': # don't gobble the end-of-stream character + # but add an explicit newline as "YAML processors should terminate + # the stream with an explicit line break + # https://yaml.org/spec/1.2/spec.html#id2780069 + comment += '\n' + break + comment += ch + srf() + # gather any blank lines following the comment too + ch = self.scan_line_break() + while len(ch) > 0: + comment += ch + ch = self.scan_line_break() + end_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + return comment, start_mark, end_mark + if bool(self.scan_line_break()): + start_mark = self.reader.get_mark() + if not self.flow_level: + self.allow_simple_key = True + ch = srp() + if ch == '\n': # empty toplevel lines + start_mark = self.reader.get_mark() + comment = "" + while ch: + ch = self.scan_line_break(empty_line=True) + comment += ch + if srp() == '#': + # empty line followed by indented real comment + comment = comment.rsplit('\n', 1)[0] + '\n' + end_mark = self.reader.get_mark() + return comment, start_mark, end_mark + else: + found = True + return None + + def scan_line_break(self, empty_line=False): + # type: (bool) -> Text + # Transforms: + # '\r\n' : '\n' + # '\r' : '\n' + # '\n' : '\n' + # '\x85' : '\n' + # '\u2028' : '\u2028' + # '\u2029 : '\u2029' + # default : '' + ch = self.reader.peek() # type: Text + if ch in '\r\n\x85': + if self.reader.prefix(2) == '\r\n': + self.reader.forward(2) + else: + self.reader.forward() + return '\n' + elif ch in '\u2028\u2029': + self.reader.forward() + return ch + elif empty_line and ch in '\t ': + self.reader.forward() + return ch + return "" + + def scan_block_scalar(self, style, rt=True): + # type: (Any, Optional[bool]) -> Any + return Scanner.scan_block_scalar(self, style, rt=rt) + + +# try: +# import psyco +# psyco.bind(Scanner) +# except ImportError: +# pass diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py new file mode 100644 index 0000000000..522e9e9ab1 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/serializer.py @@ -0,0 +1,240 @@ +# coding: utf-8 + +from __future__ import absolute_import + +from ...ruamel.yaml.error import YAMLError +from ...ruamel.yaml.compat import nprint, DBG_NODE, dbg, string_types, nprintf # NOQA +from ...ruamel.yaml.util import RegExp + +from ...ruamel.yaml.events import ( + StreamStartEvent, + StreamEndEvent, + MappingStartEvent, + MappingEndEvent, + SequenceStartEvent, + SequenceEndEvent, + AliasEvent, + ScalarEvent, + DocumentStartEvent, + DocumentEndEvent, +) +from ...ruamel.yaml.nodes import MappingNode, ScalarNode, SequenceNode + +if False: # MYPY + from typing import Any, Dict, Union, Text, Optional # NOQA + from ...ruamel.yaml.compat import VersionType # NOQA + +__all__ = ['Serializer', 'SerializerError'] + + +class SerializerError(YAMLError): + pass + + +class Serializer(object): + + # 'id' and 3+ numbers, but not 000 + ANCHOR_TEMPLATE = u'id%03d' + ANCHOR_RE = RegExp(u'id(?!000$)\\d{3,}') + + def __init__( + self, + encoding=None, + explicit_start=None, + explicit_end=None, + 
version=None, + tags=None, + dumper=None, + ): + # type: (Any, Optional[bool], Optional[bool], Optional[VersionType], Any, Any) -> None # NOQA + self.dumper = dumper + if self.dumper is not None: + self.dumper._serializer = self + self.use_encoding = encoding + self.use_explicit_start = explicit_start + self.use_explicit_end = explicit_end + if isinstance(version, string_types): + self.use_version = tuple(map(int, version.split('.'))) + else: + self.use_version = version # type: ignore + self.use_tags = tags + self.serialized_nodes = {} # type: Dict[Any, Any] + self.anchors = {} # type: Dict[Any, Any] + self.last_anchor_id = 0 + self.closed = None # type: Optional[bool] + self._templated_id = None + + @property + def emitter(self): + # type: () -> Any + if hasattr(self.dumper, 'typ'): + return self.dumper.emitter + return self.dumper._emitter + + @property + def resolver(self): + # type: () -> Any + if hasattr(self.dumper, 'typ'): + self.dumper.resolver + return self.dumper._resolver + + def open(self): + # type: () -> None + if self.closed is None: + self.emitter.emit(StreamStartEvent(encoding=self.use_encoding)) + self.closed = False + elif self.closed: + raise SerializerError('serializer is closed') + else: + raise SerializerError('serializer is already opened') + + def close(self): + # type: () -> None + if self.closed is None: + raise SerializerError('serializer is not opened') + elif not self.closed: + self.emitter.emit(StreamEndEvent()) + self.closed = True + + # def __del__(self): + # self.close() + + def serialize(self, node): + # type: (Any) -> None + if dbg(DBG_NODE): + nprint('Serializing nodes') + node.dump() + if self.closed is None: + raise SerializerError('serializer is not opened') + elif self.closed: + raise SerializerError('serializer is closed') + self.emitter.emit( + DocumentStartEvent( + explicit=self.use_explicit_start, version=self.use_version, tags=self.use_tags + ) + ) + self.anchor_node(node) + self.serialize_node(node, None, None) + self.emitter.emit(DocumentEndEvent(explicit=self.use_explicit_end)) + self.serialized_nodes = {} + self.anchors = {} + self.last_anchor_id = 0 + + def anchor_node(self, node): + # type: (Any) -> None + if node in self.anchors: + if self.anchors[node] is None: + self.anchors[node] = self.generate_anchor(node) + else: + anchor = None + try: + if node.anchor.always_dump: + anchor = node.anchor.value + except: # NOQA + pass + self.anchors[node] = anchor + if isinstance(node, SequenceNode): + for item in node.value: + self.anchor_node(item) + elif isinstance(node, MappingNode): + for key, value in node.value: + self.anchor_node(key) + self.anchor_node(value) + + def generate_anchor(self, node): + # type: (Any) -> Any + try: + anchor = node.anchor.value + except: # NOQA + anchor = None + if anchor is None: + self.last_anchor_id += 1 + return self.ANCHOR_TEMPLATE % self.last_anchor_id + return anchor + + def serialize_node(self, node, parent, index): + # type: (Any, Any, Any) -> None + alias = self.anchors[node] + if node in self.serialized_nodes: + self.emitter.emit(AliasEvent(alias)) + else: + self.serialized_nodes[node] = True + self.resolver.descend_resolver(parent, index) + if isinstance(node, ScalarNode): + # here check if the node.tag equals the one that would result from parsing + # if not equal quoting is necessary for strings + detected_tag = self.resolver.resolve(ScalarNode, node.value, (True, False)) + default_tag = self.resolver.resolve(ScalarNode, node.value, (False, True)) + implicit = ( + (node.tag == detected_tag), + 
(node.tag == default_tag), + node.tag.startswith('tag:yaml.org,2002:'), + ) + self.emitter.emit( + ScalarEvent( + alias, + node.tag, + implicit, + node.value, + style=node.style, + comment=node.comment, + ) + ) + elif isinstance(node, SequenceNode): + implicit = node.tag == self.resolver.resolve(SequenceNode, node.value, True) + comment = node.comment + end_comment = None + seq_comment = None + if node.flow_style is True: + if comment: # eol comment on flow style sequence + seq_comment = comment[0] + # comment[0] = None + if comment and len(comment) > 2: + end_comment = comment[2] + else: + end_comment = None + self.emitter.emit( + SequenceStartEvent( + alias, + node.tag, + implicit, + flow_style=node.flow_style, + comment=node.comment, + ) + ) + index = 0 + for item in node.value: + self.serialize_node(item, node, index) + index += 1 + self.emitter.emit(SequenceEndEvent(comment=[seq_comment, end_comment])) + elif isinstance(node, MappingNode): + implicit = node.tag == self.resolver.resolve(MappingNode, node.value, True) + comment = node.comment + end_comment = None + map_comment = None + if node.flow_style is True: + if comment: # eol comment on flow style sequence + map_comment = comment[0] + # comment[0] = None + if comment and len(comment) > 2: + end_comment = comment[2] + self.emitter.emit( + MappingStartEvent( + alias, + node.tag, + implicit, + flow_style=node.flow_style, + comment=node.comment, + nr_items=len(node.value), + ) + ) + for key, value in node.value: + self.serialize_node(key, node, None) + self.serialize_node(value, node, key) + self.emitter.emit(MappingEndEvent(comment=[map_comment, end_comment])) + self.resolver.ascend_resolver() + + +def templated_id(s): + # type: (Text) -> Any + return Serializer.ANCHOR_RE.match(s) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py new file mode 100644 index 0000000000..e44db44d08 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/timestamp.py @@ -0,0 +1,54 @@ + +# coding: utf-8 + +from __future__ import print_function, absolute_import, division, unicode_literals + +import datetime +import copy + +# ToDo: at least on PY3 you could probably attach the tzinfo correctly to the object +# a more complete datetime might be used by safe loading as well + +if False: # MYPY + from typing import Any, Dict, Optional, List # NOQA + + +class TimeStamp(datetime.datetime): + def __init__(self, *args, **kw): + # type: (Any, Any) -> None + self._yaml = dict(t=False, tz=None, delta=0) # type: Dict[Any, Any] + + def __new__(cls, *args, **kw): # datetime is immutable + # type: (Any, Any) -> Any + return datetime.datetime.__new__(cls, *args, **kw) # type: ignore + + def __deepcopy__(self, memo): + # type: (Any) -> Any + ts = TimeStamp(self.year, self.month, self.day, self.hour, self.minute, self.second) + ts._yaml = copy.deepcopy(self._yaml) + return ts + + def replace(self, year=None, month=None, day=None, hour=None, + minute=None, second=None, microsecond=None, tzinfo=True, + fold=None): + if year is None: + year = self.year + if month is None: + month = self.month + if day is None: + day = self.day + if hour is None: + hour = self.hour + if minute is None: + minute = self.minute + if second is None: + second = self.second + if microsecond is None: + microsecond = self.microsecond + if tzinfo is True: + tzinfo = self.tzinfo + if fold is None: + fold = 
self.fold + ts = type(self)(year, month, day, hour, minute, second, microsecond, tzinfo, fold=fold) + ts._yaml = copy.deepcopy(self._yaml) + return ts diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py new file mode 100644 index 0000000000..5f5a663534 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/tokens.py @@ -0,0 +1,286 @@ +# # header +# coding: utf-8 + +from __future__ import unicode_literals + +if False: # MYPY + from typing import Text, Any, Dict, Optional, List # NOQA + from .error import StreamMark # NOQA + +SHOWLINES = True + + +class Token(object): + __slots__ = 'start_mark', 'end_mark', '_comment' + + def __init__(self, start_mark, end_mark): + # type: (StreamMark, StreamMark) -> None + self.start_mark = start_mark + self.end_mark = end_mark + + def __repr__(self): + # type: () -> Any + # attributes = [key for key in self.__slots__ if not key.endswith('_mark') and + # hasattr('self', key)] + attributes = [key for key in self.__slots__ if not key.endswith('_mark')] + attributes.sort() + arguments = ', '.join(['%s=%r' % (key, getattr(self, key)) for key in attributes]) + if SHOWLINES: + try: + arguments += ', line: ' + str(self.start_mark.line) + except: # NOQA + pass + try: + arguments += ', comment: ' + str(self._comment) + except: # NOQA + pass + return '{}({})'.format(self.__class__.__name__, arguments) + + def add_post_comment(self, comment): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None] + self._comment[0] = comment + + def add_pre_comments(self, comments): + # type: (Any) -> None + if not hasattr(self, '_comment'): + self._comment = [None, None] + assert self._comment[1] is None + self._comment[1] = comments + + def get_comment(self): + # type: () -> Any + return getattr(self, '_comment', None) + + @property + def comment(self): + # type: () -> Any + return getattr(self, '_comment', None) + + def move_comment(self, target, empty=False): + # type: (Any, bool) -> Any + """move a comment from this token to target (normally next token) + used to combine e.g. comments before a BlockEntryToken to the + ScalarToken that follows it + empty is a special for empty values -> comment after key + """ + c = self.comment + if c is None: + return + # don't push beyond last element + if isinstance(target, (StreamEndToken, DocumentStartToken)): + return + delattr(self, '_comment') + tc = target.comment + if not tc: # target comment, just insert + # special for empty value in key: value issue 25 + if empty: + c = [c[0], c[1], None, None, c[0]] + target._comment = c + # nprint('mco2:', self, target, target.comment, empty) + return self + if c[0] and tc[0] or c[1] and tc[1]: + raise NotImplementedError('overlap in comment %r %r' % (c, tc)) + if c[0]: + tc[0] = c[0] + if c[1]: + tc[1] = c[1] + return self + + def split_comment(self): + # type: () -> Any + """ split the post part of a comment, and return it + as comment to be added. 
Delete second part if [None, None] + abc: # this goes to sequence + # this goes to first element + - first element + """ + comment = self.comment + if comment is None or comment[0] is None: + return None # nothing to do + ret_val = [comment[0], None] + if comment[1] is None: + delattr(self, '_comment') + return ret_val + + +# class BOMToken(Token): +# id = '' + + +class DirectiveToken(Token): + __slots__ = 'name', 'value' + id = '' + + def __init__(self, name, value, start_mark, end_mark): + # type: (Any, Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.name = name + self.value = value + + +class DocumentStartToken(Token): + __slots__ = () + id = '' + + +class DocumentEndToken(Token): + __slots__ = () + id = '' + + +class StreamStartToken(Token): + __slots__ = ('encoding',) + id = '' + + def __init__(self, start_mark=None, end_mark=None, encoding=None): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.encoding = encoding + + +class StreamEndToken(Token): + __slots__ = () + id = '' + + +class BlockSequenceStartToken(Token): + __slots__ = () + id = '' + + +class BlockMappingStartToken(Token): + __slots__ = () + id = '' + + +class BlockEndToken(Token): + __slots__ = () + id = '' + + +class FlowSequenceStartToken(Token): + __slots__ = () + id = '[' + + +class FlowMappingStartToken(Token): + __slots__ = () + id = '{' + + +class FlowSequenceEndToken(Token): + __slots__ = () + id = ']' + + +class FlowMappingEndToken(Token): + __slots__ = () + id = '}' + + +class KeyToken(Token): + __slots__ = () + id = '?' + + # def x__repr__(self): + # return 'KeyToken({})'.format( + # self.start_mark.buffer[self.start_mark.index:].split(None, 1)[0]) + + +class ValueToken(Token): + __slots__ = () + id = ':' + + +class BlockEntryToken(Token): + __slots__ = () + id = '-' + + +class FlowEntryToken(Token): + __slots__ = () + id = ',' + + +class AliasToken(Token): + __slots__ = ('value',) + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + +class AnchorToken(Token): + __slots__ = ('value',) + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + +class TagToken(Token): + __slots__ = ('value',) + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + +class ScalarToken(Token): + __slots__ = 'value', 'plain', 'style' + id = '' + + def __init__(self, value, plain, start_mark, end_mark, style=None): + # type: (Any, Any, Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + self.plain = plain + self.style = style + + +class CommentToken(Token): + __slots__ = 'value', 'pre_done' + id = '' + + def __init__(self, value, start_mark, end_mark): + # type: (Any, Any, Any) -> None + Token.__init__(self, start_mark, end_mark) + self.value = value + + def reset(self): + # type: () -> None + if hasattr(self, 'pre_done'): + delattr(self, 'pre_done') + + def __repr__(self): + # type: () -> Any + v = '{!r}'.format(self.value) + if SHOWLINES: + try: + v += ', line: ' + str(self.start_mark.line) + v += ', col: ' + str(self.start_mark.column) + except: # NOQA + pass + return 'CommentToken({})'.format(v) + + def __eq__(self, other): + # type: (Any) -> bool + if self.start_mark != other.start_mark: + return False 
+ if self.end_mark != other.end_mark: + return False + if self.value != other.value: + return False + return True + + def __ne__(self, other): + # type: (Any) -> bool + return not self.__eq__(other) diff --git a/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py new file mode 100644 index 0000000000..1788254924 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/contrib/ruamel_yaml/ruamel/yaml/util.py @@ -0,0 +1,190 @@ +# coding: utf-8 + +""" +some helper functions that might be generally useful +""" + +from __future__ import absolute_import, print_function + +from functools import partial +import re + +from .compat import text_type, binary_type + +if False: # MYPY + from typing import Any, Dict, Optional, List, Text # NOQA + from .compat import StreamTextType # NOQA + + +class LazyEval(object): + """ + Lightweight wrapper around lazily evaluated func(*args, **kwargs). + + func is only evaluated when any attribute of its return value is accessed. + Every attribute access is passed through to the wrapped value. + (This only excludes special cases like method-wrappers, e.g., __hash__.) + The sole additional attribute is the lazy_self function which holds the + return value (or, prior to evaluation, func and arguments), in its closure. + """ + + def __init__(self, func, *args, **kwargs): + # type: (Any, Any, Any) -> None + def lazy_self(): + # type: () -> Any + return_value = func(*args, **kwargs) + object.__setattr__(self, 'lazy_self', lambda: return_value) + return return_value + + object.__setattr__(self, 'lazy_self', lazy_self) + + def __getattribute__(self, name): + # type: (Any) -> Any + lazy_self = object.__getattribute__(self, 'lazy_self') + if name == 'lazy_self': + return lazy_self + return getattr(lazy_self(), name) + + def __setattr__(self, name, value): + # type: (Any, Any) -> None + setattr(self.lazy_self(), name, value) + + +RegExp = partial(LazyEval, re.compile) + + +# originally as comment +# https://github.com/pre-commit/pre-commit/pull/211#issuecomment-186466605 +# if you use this in your code, I suggest adding a test in your test suite +# that check this routines output against a known piece of your YAML +# before upgrades to this code break your round-tripped YAML +def load_yaml_guess_indent(stream, **kw): + # type: (StreamTextType, Any) -> Any + """guess the indent and block sequence indent of yaml stream/string + + returns round_trip_loaded stream, indent level, block sequence indent + - block sequence indent is the number of spaces before a dash relative to previous indent + - if there are no block sequences, indent is taken from nested mappings, block sequence + indent is unset (None) in that case + """ + from .main import round_trip_load + + # load a YAML document, guess the indentation, if you use TABs you're on your own + def leading_spaces(line): + # type: (Any) -> int + idx = 0 + while idx < len(line) and line[idx] == ' ': + idx += 1 + return idx + + if isinstance(stream, text_type): + yaml_str = stream # type: Any + elif isinstance(stream, binary_type): + # most likely, but the Reader checks BOM for this + yaml_str = stream.decode('utf-8') + else: + yaml_str = stream.read() + map_indent = None + indent = None # default if not found for some reason + block_seq_indent = None + prev_line_key_only = None + key_indent = 0 + for line in yaml_str.splitlines(): + rline = line.rstrip() + lline = rline.lstrip() + if 
lline.startswith('- '): + l_s = leading_spaces(line) + block_seq_indent = l_s - key_indent + idx = l_s + 1 + while line[idx] == ' ': # this will end as we rstripped + idx += 1 + if line[idx] == '#': # comment after - + continue + indent = idx - key_indent + break + if map_indent is None and prev_line_key_only is not None and rline: + idx = 0 + while line[idx] in ' -': + idx += 1 + if idx > prev_line_key_only: + map_indent = idx - prev_line_key_only + if rline.endswith(':'): + key_indent = leading_spaces(line) + idx = 0 + while line[idx] == ' ': # this will end on ':' + idx += 1 + prev_line_key_only = idx + continue + prev_line_key_only = None + if indent is None and map_indent is not None: + indent = map_indent + return round_trip_load(yaml_str, **kw), indent, block_seq_indent + + +def configobj_walker(cfg): + # type: (Any) -> Any + """ + walks over a ConfigObj (INI file with comments) generating + corresponding YAML output (including comments + """ + from configobj import ConfigObj # type: ignore + + assert isinstance(cfg, ConfigObj) + for c in cfg.initial_comment: + if c.strip(): + yield c + for s in _walk_section(cfg): + if s.strip(): + yield s + for c in cfg.final_comment: + if c.strip(): + yield c + + +def _walk_section(s, level=0): + # type: (Any, int) -> Any + from configobj import Section + + assert isinstance(s, Section) + indent = u' ' * level + for name in s.scalars: + for c in s.comments[name]: + yield indent + c.strip() + x = s[name] + if u'\n' in x: + i = indent + u' ' + x = u'|\n' + i + x.strip().replace(u'\n', u'\n' + i) + elif ':' in x: + x = u"'" + x.replace(u"'", u"''") + u"'" + line = u'{0}{1}: {2}'.format(indent, name, x) + c = s.inline_comments[name] + if c: + line += u' ' + c + yield line + for name in s.sections: + for c in s.comments[name]: + yield indent + c.strip() + line = u'{0}{1}:'.format(indent, name) + c = s.inline_comments[name] + if c: + line += u' ' + c + yield line + for val in _walk_section(s[name], level=level + 1): + yield val + + +# def config_obj_2_rt_yaml(cfg): +# from .comments import CommentedMap, CommentedSeq +# from configobj import ConfigObj +# assert isinstance(cfg, ConfigObj) +# #for c in cfg.initial_comment: +# # if c.strip(): +# # pass +# cm = CommentedMap() +# for name in s.sections: +# cm[name] = d = CommentedMap() +# +# +# #for c in cfg.final_comment: +# # if c.strip(): +# # yield c +# return cm diff --git a/insights/client/apps/ansible/playbook_verifier/public.gpg b/insights/client/apps/ansible/playbook_verifier/public.gpg new file mode 100644 index 0000000000..676a90fa90 --- /dev/null +++ b/insights/client/apps/ansible/playbook_verifier/public.gpg @@ -0,0 +1,29 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v2.0.22 (GNU/Linux) + +mQINBGBCnLUBEACkdDE/r1VvmVV6ZzLT0B1MjUftIonMSsC5dqMpSNnFg4apxGWw +YtQQrpxLcMbLjG3F3823SfGHBXk8VJRsJNi2lHmCDmuj4KkyWxP6t8a4jfDLoXU/ +DfO2OG7zAhss+OEDmEO6qzQPVnlVIWdQryeUTrkZLmnGtKI0kaDQaOUYWrj9+9cj +Z50svXgMlK7PnFrp+af6Vp3ul312dZKyccVdQ+bVZHVNclddd1ONThAM74+rhnZt +TvmgaWDobdbH7jGi//lXEtrAtOOBB5ohmqMIyNCzonFFG82fhw8U2nwE6CbwRLIN +aZcB0BbkJ2a62JEBs72fZ8ridOSmec6nsPrslT+w/oUU6Xap1XSbkpCBmy/vAHZK +TAYw8jDFJcZjLRJtJBQD/J3ep5/tAJsTcCXRWoisdxq5sOLWCHTDHRd3mleEzdDV +/pzFWK7IWMpaO4kr3nxGujCFgO4u/a0RwqcikbOjh80nIBPQA+D5qBBEYR8RNapn +ZcURGaoWqxm687AyvJJ4x7Ng3KYENl1vk4S45YJMbO4B+GQuNUzHIdJN7l6Fz+qB +fGHElMF0fHMaqBMcfMsTM88J6OejgxHyO2f8xsBGdaJDoNOcuXgtHRowwF/DfRue +rNbylIE+WTDn8q+biDcDHBqvcMCv93AMYV/STYGsm5nWD4lVUNLs/jIjYQARAQAB +tDRSZWQgSGF0LCBJbmMuIChQbGF5Ym9vayBLZXkgMSkgPHNlY3VyaXR5QHJlZGhh 
+dC5jb20+iQI3BBMBAgAhBQJgQpy1AhsDBgsJCAcDAgYVCAIJCgsDFgIBAh4BAheA +AAoJEMvw58D+j5pNgGUP/1dFIjBbiMFVW7yR4I1SZEFAkqrpklzmHz/eCMKPVDvT +TGcTaQSamxYgsbuS+2bny6vIl5lJ4gPYsDJ2ss5kYLx7JfN+rJT/rMVV+t+E60U5 +UsW2zBnLH906kBDhFYr9YgAQ4Svd2WCCV6HHBOJuKxWJY/1QB7DnZOpX59gzb2dc +AyeDKwSeTpgRdDOhNC5T96g14OuaNGlnOwNJ55Hqx2xs/C0O54Zqftu4JDHsjcA5 +Ec0wiheVH8oSB03AY1lDdx9SmOfLg9BiTL99+Zqoggzwc8nLXvqnzL7vrxit9k4E +RZiJkEI6micIzpdnjY8SnsXpayYEI0r+f+4vJcQt4rQnqzgE70mJZtX2c6wDVuiB +lKcmWnu4gnSbA/HdwDVyAIPe59r4ASZ1yX6ylfesr5MGAgsw0n2eLO8MeuuWoqFc +lCTps3I9n0W+9b1mmVrBKjLd2QhwToBIUXnl79HhILRm5IQxNE02owfD2crL8CdF +btzuuxSm7+V35y7bvsWhl2WoVuUT3Cgs8QEveejbZMG5q0m58SXItTr7d9cqpERY +s4iD1gttM/UES8COjp4zM4aOnqqlY+/96LJujN2MVeY6gY12y9ykg5Y9CzQdySTB +7+S7LMgwT6P8Af/xTYCujcUruY93AdcvDFHWM3zjIUVhEOvJCjHoiSwpRJqiqOVQ +=thvC +-----END PGP PUBLIC KEY BLOCK----- diff --git a/insights/client/apps/aws/__init__.py b/insights/client/apps/aws/__init__.py deleted file mode 100644 index 6fc01b4fec..0000000000 --- a/insights/client/apps/aws/__init__.py +++ /dev/null @@ -1,131 +0,0 @@ -import logging -import base64 -import json -from requests import ConnectionError, Timeout -from requests.exceptions import HTTPError, MissingSchema -from ssl import SSLError -from urllib3.exceptions import MaxRetryError -from insights.client.connection import InsightsConnection -from insights.client.schedule import get_scheduler -from insights.client.constants import InsightsConstants as constants -from insights.client.utilities import write_to_disk - -logger = logging.getLogger(__name__) -net_logger = logging.getLogger('network') - -IDENTITY_URI = 'http://169.254.169.254/latest/dynamic/instance-identity' -IDENTITY_DOC_URI = IDENTITY_URI + '/document' -IDENTITY_PKCS7_URI = IDENTITY_URI + '/pkcs7' - - -def aws_main(config): - ''' - Process AWS entitlements with Hydra - ''' - if config.authmethod != 'BASIC': - logger.error('AWS entitlement is only available when BASIC auth is used.\n' - 'Set auto_config=False and authmethod=BASIC in %s.', config.conf) - return False - # workaround for a workaround - # the hydra API doesn't accept the legacy cert - # and legacy_upload=False currently just - # redirects to the classic API with /platform added - # so if doing AWS entitlement, use cert_verify=True - config.cert_verify = True - conn = InsightsConnection(config) - - bundle = get_aws_identity(conn) - if not bundle: - return False - succ = post_to_hydra(conn, bundle) - if not succ: - return False - # register with insights if this option - # wasn't specified - if not config.portal_access_no_insights: - enable_delayed_registration(config) - return True - - -def get_uri(conn, uri): - ''' - Fetch information from URIs - ''' - try: - net_logger.info('GET %s', uri) - res = conn.session.get(uri, timeout=conn.config.http_timeout) - except (ConnectionError, Timeout) as e: - logger.error(e) - logger.error('Could not reach %s', uri) - return None - net_logger.info('Status code: %s', res.status_code) - return res - - -def get_aws_identity(conn): - ''' - Get data from AWS - ''' - logger.info('Fetching AWS identity information.') - doc_res = get_uri(conn, IDENTITY_DOC_URI) - pkcs7_res = get_uri(conn, IDENTITY_PKCS7_URI) - if not (doc_res and pkcs7_res) or not (doc_res.ok and pkcs7_res.ok): - logger.error('Error getting identity information.') - return None - logger.debug('Identity information obtained successfully.') - identity_doc = base64.b64encode(doc_res.content) - - return { - 'document': identity_doc.decode('utf-8'), - 'pkcs7': pkcs7_res.text - } - - -def 
post_to_hydra(conn, data): - ''' - Post data to Hydra - ''' - logger.info('Submitting identity information to Red Hat.') - hydra_endpoint = conn.config.portal_access_hydra_url - - # POST to hydra - try: - json_data = json.dumps(data) - net_logger.info('POST %s', hydra_endpoint) - net_logger.info('POST body: %s', json_data) - res = conn.session.post(hydra_endpoint, data=json_data, timeout=conn.config.http_timeout) - except MissingSchema as e: - logger.error(e) - return False - except (ConnectionError, Timeout, SSLError, MaxRetryError) as e: - logger.error(e) - logger.error('Could not reach %s', hydra_endpoint) - return False - net_logger.info('Status code: %s', res.status_code) - try: - res.raise_for_status() - except HTTPError as e: - # if failure, - # error, return False - logger.error(e) - try: - res_json = res.json() - err_msg = res_json.get('message', '') - err_details = res_json.get('detailMessage', '') - logger.error('%s\n%s', err_msg, err_details) - except ValueError: - logger.error('Could not parse JSON response.') - return False - logger.info('Entitlement information has been sent.') - return True - - -def enable_delayed_registration(config): - ''' - Write a marker file to allow client to know that - it should attempt to register when it runs - ''' - logger.debug('Writing to %s', constants.register_marker_file) - write_to_disk(constants.register_marker_file) - job = get_scheduler(config) - job.set_daily() diff --git a/insights/client/apps/compliance/__init__.py b/insights/client/apps/compliance/__init__.py index 862f501114..3467f9f33e 100644 --- a/insights/client/apps/compliance/__init__.py +++ b/insights/client/apps/compliance/__init__.py @@ -2,18 +2,45 @@ from insights.client.archive import InsightsArchive from insights.client.connection import InsightsConnection from insights.client.constants import InsightsConstants as constants -from insights.util.canonical_facts import get_canonical_facts +from insights.client.utilities import os_release_info from logging import getLogger -from platform import linux_distribution from re import findall from sys import exit +import tempfile from insights.util.subproc import call import os +import os.path +import pkgutil +import six +import sys +import yaml + +# Since XPath expression is not supported by the ElementTree in Python 2.6, +# import insights.contrib.ElementTree when running python is prior to 2.6 for compatibility. +# Script insights.contrib.ElementTree is the same with xml.etree.ElementTree in Python 2.7.14 +# Otherwise, import defusedxml.ElementTree to avoid XML vulnerabilities, +# if dependency not installed import xml.etree.ElementTree instead. 
+if sys.version_info[0] == 2 and sys.version_info[1] <= 6: + import insights.contrib.ElementTree as ET +else: + try: + import defusedxml.ElementTree as ET + except: + import xml.etree.ElementTree as ET NONCOMPLIANT_STATUS = 2 +OUT_OF_MEMORY_STATUS = -9 # 247 COMPLIANCE_CONTENT_TYPE = 'application/vnd.redhat.compliance.something+tgz' POLICY_FILE_LOCATION = '/usr/share/xml/scap/ssg/content/' -REQUIRED_PACKAGES = ['scap-security-guide', 'openscap-scanner', 'openscap'] +SCAP_DATASTREAMS_PATH = '/usr/share/xml/scap/' +SSG_PACKAGE = 'scap-security-guide' +REQUIRED_PACKAGES = [SSG_PACKAGE, 'openscap-scanner', 'openscap'] +OOM_ERROR_LINK = 'https://access.redhat.com/articles/6999111' + +# SSG versions that need the in XML repaired +VERSIONS_FOR_REPAIR = '0.1.18 0.1.19 0.1.21 0.1.25'.split() +SNIPPET_TO_FIX = '0.9' + logger = getLogger(__name__) @@ -21,66 +48,228 @@ class ComplianceClient: def __init__(self, config): self.config = config self.conn = InsightsConnection(config) - self.hostname = get_canonical_facts().get('fqdn', '') self.archive = InsightsArchive(config) + self._ssg_version = None def oscap_scan(self): + self.inventory_id = self._get_inventory_id() self._assert_oscap_rpms_exist() - policies = self.get_policies() - if not policies: - logger.error("System is not associated with any profiles. Assign profiles by either uploading a SCAP scan or using the compliance web UI.\n") + initial_profiles = self.get_initial_profiles() + matching_os_profiles = self.get_profiles_matching_os() + profiles = self.profile_union_by_ref_id(matching_os_profiles, initial_profiles) + if not profiles: + logger.error("System is not associated with any profiles. Assign profiles using the Compliance web UI.\n") exit(constants.sig_kill_bad) - profile_ref_ids = [policy['ref_id'] for policy in policies] - for profile_ref_id in profile_ref_ids: + + archive_dir = self.archive.create_archive_dir() + results_need_repair = self.results_need_repair() + + for profile in profiles: + tailoring_file = self.download_tailoring_file(profile) + results_file = self._results_file(archive_dir, profile) self.run_scan( - profile_ref_id, - self.find_scap_policy(profile_ref_id), - '/var/tmp/oscap_results-{0}.xml'.format(profile_ref_id) + profile['attributes']['ref_id'], + self.find_scap_policy(profile['attributes']['ref_id']), + results_file, + tailoring_file_path=tailoring_file ) + if self.config.obfuscate: + tree = ET.parse(results_file) + # Retrieve the list of xpaths that need to be obfuscated + xpaths = yaml.load(pkgutil.get_data('insights', 'compliance_obfuscations.yaml'), Loader=yaml.SafeLoader) + # Obfuscate IP addresses in the XCCDF report + self.obfuscate(tree, xpaths['obfuscate']) + if self.config.obfuscate_hostname: + # Obfuscate the hostname in the XCCDF report + self.obfuscate(tree, xpaths['obfuscate_hostname']) + # Overwrite the results file with the obfuscations + tree.write(results_file) + if results_need_repair: + self.repair_results(results_file) + if tailoring_file: + os.remove(tailoring_file) return self.archive.create_tar_file(), COMPLIANCE_CONTENT_TYPE - # TODO: Not a typo! 
This endpoint gives OSCAP policies, not profiles - # We need to update compliance-backend to fix this - def get_policies(self): - response = self.conn.session.get("https://{0}/compliance/systems".format(self.config.base_url), params={'search': 'name={0}'.format(self.hostname)}) + def download_tailoring_file(self, profile): + if ('tailored' not in profile['attributes'] or profile['attributes']['tailored'] is False or + ('os_minor_version' in profile['attributes'] and profile['attributes']['os_minor_version'] != self.os_minor_version())): + return None + + # Download tailoring file to pass as argument to run_scan + logger.debug( + "Policy {0} is a tailored policy. Starting tailoring file download...".format(profile['attributes']['ref_id']) + ) + tailoring_file_path = tempfile.mkstemp( + prefix='oscap_tailoring_file-{0}.'.format(profile['attributes']['ref_id']), + suffix='.xml', + dir='/var/tmp' + )[1] + response = self.conn.session.get( + "https://{0}/compliance/profiles/{1}/tailoring_file".format(self.config.base_url, profile['id']) + ) + logger.debug("Response code: {0}".format(response.status_code)) + if response.content is None: + logger.info("Problem downloading tailoring file for {0} to {1}".format(profile['attributes']['ref_id'], tailoring_file_path)) + return None + + with open(tailoring_file_path, mode="w+b") as f: + f.write(response.content) + logger.info("Saved tailoring file for {0} to {1}".format(profile['attributes']['ref_id'], tailoring_file_path)) + + logger.debug("Policy {0} tailoring file download finished".format(profile['attributes']['ref_id'])) + + return tailoring_file_path + + def get_profiles(self, search): + response = self.conn.session.get("https://{0}/compliance/profiles".format(self.config.base_url), + params={'search': search, 'relationships': 'false'}) + logger.debug("Content of the response: {0} - {1}".format(response, + response.json())) if response.status_code == 200: - return (response.json().get('data') or [{}])[0].get('attributes', {}).get('profiles', []) + return (response.json().get('data') or []) else: return [] + def get_initial_profiles(self): + return self.get_profiles('system_ids={0} canonical=false external=false'.format(self.inventory_id)) + + def get_profiles_matching_os(self): + return self.get_profiles('system_ids={0} canonical=false os_minor_version={1}'.format(self.inventory_id, self.os_minor_version())) + + def profile_union_by_ref_id(self, prioritized_profiles, merged_profiles): + profiles = dict((p['attributes']['ref_id'], p) for p in merged_profiles) + profiles.update(dict((p['attributes']['ref_id'], p) for p in prioritized_profiles)) + + return list(profiles.values()) + def os_release(self): - _, version, _ = linux_distribution() - return findall("^[6-8]", version)[0] + _, version = os_release_info() + return version + + def os_major_version(self): + return findall("^[6-9]", self.os_release())[0] + + def os_minor_version(self): + return findall("\d+$", self.os_release())[0] def profile_files(self): - return glob("{0}*rhel{1}*.xml".format(POLICY_FILE_LOCATION, self.os_release())) + return glob("{0}*rhel{1}-ds.xml".format(POLICY_FILE_LOCATION, self.os_major_version())) def find_scap_policy(self, profile_ref_id): - rc, grep = call('grep ' + profile_ref_id + ' ' + ' '.join(self.profile_files()), keep_rc=True) + grepcmd = 'grep -H ' + profile_ref_id + ' ' + ' '.join(self.profile_files()) + if not six.PY3: + grepcmd = grepcmd.encode() + rc, grep = call(grepcmd, keep_rc=True) if rc: logger.error('XML profile file not found matching ref_id 
{0}\n{1}\n'.format(profile_ref_id, grep)) - exit(constants.sig_kill_bad) - filenames = findall('/usr/share/xml/scap/.+xml', grep) + return None + filenames = findall(SCAP_DATASTREAMS_PATH + '.+xml', grep) if not filenames: logger.error('No XML profile files found matching ref_id {0}\n{1}\n'.format(profile_ref_id, ' '.join(filenames))) exit(constants.sig_kill_bad) return filenames[0] - def run_scan(self, profile_ref_id, policy_xml, output_path): + def build_oscap_command(self, profile_ref_id, policy_xml, output_path, tailoring_file_path): + command = 'oscap xccdf eval --profile ' + profile_ref_id + if tailoring_file_path: + command += ' --tailoring-file ' + tailoring_file_path + command += ' --results ' + output_path + ' ' + policy_xml + return command + + def run_scan(self, profile_ref_id, policy_xml, output_path, tailoring_file_path=None): + if policy_xml is None: + return logger.info('Running scan for {0}... this may take a while'.format(profile_ref_id)) env = os.environ.copy() env.update({'TZ': 'UTC'}) - rc, oscap = call('oscap xccdf eval --profile ' + profile_ref_id + ' --results ' + output_path + ' ' + policy_xml, keep_rc=True, env=env) + oscap_command = self.build_oscap_command(profile_ref_id, policy_xml, output_path, tailoring_file_path) + if not six.PY3: + oscap_command = oscap_command.encode() + rc, oscap = call(oscap_command, keep_rc=True, env=env) + + if rc and rc == OUT_OF_MEMORY_STATUS: + logger.error('Scan failed due to insufficient memory') + logger.error('More information can be found here: {0}'.format(OOM_ERROR_LINK)) + exit(constants.sig_kill_bad) + if rc and rc != NONCOMPLIANT_STATUS: logger.error('Scan failed') + logger.error(rc) logger.error(oscap) exit(constants.sig_kill_bad) - else: - self.archive.copy_file(output_path) + + @property + def ssg_version(self): + if not self._ssg_version: + self._ssg_version = self.get_ssg_version() + return self._ssg_version + + def get_ssg_version(self): + rpmcmd = 'rpm -qa --qf "%{VERSION}" ' + SSG_PACKAGE + if not six.PY3: + rpmcmd = rpmcmd.encode() + + rc, ssg_version = call(rpmcmd, keep_rc=True) + if rc: + logger.warning('Tried determinig SSG version but failed: {0}.\n'.format(ssg_version)) + return + + logger.info('System uses SSG version %s', ssg_version) + return ssg_version + + # Helper function that traverses through the XCCDF report and replaces the content of each + # matching xpath with an empty string + def obfuscate(self, tree, xpaths): + for xpath in xpaths: + for node in tree.findall(xpath): + node.text = '' + + def results_need_repair(self): + return self.ssg_version in VERSIONS_FOR_REPAIR + + def repair_results(self, results_file): + if not os.path.isfile(results_file): + return + if not self.ssg_version: + logger.warning("Couldn't repair SSG version in results file %s", results_file) + return + + results_file_in = '{0}.in'.format(results_file) + os.rename(results_file, results_file_in) + + with open(results_file_in, 'r') as in_file: + with open(results_file, 'w') as out_file: + is_repaired = self._repair_ssg_version_in_results( + in_file, out_file, self.ssg_version + ) + + os.remove(results_file_in) + if is_repaired: + logger.debug('Repaired version in results file %s', results_file) + return is_repaired + + def _repair_ssg_version_in_results(self, in_file, out_file, ssg_version): + replacement = '{0}'.format(ssg_version) + is_repaired = False + for line in in_file: + if is_repaired or SNIPPET_TO_FIX not in line: + out_file.write(line) + else: + out_file.write(line.replace(SNIPPET_TO_FIX, replacement)) + is_repaired 
= True + logger.debug( + 'Substituted "%s" with "%s" in %s', + SNIPPET_TO_FIX, replacement, out_file.name + ) + + return is_repaired def _assert_oscap_rpms_exist(self): - rc, rpm = call('rpm -qa ' + ' '.join(REQUIRED_PACKAGES), keep_rc=True) + rpmcmd = 'rpm -qa ' + ' '.join(REQUIRED_PACKAGES) + if not six.PY3: + rpmcmd = rpmcmd.encode() + rc, rpm = call(rpmcmd, keep_rc=True) if rc: logger.error('Tried running rpm -qa but failed: {0}.\n'.format(rpm)) exit(constants.sig_kill_bad) @@ -88,3 +277,17 @@ def _assert_oscap_rpms_exist(self): if len(rpm.strip().split('\n')) < len(REQUIRED_PACKAGES): logger.error('Missing required packages for compliance scanning. Please ensure the following packages are installed: {0}\n'.format(', '.join(REQUIRED_PACKAGES))) exit(constants.sig_kill_bad) + + def _get_inventory_id(self): + systems = self.conn._fetch_system_by_machine_id() + if len(systems) == 1 and 'id' in systems[0]: + return systems[0].get('id') + else: + logger.error('Failed to find system in Inventory') + exit(constants.sig_kill_bad) + + def _results_file(self, archive_dir, profile): + return os.path.join( + archive_dir, + 'oscap_results-{0}.xml'.format(profile['attributes']['ref_id']) + ) diff --git a/insights/client/apps/malware_detection/__init__.py b/insights/client/apps/malware_detection/__init__.py new file mode 100644 index 0000000000..73ccf6a024 --- /dev/null +++ b/insights/client/apps/malware_detection/__init__.py @@ -0,0 +1,1593 @@ +import os +import re +import time +import json +import yaml +from sys import exit +import logging +from glob import glob +from datetime import datetime +from tempfile import NamedTemporaryFile, gettempdir +try: + # python 2 + from urllib import quote as urlencode + from urlparse import urlparse, urlunparse +except ImportError: + # python 3 + from urllib.parse import urlparse, urlunparse, quote as urlencode + +from insights.client.connection import InsightsConnection +from insights.client.constants import InsightsConstants as constants +from insights.client.utilities import ( + generate_machine_id, write_data_to_file, get_time +) +from insights.core.exceptions import CalledProcessError +from insights.util.subproc import call + +logger = logging.getLogger(__name__) +MIN_YARA_VERSION = "4.1.0" +MALWARE_APP_URL = 'https://console.redhat.com/insights/malware' +MALWARE_CONFIG_FILE = os.path.join(constants.default_conf_dir, "malware-detection-config.yml") +LAST_FILESYSTEM_SCAN_FILE = os.path.join(constants.default_conf_dir, '.last_malware-detection_filesystem_scan') +LAST_PROCESSES_SCAN_FILE = os.path.join(constants.default_conf_dir, '.last_malware-detection_processes_scan') +DEFAULT_MALWARE_CONFIG = """ +# Configuration file for the Red Hat Insights Malware Detection Client app +# File format is YAML +--- +# Perform a simple test scan of the insights-client config directory and process to verify installation and scanning +# are working correctly. The results from this scan do not show up in the webUI. +# Once verified, disable this option to perform actual malware scans. +test_scan: true + +# Scan the filesystem? +# When it is false, the filesystem isn't scanned and the filesystem_* options that follow are ignored +scan_filesystem: true + +# filesystem_scan_only: a single or list of files/directories to be scanned and no others, for example: +# filesystem_scan_only: +# - /var/www +# - /home +# ... means only scan files in /var/www and /home. 
May also be written as filesystem_scan_only: [/var/www, /home] +# No value means scan all files and directories +filesystem_scan_only: + +# filesystem_scan_exclude: a single or list of files/directories to be excluded from filesystem scanning +# If an item appears in both filesystem_scan_only and filesystem_scan_exclude, filesystem_scan_exclude takes precedence +# and the item will be excluded +# filesystem_scan_exclude is pre-populated with a list of top level directories that are recommended to be excluded +filesystem_scan_exclude: +- /proc +- /sys +- /cgroup +- /selinux +- /net +- /mnt +- /media +- /dev + +# filesystem_scan_since: scan files created or modified since X days ago or since the 'last' scan. +# Valid values are integers >= 1 or the string 'last'. For example: +# filesystem_scan_since: 1 +# ... means scan files created/modified since 1 day ago +# filesystem_scan_since: last +# ... means scan files created/modified since the last successful scan +# No value means scan all files regardless of created/modified date +filesystem_scan_since: + +# Exclude mounted network/external filesystems mountpoints? +# Scanning files within mounted network filesystems may be slow and cause extra network traffic. +# They are excluded by default, meaning that files in network/externally mounted filesystems are not scanned. +# Their mountpoints will be added to the scan_exclude list of directories to be excluded from scanning +exclude_network_filesystem_mountpoints: true + +# List of network/external filesystem types to search for mountpoints on the system. +# If any mountpoints are found for these filesystem types, the value of the exclude_network_filesystem_mountpoints +# option will determine if files within the mountpoints are scanned or not. +network_filesystem_types: [nfs, nfs4, cifs, smbfs, fuse.sshfs, ceph, glusterfs, gfs, gfs2] + +# Scan the running processes? +# Scan_process is disabled by default to prevent an impact on system performance when scanning numerous or large processes. +# When it is false, no processes are scanned and the processes_scan_* options that follow are ignored +scan_processes: false + +# processes_scan_only: processes to be scanned and no others, for example: +# processes_scan_only: +# - 123 +# - 1..100 +# - 10000.. +# - docker +# - chrome +#... means only scan PID 123, PIDs from 1 to 100 inclusive, PIDs >= 10000 and process names containing the strings docker or chrome +# No value means scan all processes +processes_scan_only: + +# processes_scan_exclude: processes to be excluded from scanning. Uses the same syntax as processes_scan_only. +# If an item appears in both processes_scan_only and processes_scan_exclude, processes_scan_exclude takes precedence +# and the item will be excluded +# No value means don't exclude any processes +processes_scan_exclude: + +# processes_scan_since: scan processes created since X days ago or since the 'last' scan. +# Valid values are integers >= 1 or the string 'last'. For example: +# processes_scan_since: 1 +# ... means scan processes created since 1 day ago +# processes_scan_since: last +# ... means scan processes created since the last successful scan +# No value means scan all processes regardless of created date +processes_scan_since: + +# Add extra metadata about each scan match (if possible), eg file type & md5sum, matching line numbers, process name +# The extra metadata will display in the webUI along with the scan matches +add_metadata: true + +# Abort a particular scan if it takes longer than scan_timeout seconds. 
Default is 3600 seconds (1 hour) +scan_timeout: # 3600 + +# Run the yara process with this nice priority value. Default is 19 (lowest priority) +nice_value: # 19 + +# The max number of CPUs threads used by yara when scanning. Autodetected, but default is 2 +cpu_thread_limit: # 2 +""".lstrip() + +# All the config options have corresponding environment variables +# Env vars are initially strings and need to be parsed to their appropriate type to match the yaml types +ENV_VAR_TYPES = { + 'boolean': ['SCAN_FILESYSTEM', 'SCAN_PROCESSES', 'TEST_SCAN', 'ADD_METADATA', + 'EXCLUDE_NETWORK_FILESYSTEM_MOUNTPOINTS'], + 'list': ['FILESYSTEM_SCAN_ONLY', 'FILESYSTEM_SCAN_EXCLUDE', 'PROCESSES_SCAN_ONLY', 'PROCESSES_SCAN_EXCLUDE', + 'NETWORK_FILESYSTEM_TYPES'], + 'integer': ['SCAN_TIMEOUT', 'NICE_VALUE', 'CPU_THREAD_LIMIT', 'STRING_MATCH_LIMIT'], + 'int_or_str': ['FILESYSTEM_SCAN_SINCE', 'PROCESSES_SCAN_SINCE'] +} + + +class MalwareDetectionClient: + def __init__(self, insights_config): + # insights_config is used to access insights-client auth and network config when downloading rules + self.insights_config = insights_config + + # Load the malware-detection config file + self.config = self._load_config() + + # Early check if the yara binary exists. No point continuing if it doesn't + self.yara_binary = self._find_yara() + + # Get/set the values of assorted integer config values - mainly options used with the yara command + for option, value in [('nice_value', 19), + ('scan_timeout', 3600), + ('cpu_thread_limit', 2), + ('string_match_limit', 10)]: + try: + setattr(self, option, int(self._get_config_option(option, value))) + except Exception as e: + logger.error("Problem setting configuration option %s: %s", option, str(e)) + exit(constants.sig_kill_bad) + + # If doing a test scan, then ignore the other scan_* options because test scan sets its own values for them + if not self._parse_test_scan_option(): + self._parse_scan_options() + + # Obtain the rules to be used by yara + self.rules_file = self._get_rules() + + # Build the yara command, with its various command line options, that will be run + self.yara_cmd = self._build_yara_command() + + # host_scan is a dictionary into which all the scan matches are stored. Its structure is like: + # host_scan = {rule_name: [{source: ..., stringData: ..., stringIdentifier: ..., stringOffset: ...}, + # {source: ...}], + # rule_name: [{...}, {...}, {...}], + # ... 
} + # host_scan_mutation is the host_scan dict converted to a GraphQL mutation query string + self.host_scan = {} + self.host_scan_mutation = '' + + # Check if we are adding extra metadata to each scan match + self.add_metadata = self._get_config_option('add_metadata', False) + + self.matches = 0 + self.potential_matches = 0 + + def run(self): + # Start the scans and record the time they were started + filesystem_scan_start = get_time() + self.scan_filesystem() + processes_scan_start = get_time() + self.scan_processes() + + if self.do_filesystem_scan or self.do_process_scan: + # If any scans were performed then get the results as a GraphQL mutation query + # This mutation query is what is uploaded to the malware backend + host_scan_mutation = self._create_host_scan_mutation() + + # Write a message to user informing them if there were matches or not and what to do next + if self.matches == 0: + if self.potential_matches == 0: + logger.info("No rule matches found.\n") + else: + logger.info("Rule matches potentially found but problems encountered parsing them, so no match data to upload.") + logger.info("Please contact support.\n") + else: + logger.info("Found %d rule match%s.", self.matches, 'es' if self.matches > 1 else '') + if not self.test_scan: + logger.info("Please visit %s for more information\n", MALWARE_APP_URL) + + # Write the scan start times to disk if scans were performed + # (used by the 'filesystem_scan_since: last' and 'processes_scan_since: last' options) + # Only write the scan time after scans have completed without error or interruption, and its not a test scan + if not self.test_scan: + if self.do_filesystem_scan: + write_data_to_file(filesystem_scan_start, LAST_FILESYSTEM_SCAN_FILE) + os.chmod(LAST_FILESYSTEM_SCAN_FILE, 0o644) + if self.do_process_scan: + write_data_to_file(processes_scan_start, LAST_PROCESSES_SCAN_FILE) + os.chmod(LAST_PROCESSES_SCAN_FILE, 0o644) + else: + logger.info("\nRed Hat Insights malware-detection app test scan complete.\n" + "Test scan results are not recorded in the Insights UI (%s)\n" + "To perform proper scans, please set test_scan: false in %s\n", + MALWARE_APP_URL, MALWARE_CONFIG_FILE) + + # This is what is uploaded to the malware backend + return host_scan_mutation + else: + logger.error("No scans performed, no results to upload.") + exit(constants.sig_kill_bad) + + @staticmethod + def _load_config(): + # Load the malware-detection config file. Write out a default one first if it doesn't already exist + if not os.path.isfile(MALWARE_CONFIG_FILE): + logger.info("Writing the malware-detection app default configuration to %s", MALWARE_CONFIG_FILE) + write_data_to_file(DEFAULT_MALWARE_CONFIG, MALWARE_CONFIG_FILE) + os.chmod(MALWARE_CONFIG_FILE, 0o644) + + try: + with open(MALWARE_CONFIG_FILE) as m: + return yaml.safe_load(m) + except Exception as e: + logger.error("Error encountered loading the malware-detection app config file %s:\n%s", + MALWARE_CONFIG_FILE, str(e)) + exit(constants.sig_kill_bad) + + def _find_yara(self): + """ + Find the yara binary in particular locations on the local system. Don't use 'which yara' + and rely on the system path in case it finds a malicious yara. 
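As an aside on the minimum-version gate this helper enforces (a minimal sketch for illustration only, not taken from this patch; the function name and sample versions are hypothetical), dotted versions can be compared as numeric tuples rather than floats, which also handles releases such as 4.10.0:

# Illustrative sketch only: compare dotted version strings numerically.
def yara_version_at_least(found, minimum="4.1.0"):
    """Return True if the dotted version string `found` is >= `minimum`."""
    def as_tuple(version):
        # pad short versions like "4.1" so tuple comparison behaves as expected
        parts = (version.strip().split(".") + ["0", "0", "0"])[:3]
        return tuple(int(p) for p in parts)
    return as_tuple(found) >= as_tuple(minimum)

# e.g. yara_version_at_least("4.10.2") -> True, yara_version_at_least("4.0.5") -> False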
+ Also, don't let the user specify where yara is, again in case it is a malicious version of yara + If found, check it's version >= MIN_YARA_VERSION + """ + def yara_version_ok(yara): + # Check the installed yara version >= MIN_YARA_VERSION + self.yara_version = call([[yara, '--version']]).strip() + try: + # Check the installed yara X.Y version > the minimal recommended yara version + if float('.'.join(self.yara_version.split('.')[:2])) < float(MIN_YARA_VERSION[:3]): + raise RuntimeError("Found %s with version %s, but malware-detection requires version >= %s\n" + "Please install a later version of yara." + % (yara, self.yara_version, MIN_YARA_VERSION)) + except RuntimeError as e: + logger.error(str(e)) + exit(constants.sig_kill_bad) + except Exception as e: + logger.error("Error getting the version of the specified yara binary %s: %s", yara, str(e)) + exit(constants.sig_kill_bad) + # If we are here then the version of yara was ok + return True + + # Try to find yara in only these usual locations. + # /bin/yara and /usr/bin/yara will exist if yara is installed via rpm + # /usr/local/bin/yara will (likely) exist if the user has compiled and installed yara manually + for yara in ['/bin/yara', '/usr/bin/yara']: + if os.path.exists(yara) and yara_version_ok(yara): + logger.debug("Using yara binary: %s", yara) + return yara + + logger.error("Couldn't find yara. Please ensure the yara package is installed") + exit(constants.sig_kill_bad) + + def _parse_scan_options(self): + """ + Initialize the various scan flags and lists and run methods that may change/populate them + """ + self.do_filesystem_scan = self._get_config_option('scan_filesystem', True) + self.do_process_scan = self._get_config_option('scan_processes', False) + self.scan_fsobjects = [] + self.scan_pids = [] + + if not (self.do_filesystem_scan or self.do_process_scan): + logger.error("Both scan_filesystem and scan_processes are disabled. 
Nothing to scan.") + exit(constants.sig_kill_bad) + + # Check if old options are still in use and inform the user of their replacements + for replaced_scan_option in ('scan_only', 'scan_exclude', 'scan_since'): + if self._get_config_option(replaced_scan_option): + logger.error("The '{0}' option has been replaced with the 'filesystem_{0}' and 'processes_{0}' options in {1}" + .format(replaced_scan_option, MALWARE_CONFIG_FILE)) + logger.error("Please remove the %s file and a new config file will be written with the new options", MALWARE_CONFIG_FILE) + exit(constants.sig_kill_bad) + + # Try parsing the filesystem and processes scan_only options and exit under certain conditions + parse_filesystem_scan_only = self._parse_filesystem_scan_only_option() + parse_processes_scan_only = self._parse_processes_scan_only_option() + if not (parse_filesystem_scan_only or parse_processes_scan_only): + logger.error("Nothing to scan with the filesystem_scan_only and processes_scan_only options") + exit(constants.sig_kill_bad) + if not (parse_filesystem_scan_only or self.do_process_scan): + logger.error("Nothing to scan with filesystem_scan_only option and scan_processes is disabled") + exit(constants.sig_kill_bad) + if not (self.do_filesystem_scan or parse_processes_scan_only): + logger.error("Nothing to scan with processes_scan_only option and scan_filesystem is disabled") + exit(constants.sig_kill_bad) + + # If we've made it here we are still doing scans, but disable scans if there were problems with scan_only + if not parse_filesystem_scan_only: + self.do_filesystem_scan = False + if not parse_processes_scan_only: + self.do_process_scan = False + + self._parse_filesystem_scan_exclude_option() + self._parse_processes_scan_exclude_option() + self._parse_filesystem_scan_since_option() + self._parse_processes_scan_since_option() + self._parse_exclude_network_filesystem_mountpoints_option() + + def _parse_test_scan_option(self): + self.test_scan = self._get_config_option('test_scan', False) + if not self.test_scan: + return False + + self.filesystem_scan_exclude_list = [] + self.processes_scan_exclude_list = [] + self.filesystem_scan_since_dict = {'timestamp': None} + self.processes_scan_since_dict = {'timestamp': None} + self.network_filesystem_mountpoints = [] + + # For matching the test rule, scan the insights config file and the currently running process + # Make sure the config file exists first though! 
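The scan_only/scan_exclude parsing in the following methods normalizes each configured path and skips symlinks and missing items. A standalone sketch of that normalization step (for illustration only, not taken from this patch; the helper name and sample paths are hypothetical):

# Illustrative sketch only: collapse duplicate slashes, drop symlinks and missing items.
import os

def normalise_scan_items(items):
    kept = []
    for item in items:
        item = os.path.normpath(item).replace("//", "/")
        if not os.path.exists(item) or os.path.islink(item):
            continue  # skip missing items and symlinks, as the parsers below do
        kept.append(item)
    return kept

# Example (hypothetical input): normalise_scan_items(["//var//www", "/no/such/dir"])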
+ if os.path.isfile(MALWARE_CONFIG_FILE): + self.do_filesystem_scan = True + self.scan_fsobjects = [MALWARE_CONFIG_FILE] + else: + self.do_filesystem_scan = False + self.scan_fsobjects = [] + + self.do_process_scan = True + self.scan_pids = [str(os.getpid())] + logger.info("\nPerforming a test scan of %sthe current process (PID %s) " + "to verify the malware-detection app is installed and scanning correctly ...\n", + "%s and " % self.scan_fsobjects[0] if self.do_filesystem_scan else "", self.scan_pids[0]) + return True + + def _parse_filesystem_scan_only_option(self): + """ + Parse the filesystem_scan_only option, if specified, to get a list of files/dirs to scan + If parsing was successful, then self.scan_fsobjects is populated and true is returned + If parsing was not successful, self.scan_fsobjects remains empty and false is returned + """ + filesystem_scan_only = self._get_config_option('filesystem_scan_only') + if filesystem_scan_only: + if not self.do_filesystem_scan: + logger.error("Skipping filesystem_scan_only option because scan_filesystem is false") + return False + # Process the filesystem_scan_only option as a list of files/dirs + if not isinstance(filesystem_scan_only, list): + filesystem_scan_only = [filesystem_scan_only] + for item in filesystem_scan_only: + # Remove extras slashes (/) in the file name and leading double slashes too (normpath doesn't) + item = os.path.normpath(item).replace('//', '/') + # Assume the item represents a filesystem item + if not os.path.exists(item): + logger.info("Skipping missing filesystem_scan_only item: '%s'", item) + elif os.path.islink(item): + logger.info("Skipping symlink filesystem_scan_only item: '%s'. Please use non-symlink items", item) + else: + self.scan_fsobjects.append(item) + + if self.scan_fsobjects: + logger.info("Scan only the specified filesystem item%s: %s", "s" if len(self.scan_fsobjects) > 1 else "", + self.scan_fsobjects) + return True + else: + logger.error("Unable to find the items specified for the filesystem_scan_only option. Skipping ...") + return False + return True + + def _parse_filesystem_scan_exclude_option(self): + """ + Simple parse of the filesystem_scan_exclude option (if specified) to get a list of valid items to exclude + """ + if not self.do_filesystem_scan: + return + + self.filesystem_scan_exclude_list = [] + filesystem_scan_exclude = self._get_config_option('filesystem_scan_exclude') + if filesystem_scan_exclude: + if not isinstance(filesystem_scan_exclude, list): + # Convert filesystem_scan_exclude to a list if only a single non-list item was specified + filesystem_scan_exclude = [filesystem_scan_exclude] + for item in filesystem_scan_exclude: + item = os.path.normpath(item).replace('//', '/') + if not os.path.exists(item): + logger.info("Skipping missing filesystem_scan_exclude item: '%s'", item) + elif os.path.islink(item): + logger.info("Skipping symlink filesystem_scan_exclude item: '%s'. Please use non-symlink items", item) + else: + self.filesystem_scan_exclude_list.append(item) + if self.filesystem_scan_exclude_list: + logger.info("Excluding specified filesystem item%s: %s", "s" if len(self.filesystem_scan_exclude_list) > 1 else "", + self.filesystem_scan_exclude_list) + else: + logger.info("Unable to find the items specified for the filesystem_scan_exclude option. 
Not excluding any filesystem items") + + @staticmethod + def _parse_processes_scan_option(option_items): + """ + 'option_items' is the list of items provided for either the processes_scan_only or processes_scan_exclude options + It is parsed as a list of items that may contain: + - a single PID, eg 1 + - a range of PIDs, eg 10..100 or 10000.. or ..500 + - a process_name, eg chrome + + A list of PIDs is returned representing all the PIDs that were matched from parsing the items + """ + pids = [] + ps_output = call([['ps', '-eo', 'pid=', '-o', 'comm=']]).splitlines() + proc_names = list(map(lambda x: (int(x[0]), str(x[1])), map(lambda x: tuple(x.split()), ps_output))) + proc_pids = list(map(lambda x: x[0], proc_names)) + if not isinstance(option_items, list): + option_items = [str(option_items)] + for item in option_items: + if isinstance(item, float): + # Handle floats so they don't cause exceptions + item = str(item) + if isinstance(item, int) or item.isdigit(): + # If it's digit, assume it represents a process ID + if int(item) in proc_pids: + logger.debug("Found PID %s", item) + pids.append(str(item)) + else: + logger.info("Skipping missing PID: %s", item) + elif '..' in item: + # Assume the item represents a range of process IDs + try: + start, end = item.split('..', 1) + start = 1 if not start else int(start.strip('.')) + end = int(open('/proc/sys/kernel/pid_max').read()) if not end else int(end.strip('.')) + pid_matches = [str(proc) for proc in proc_pids if start <= proc <= end] + except Exception as err: + logger.error("Unable to parse '%s' in to a range of PIDs: %s", item, str(err)) + continue + logger.debug("Found PID(s) in range '%s': %s", item, pid_matches) + if pid_matches: + pids.extend(pid_matches) + else: + logger.info("No PIDs found in process range '%s'", item) + else: + # Assume the item is a string representing the name of one or multiple processes + pid_matches = [str(proc[0]) for proc in proc_names if item in proc[1]] + if pid_matches: + pids.extend(pid_matches) + logger.debug("Found PID(s) for string '%s': %s", item, pid_matches) + else: + logger.info("No PID matches found for process name '%s'", item) + + return pids + + def _parse_processes_scan_only_option(self): + """ + Parse the processes_scan_only option, if specified, to get a list of processes to scan + """ + processes_scan_only = self._get_config_option('processes_scan_only') + if processes_scan_only: + if not self.do_process_scan: + logger.error("Skipping processes_scan_only option because scan_processes is false") + return False + + pids = self._parse_processes_scan_option(processes_scan_only) + if pids: + self.scan_pids = sorted(set(pids), key=lambda pid: int(pid)) + logger.info("Scan only the specified process ID%s: %s", "s" if len(self.scan_pids) > 1 else "", + self.scan_pids) + return True + else: + logger.error("Unable to find the items specified for the processes_scan_only option. 
Skipping ...") + return False + return True + + def _parse_processes_scan_exclude_option(self): + """ + Simple parse of the processes_scan_exclude option (if specified) to get a list of processes to exclude + """ + if not self.do_process_scan: + return + + self.processes_scan_exclude_list = [] + processes_scan_exclude = self._get_config_option('processes_scan_exclude') + if processes_scan_exclude: + if not self.do_process_scan: + logger.error("Skipping processes_scan_exclude option because scan_processes is false") + return + + pids = self._parse_processes_scan_option(processes_scan_exclude) + if pids: + self.processes_scan_exclude_list = sorted(set(pids), key=lambda pid: int(pid)) + logger.info("Excluding specified process ID%s: %s", "s" if len(self.processes_scan_exclude_list) > 1 else "", + self.processes_scan_exclude_list) + else: + logger.error("Unable to find the items specified for the processes_scan_exclude option. Not excluding any processes.") + + def _parse_filesystem_scan_since_option(self): + """ + filesystem_scan_since is specified as an integer representing the number of days ago to scan for modified files + If the option was specified and valid, then get the corresponding unix timestamp for the specified + number of days ago from now, which is used for comparing file modification times + """ + if not self.do_filesystem_scan: + return + + self.filesystem_scan_since_dict = {'timestamp': None, 'datetime': None} + filesystem_scan_since = self._get_config_option('filesystem_scan_since') + if filesystem_scan_since is not None: + timestamp = get_scan_since_timestamp('filesystem_scan_since', filesystem_scan_since) + if timestamp: + self.filesystem_scan_since_dict['timestamp'] = timestamp + self.filesystem_scan_since_dict['datetime'] = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S') + message = "Scan for files created/modified since %s%s" + if isinstance(filesystem_scan_since, str): + submessage = 'last successful scan on ' + else: + submessage = '%s day%s ago on ' % (filesystem_scan_since, "s" if filesystem_scan_since > 1 else "") + logger.info(message, submessage, self.filesystem_scan_since_dict['datetime']) + + def _parse_processes_scan_since_option(self): + """ + processes_scan_since is specified as an integer representing the number of days ago to scan for new processes + If the option was specified and valid, then get the corresponding unix timestamp for the specified + number of days ago from now, which is used for comparing process start times + """ + if not self.do_process_scan: + return + + self.processes_scan_since_dict = {'timestamp': None, 'datetime': None} + processes_scan_since = self._get_config_option('processes_scan_since') + if processes_scan_since is not None: + timestamp = get_scan_since_timestamp('processes_scan_since', processes_scan_since) + if timestamp: + self.processes_scan_since_dict['timestamp'] = timestamp + self.processes_scan_since_dict['datetime'] = datetime.fromtimestamp(timestamp).strftime('%Y-%m-%d %H:%M:%S') + message = "Scan for processes started since %s%s" + if isinstance(processes_scan_since, str): + submessage = 'last successful scan on ' + else: + submessage = '%s day%s ago on ' % (processes_scan_since, "s" if processes_scan_since > 1 else "") + logger.info(message, submessage, self.processes_scan_since_dict['datetime']) + + def _parse_exclude_network_filesystem_mountpoints_option(self): + """ + If exclude_network_filesystem_mountpoints is true, get a list of mountpoints of mounted network filesystems. 
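A standalone approximation of the mountpoint discovery this docstring describes (a sketch for illustration only, not taken from this patch; it shells out to findmnt via subprocess rather than the insights call() helper, and the filesystem-type list is only an example):

# Illustrative sketch only: list mountpoints of the given network filesystem types.
import subprocess

def network_mountpoints(fs_types=("nfs", "nfs4", "cifs")):
    cmd = ["findmnt", "-t", ",".join(fs_types), "-n", "-o", "TARGET"]
    try:
        output = subprocess.check_output(cmd, universal_newlines=True)
    except (OSError, subprocess.CalledProcessError):
        return []
    return [line for line in output.strip().splitlines() if line]

# Example: network_mountpoints() might return ["/mnt/share"] on a host with an NFS mount.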
+ The network_filesystem_types option has the list of network filesystems types to look for mountpoints for, + eg NFS, CIFS, SMBFS, SSHFS, Ceph, GlusterFS, GFS. + The list of network filesystem mountpoints will be added to the list of directories to exclude from scanning + """ + if not self.do_filesystem_scan: + return + + self.network_filesystem_mountpoints = [] + if not self._get_config_option('exclude_network_filesystem_mountpoints'): + # We aren't excluding network filesystems, leave it as a blank list (ie nothing to exclude) + return + + network_filesystem_types = self._get_config_option('network_filesystem_types') + if not network_filesystem_types: + logger.error("No value specified for 'network_filesystem_types' option") + exit(constants.sig_kill_bad) + + if isinstance(network_filesystem_types, list): + network_filesystem_types = ','.join(network_filesystem_types) + cmd = ['findmnt', '-t', network_filesystem_types, '-n', '-o', 'TARGET'] + logger.debug("Command to find mounted network filesystems: %s", ' '.join(cmd)) + try: + output = call([cmd]) + except CalledProcessError as err: + logger.error("Unable to get network filesystem mountpoints: %s", err.output.strip()) + exit(constants.sig_kill_bad) + + self.network_filesystem_mountpoints = str(output).strip().split('\n') if output else [] + if self.network_filesystem_mountpoints: + logger.info("Excluding network filesystem mountpoints: %s", self.network_filesystem_mountpoints) + else: + logger.debug("No mounted network filesystems found") + + def _get_rules(self): + """ + Obtain the rules used by yara for scanning from the rules_location option. + They can either be downloaded from the malware backend or obtained from a local file. + """ + # The rules file that is downloaded from the backend should be automatically removed when the + # malware-detection client exits. + # However it can happen that the rules file isn't removed for some reason, so remove any existing + # rules files before beginning a new scan, otherwise they may show up as matches in the scan results. 
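The docstring above notes that rules_location may be either a local file path or a URL. A minimal sketch of that dispatch (for illustration only, not taken from this patch; the helper name and sample values are hypothetical):

# Illustrative sketch only: values starting with '/' are treated as local files,
# everything else as a URL, adding https:// when no scheme is given.
import os
import re

def resolve_rules_location(rules_location):
    if rules_location.startswith("/"):
        path = os.path.normpath(rules_location).replace("//", "/")
        return ("file", path)
    if not re.match(r"^https?://", rules_location):
        rules_location = "https://" + rules_location
    return ("url", rules_location)

# Examples: resolve_rules_location("/etc//rules.yar") -> ("file", "/etc/rules.yar")
#           resolve_rules_location("example.com/signatures.yar") -> ("url", "https://example.com/signatures.yar")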
+ old_rules_files = sum([glob(os.path.join(path, rules)) + for path in ('/tmp', '/var/tmp', '/usr/tmp', gettempdir()) + for rules in ('.tmpmdsigs*', 'tmp_malware-detection-client_rules.*')], []) + for old_rules_file in old_rules_files: + if os.path.exists(old_rules_file): + logger.debug("Removing old rules file %s", old_rules_file) + os.remove(old_rules_file) + + self.rules_location = self._get_config_option('rules_location', '') + + # If rules_location starts with a /, assume its a file rather than a URL + if self.rules_location.startswith('/'): + # Remove any extra slashes from the file name and from the start too (normpath doesn't remove those) + rules_file = os.path.normpath(self.rules_location).replace('//', '/') + if not os.path.isfile(rules_file): + logger.error("Couldn't find specified rules file: %s", rules_file) + exit(constants.sig_kill_bad) + logger.debug("Using specified rules file: %s", rules_file) + return rules_file + + # If we are here, then we are downloading the rules from the malware backend + # Check if insights-config is defined first because we need to access its auth and network config + if not self.insights_config: + logger.error("Couldn't access the insights-client configuration") + exit(constants.sig_kill_bad) + + signatures_file = 'signatures.yar' + signatures_file += '?yara_version=' + self.yara_version if hasattr(self, 'yara_version') else '' + if not self.rules_location: + self.rules_location = 'https://console.redhat.com/api/malware-detection/v1/' + signatures_file + if '/redhat_access/' in self.insights_config.base_url: + # Satellite URLs have '/redhat_access/' in the base_url config option + self.rules_location = self.insights_config.base_url + '/malware-detection/v1/' + signatures_file + elif any([url in self.insights_config.base_url for url in ['console.stage.', 'cloud.stage.']]): + # For downloading rules in the stage environment, use the URL of base_url (after finding it) + base_url = urlparse(self.insights_config.base_url) + self.rules_location = base_url.netloc or base_url.scheme or base_url.path.split('/')[0] or 'cert.console.stage.redhat.com' + self.rules_location += '/api/malware-detection/v1/' + signatures_file + + # Make sure the rules_location starts with https:// + if not re.match('^https?://', self.rules_location): + self.rules_location = 'https://' + self.rules_location + + # If talking direct to C.R.C with cert auth or basic auth without a username/password, append 'cert.' to the url + if self.rules_location.startswith('https://console.redhat.com'): + authmethod = self.insights_config.authmethod if hasattr(self.insights_config, 'authmethod') else 'CERT' + username = self.insights_config.username if hasattr(self.insights_config, 'username') else '' + password = self.insights_config.password if hasattr(self.insights_config, 'password') else '' + if authmethod == 'CERT' or (authmethod == 'BASIC' and not (username or password)): + self.insights_config.authmethod = 'CERT' + parsed_url = urlparse(self.rules_location) + if not parsed_url.netloc.startswith('cert.'): + self.rules_location = urlunparse(parsed_url._replace(netloc='cert.' 
+ parsed_url.netloc)) + + # If doing a test scan, replace signatures.yar (or any other file suffix) with test-rule.yar + log_rule_contents = False + if self.test_scan: + self.rules_location = self._get_test_rule_location(self.rules_location) + log_rule_contents = True + + # Shouldn't need this, but left here for insurance: https://access.redhat.com/solutions/6997170 + # If a custom CA cert is being used, eg on a Satellite, then SSL errors may occur when downloading the rules + # The CA cert needs to be added to a CA bundle (with update-ca-trust) and the bundle used for cert verification + ca_cert = None + ca_bundles = ['/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem', '/etc/pki/tls/certs/ca-bundle.crt'] + for ca_bundle in ca_bundles: + if os.path.isfile(ca_bundle): + ca_cert = ca_bundle + break + + logger.debug("Downloading rules from: %s", self.rules_location) + if not self.insights_config.cert_verify: + self.insights_config.cert_verify = True + logger.debug("Using cert_verify value %s ...", self.insights_config.cert_verify) + conn = InsightsConnection(self.insights_config) + + for attempt in range(1, self.insights_config.retries + 1): + try: + response = conn.get( + self.rules_location, + log_response_text=log_rule_contents, + verify=self.insights_config.cert_verify + ) + if response.status_code != 200: + raise Exception("%s %s: %s" % (response.status_code, response.reason, response.text)) + break + except Exception as e: + if attempt < self.insights_config.retries: + logger.debug("Unable to download rules from %s: %s", self.rules_location, str(e)) + logger.debug("Trying again in %d seconds ...", attempt) + time.sleep(attempt) + else: + logger.error("Unable to download rules from %s: %s", self.rules_location, str(e)) + exit(constants.sig_kill_bad) + + if re.search('SSL.*verify.failed', str(e), re.IGNORECASE): + # Kept as a fallback in case SSL errors still occur - add the custom CA cert to the trusted certs + self.insights_config.cert_verify = self._get_config_option('ca_cert', ca_cert) + logger.debug("Trying cert_verify value %s ...", self.insights_config.cert_verify) + + self.temp_rules_file = NamedTemporaryFile(prefix='.tmpmdsigs', mode='wb', delete=True) + self.temp_rules_file.write(response.content) + self.temp_rules_file.flush() + return self.temp_rules_file.name + + def _build_yara_command(self): + """ + Get all the switches for the yara command to be run, for example: + - whether the rules file is compiled or not (-C) + - the number of CPU threads to use (-p) + - the nice command and its value to use (nice -n 'value') + - scan timeouts (-a) + """ + # Detect if the rules file is a text or binary (compiled) file (or otherwise) + output = call([['file', '-b', self.rules_file]]) + rule_type = output.strip().lower() + if os.path.getsize(self.rules_file) == 0 or rule_type == 'empty': + logger.error("Rules file %s is empty", self.rules_file) + exit(constants.sig_kill_bad) + + compiled_rules_flag = '-C' if rule_type.startswith('yara') or rule_type == 'data' else '' + logger.debug("Rules file type: '%s', Compiled rules: %s", rule_type, compiled_rules_flag == '-C') + + # Quickly test the rules file to make sure it contains usable rules! 
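The compiled-versus-text detection above keys off the output of file -b. A standalone approximation of that decision (for illustration only, not taken from this patch; it uses subprocess instead of the insights call() helper):

# Illustrative sketch only: decide whether yara's -C (compiled rules) flag is needed.
import subprocess

def compiled_rules_flag(rules_file):
    out = subprocess.check_output(["file", "-b", rules_file], universal_newlines=True)
    rule_type = out.strip().lower()
    # compiled rule sets are typically reported as "yara ..." or plain "data"
    return "-C" if rule_type.startswith("yara") or rule_type == "data" else ""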
+ # Note, if the compiled_rules_flag is '' it must be removed from the list or it causes problems + cmd = list(filter(None, [self.yara_binary, '--fail-on-warnings', '-p', '1', '-f', compiled_rules_flag, + self.rules_file, '/dev/null'])) + try: + call([cmd]) + except CalledProcessError as err: + err_str = str(err.output.strip().decode()) + logger.error("Unable to use rules file %s: %s", self.rules_file, err_str) + exit(constants.sig_kill_bad) + + # Limit the number of threads used by yara to limit the CPU load of the scans + # If system has 2 or fewer CPUs, then use just one thread + nproc = call('nproc').strip() + if not nproc or int(nproc) <= 2: + self.cpu_thread_limit = 1 + logger.debug("Using %s CPU thread(s) for scanning", self.cpu_thread_limit) + + # Construct the (partial) yara command that will be used later for scanning files and processes + # The argument for the files and processes that will be scanned will be added later + yara_cmd = list(filter(None, ['nice', '-n', str(self.nice_value), self.yara_binary, '-s', '-N', + '-a', str(self.scan_timeout), '-p', str(self.cpu_thread_limit), '-r', '-f', + compiled_rules_flag, self.rules_file])) + logger.debug("Yara command: %s", yara_cmd) + return yara_cmd + + def scan_filesystem(self): + """ + Process the filesystem items to scan + If self.scan_fsobjects is set, then just scan its items, less any items in the exclude list + scan_dict will contain all the toplevel directories to scan, and any particular files/subdirectories to scan + """ + if not self.do_filesystem_scan: + return False + + # Exclude the rules file and insights-client log files, unless they are things we specifically want to scan + # Get a list of potential rules files locations,eg /tmp, /var/tmp, /usr/tmp and gettempdir() + # eg customers may have /tmp linked to /var/tmp so both must be checked for excluding the downloaded rules + rules_file_name = os.path.basename(self.rules_file) + potential_tmp_dirs = set([gettempdir(), '/tmp', '/var/tmp', '/usr/tmp']) + potential_rules_files = set(list(map(lambda d: os.path.join(d, rules_file_name), potential_tmp_dirs)) + [self.rules_file]) + rules_files = list(filter(lambda f: os.path.isfile(f), potential_rules_files)) + for rules_file in rules_files: + if rules_file not in self.scan_fsobjects: + self.filesystem_scan_exclude_list.append(rules_file) + logger.debug("Excluding rules file: %s", rules_file) + insights_log_files = glob(constants.default_log_file + '*') + self.filesystem_scan_exclude_list.extend(list(set(insights_log_files) - set(self.scan_fsobjects))) + + scan_dict = process_include_exclude_items(include_items=self.scan_fsobjects, + exclude_items=self.filesystem_scan_exclude_list, + exclude_mountpoints=self.network_filesystem_mountpoints) + if not scan_dict: + self.do_filesystem_scan = False + return False + + logger.debug("Filesystem objects to be scanned in: %s", sorted(scan_dict.keys())) + + logger.info("Starting filesystem scan ...") + fs_scan_start = time.time() + + for toplevel_dir in sorted(scan_dict): + # Make a copy of the self.yara_cmd list and add to it the thing to scan + cmd = self.yara_cmd[:] + dir_scan_start = time.time() + + specified_log_txt = "specified " if 'include' in scan_dict[toplevel_dir] else "" + if self.filesystem_scan_since_dict['timestamp']: + logger.info("Scanning %sfiles in %s modified since %s ...", specified_log_txt, toplevel_dir, + self.filesystem_scan_since_dict['datetime']) + # Find the recently modified files in the given top level directory + scan_list_file = 
NamedTemporaryFile(prefix='%s_scan_list.' % os.path.basename(toplevel_dir), + mode='w', delete=True) + if 'include' in scan_dict[toplevel_dir]: + find_modified_include_items(scan_dict[toplevel_dir]['include'], self.filesystem_scan_since_dict['timestamp'], scan_list_file) + else: + find_modified_in_directory(toplevel_dir, self.filesystem_scan_since_dict['timestamp'], scan_list_file) + + scan_list_file.flush() + cmd.extend(['--scan-list', scan_list_file.name]) + else: + logger.info("Scanning %sfiles in %s ...", specified_log_txt, toplevel_dir) + if 'include' in scan_dict[toplevel_dir]: + scan_list_file = NamedTemporaryFile(prefix='%s_scan_list.' % os.path.basename(toplevel_dir), + mode='w', delete=True) + scan_list_file.write('\n'.join(scan_dict[toplevel_dir]['include'])) + scan_list_file.flush() + cmd.extend(['--scan-list', scan_list_file.name]) + else: + cmd.append(toplevel_dir) + + logger.debug("Yara command: %s", cmd) + try: + output = call([cmd]).strip() + except CalledProcessError as cpe: + logger.debug("Unable to scan %s: %s", toplevel_dir, cpe.output.strip()) + continue + + try: + self.parse_scan_output(output.strip()) + except Exception as e: + self.potential_matches += 1 + logger.exception("Rule match(es) potentially found in %s but problems encountered parsing the results: %s. Skipping ...", + toplevel_dir, str(e)) + + dir_scan_end = time.time() + logger.info("Scan time for %s: %d seconds", toplevel_dir, (dir_scan_end - dir_scan_start)) + if dir_scan_end - dir_scan_start >= self.scan_timeout - 2: + logger.warning("Scan of %s timed-out after %d seconds and may not have been fully scanned. " + "Consider increasing the scan_timeout value in %s", + toplevel_dir, self.scan_timeout, MALWARE_CONFIG_FILE) + + fs_scan_end = time.time() + logger.info("Filesystem scan time: %s", time.strftime("%H:%M:%S", time.gmtime(fs_scan_end - fs_scan_start))) + return True + + def scan_processes(self): + if not self.do_process_scan: + return False + + # Get a list of all PIDs to scan if none were specified with scan only option + if not self.scan_pids: + self.scan_pids = [entry for entry in os.listdir('/proc') if entry.isdigit()] + + # Add this currently running process' PID to the list of processes to exclude (unless its a test_scan) + # Then remove the excluded processes from the list of PIDs to scan + if not self.test_scan: + self.processes_scan_exclude_list.append(str(os.getpid())) # make sure to exclude our script's pid + self.scan_pids = sorted(list(set(self.scan_pids) - set(self.processes_scan_exclude_list)), key=lambda pid: int(pid)) + + if not self.scan_pids: + logger.error("No processes to scan because the specified exclude items cancel them out") + self.do_process_scan = False + return False + + # If process_scan_since is specified, get a list of processes started since the specified date + if hasattr(self, 'processes_scan_since_dict') and self.processes_scan_since_dict['timestamp']: + # First get a list of all running processes and their start times + ps_output = call([['ps', '-eo', 'pid=', '-o', 'lstart=']]).splitlines() + all_proc_starts = list(map(lambda x: (str(x[0]), str(x[1])), map(lambda x: tuple(x.strip().split(' ', 1)), ps_output))) + scan_since_pids = [] + # Loop through all processes and if the process start time <= the specified scan_since time then + # make note of the process + for proc in all_proc_starts: + proc_start_secs = float(datetime.strptime(proc[1], '%a %b %d %H:%M:%S %Y').strftime('%s')) + if proc_start_secs >= self.processes_scan_since_dict['timestamp']: + 
scan_since_pids.append(proc[0]) + # Finally, do a set intersection of the current list of scan_pids and the list of processes started + # since processes_scan_since. The resulting list is the list of processes to scan. + self.scan_pids = sorted(list(set(self.scan_pids) & set(scan_since_pids)), key=lambda pid: int(pid)) + + if not self.scan_pids: + logger.error("No processes to scan because none were started since %s", self.processes_scan_since_dict['datetime']) + self.do_process_scan = False + return False + + logger.info("Starting processes scan ...") + pids_scan_start = time.time() + + for scan_pid in self.scan_pids: + pid_scan_start = time.time() + logger.info("Scanning process %s ...", scan_pid) + cmd = self.yara_cmd + [str(scan_pid)] + logger.debug("Yara command: %s", cmd) + try: + output = call([cmd]).strip() + except CalledProcessError as cpe: + logger.debug("Unable to scan process %s: %s", scan_pid, cpe.output.strip()) + continue + + try: + self.parse_scan_output(output) + except Exception as e: + self.potential_matches += 1 + logger.exception("Rule match(es) potentially found in process %s but problems encountered parsing the results: %s. Skipping ...", + scan_pid, str(e)) + + pid_scan_end = time.time() + logger.info("Scan time for process %s: %d seconds", scan_pid, (pid_scan_end - pid_scan_start)) + if pid_scan_end - pid_scan_start >= self.scan_timeout - 2: + logger.warning("Scan of process %s timed-out after %d seconds and may not have been fully scanned. " + "Consider increasing the scan_timeout value in %s", + scan_pid, self.scan_timeout, MALWARE_CONFIG_FILE) + + pids_scan_end = time.time() + logger.info("Processes scan time: %s", time.strftime("%H:%M:%S", time.gmtime(pids_scan_end - pids_scan_start))) + return True + + def parse_scan_output(self, output, exclude_items=[]): + if not output: + return + + # Each 'set' of output lines consists of 1 line containing the rule and file/pid (aka source) it matches + # Followed by one or more related lines of matching string data from that source, eg + # ... + # rule_name source + Set of 3 related lines + # 0x_offset:string_identifier:string_data | + # 0x_offset:string_identifier:string_data + + # rule_name source + Set of 2 related lines + # 0x_offset:string_identifier:string_data + + # ... + + def skip_string_data_lines(string_data_lines): + # Skip the 0x... 
lines containing string match data + while string_data_lines and string_data_lines[0].startswith('0x'): + logger.debug("Skipping string data line '%s'", string_data_lines[0]) + string_data_lines.pop(0) + + output_lines = output.split("\n") + while output_lines: + if 'error scanning ' in output_lines[0]: + if output_lines[0].endswith('error: 4'): + # Yara 'could not map file' error - only display this error if debugging (spammy) + logger.debug(output_lines[0]) + else: + logger.info(output_lines[0]) + output_lines.pop(0) # Skip the error scanning line + # Skip any string match lines after the error scanning line + skip_string_data_lines(output_lines) + continue + # Get the rule_name and source from the first line in the set + try: + rule_name, source = output_lines[0].rstrip().split(" ", 1) + except ValueError as err: + # Hopefully shouldn't happen but log it and continue processing + logger.debug("Error parsing rule match '%s': %s", output_lines[0], err) + output_lines.pop(0) # Skip the erroneous line + # Skip any string match lines afterwards until we get to the next rule match line + skip_string_data_lines(output_lines) + continue + + # All good so far, skip over the line containing the rule name and matching source file/pid + output_lines.pop(0) + + # If the rule or the source file/pid is to be excluded, then skip over its scan matches + # and move onto the next match + # if any([exclude_rule.lower() in rule_name.lower() for exclude_rule in self.exclude_rules]) \ + # or any([ei in source for ei in exclude_items]): + # skip_string_data_lines(output_lines) + # continue + + # Check if the rule name contains a ':' or doesn't start with a char/string + # It shouldn't and its likely to be due to a malformed string_offset line + # Skip any further scan matches until the next rule match + if ':' in rule_name or not re.match('^[a-zA-Z]+', rule_name): + skip_string_data_lines(output_lines) + continue + + rule_match = {'rule_name': rule_name, 'matches': []} + source_type = "process" if source.isdigit() else "file" + + # Parse the string match data for the remaining lines in the set + string_matches = 0 + while output_lines and output_lines[0].startswith('0x'): + if string_matches < self.string_match_limit: + try: + string_offset, string_identifier, string_data = output_lines[0].split(':', 2) + string_offset = int(string_offset, 0) + except ValueError as err: + logger.debug("Error parsing string match '%s': %s", output_lines[0], err) + output_lines.pop(0) + continue + rule_match_dict = {'source': source, + 'string_data': string_data.strip(), + 'string_identifier': string_identifier, + 'string_offset': string_offset, + 'metadata': {'source_type': source_type}} + rule_match['matches'].extend([rule_match_dict]) + output_lines.pop(0) + string_matches += 1 + + # If string_match_limit is 0 or there was no string data, there will be no rule_matches, + # but still record the file/pid source that was matched + if not rule_match['matches']: + rule_match_dict = {'source': source, + 'string_data': '', + 'string_identifier': '', + 'string_offset': -1, + 'metadata': {'source_type': source_type}} + rule_match['matches'] = [rule_match_dict] + + if self.add_metadata: + try: + # Add extra data to each rule match, beyond what yara provides + # Eg, for files: line numbers & context, checksums; for processes: process name + # TODO: find more pythonic ways of doing this stuff instead of using system commands + metadata_func = self._add_file_metadata if source_type == 'file' else self._add_process_metadata + 
metadata_func(rule_match['matches']) + except Exception as e: + logger.error("Error adding metadata to rule match %s in %s %s: %s. Skipping ...", + rule_name, source_type, source, str(e)) + + self.matches += 1 + logger.info("Matched rule %s in %s %s", rule_name, source_type, source) + logger.debug(rule_match) + if self.host_scan.get(rule_match['rule_name']): + self.host_scan[rule_match['rule_name']].extend(rule_match['matches']) + else: + self.host_scan[rule_match['rule_name']] = rule_match['matches'] + + def _add_process_metadata(self, rule_matches): + """ + Add extra data to the process scan matches beyond what is provided by yara, eg process name + """ + # All passed in rule_matches will have the same source PID + # Check the process still exists before obtaining the metadata about it + source = rule_matches[0]['source'] + if not os.path.exists('/proc/%s' % source): + return + + # Get name of process from ps command + # -h: no output header, -q: only the specified process, -o args: just the process name and args + try: + process_name = call([['ps', '-hq', source, '-o', 'args']]).strip() + except CalledProcessError: + process_name = 'unknown' + + for rule_match in rule_matches: + rule_match['metadata'].update({'process_name': process_name}) + + def _add_file_metadata(self, rule_matches): + """ + Add extra data to the file scan matches beyond what is provided by yara + - eg matching line numbers, line context, file checksum + - Use grep to get the line numbers & sed to get the line + """ + def get_line_from_file(file_name, line_number): + # Extract the line at line_number from file_name + line_length_limit = 120 + try: + line = call([['sed', '%dq;d' % line_number, file_name]]).strip() + except CalledProcessError: + line = "" + # Limit line length if necessary and urlencode it to minimize problems with GraphQL when uploading + return urlencode(line if len(line) < line_length_limit else line[:line_length_limit] + "...") + + # All passed in rule_matches will have the same source file + # Check the file still exists before obtaining the metadata about it + source = rule_matches[0]['source'] + if not os.path.exists(source): + return + + # Get the file type, mime type and md5sum hash of the source file + try: + file_type = call([['file', '-b', source]]).strip() + except Exception: + file_type = "" + try: + mime_type = call([['file', '-bi', source]]).strip() + except Exception: + mime_type = "" + try: + md5sum = call([['md5sum', source]]).strip().split()[0] + except Exception: + md5sum = "" + + grep_string_data_match_list = [] + if mime_type and 'charset=binary' not in mime_type: + # Get the line numbers for each of yara's string_data matches in the source file, but not for binary files + # Build a grep command that searches for each of the string_data patterns in the source file + # For each string_data pattern that grep finds, the grep output will have the form... + # line_number:offset_from_0:string_data_pattern + + # Get the set of patterns to grep for, eg ['pattern1', 'pattern2', etc], ie remove duplicate patterns + grep_string_data_pattern_set = set([match['string_data'] for match in rule_matches]) + if grep_string_data_pattern_set: + # Build an option list for grep, eg ['-e', 'pattern1', '-e', 'pattern2', ... 
etc] + # zip creates a list of tuples, eg [('-e', 'pattern'), ('-e', 'pattern2'), ...], then flatten the list + grep_string_data_patterns = [item for tup in list(zip(['-e'] * len(grep_string_data_pattern_set), + grep_string_data_pattern_set)) + for item in tup] + # Create the grep command to execute. -F means don't interpret regex special chars in the patterns + grep_command = ['/bin/grep', '-Fbon'] + grep_string_data_patterns + [source] + logger.debug("grep command: %s", grep_command) + try: + grep_output = call([grep_command]) + except CalledProcessError: + grep_output = "" + + # Now turn the grep output into a list of tuples for easier searching a little later, ie + # [(line_number, offset_from_0, string_data_pattern), (...), ] + if grep_output and not grep_output.lower().startswith('binary'): + grep_string_data_match_list = list(map(lambda grep_output_line: tuple(grep_output_line.split(':', 3)), + grep_output.splitlines())) + + for rule_match in rule_matches: + metadata = rule_match['metadata'] + metadata.update({'file_type': file_type, + 'mime_type': mime_type, + 'md5sum': md5sum}) + if grep_string_data_match_list: + # Now, for each offset_from_0 in the grep output, we want to match it with the corresponding + # string_offset value from the yara output so we can get the line number for that string_data match + # And while we are here, get the line from the source file at that line number + line_number = None + for grep_list_item in grep_string_data_match_list: + if int(grep_list_item[1]) == rule_match['string_offset']: + line_number = int(grep_list_item[0]) + break + if line_number: + metadata.update({'line_number': line_number, + 'line': get_line_from_file(source, line_number)}) + + def _create_host_scan_mutation(self): + # Build the mutation text + mutation_header = """ + mutation HostScan { + recordHostScan( + input: { + scannedhost: { + insightsId: "%s" + rulesScanned: [""" % generate_machine_id() + + mutation_footer = """ + ] + } + } + ) { + success + } + }""" + + mutation = mutation_header + for rule_name in self.host_scan.keys(): + rule_scan = """{ + ruleName: "%s" + stringsMatched: [""" % rule_name + for match in self.host_scan[rule_name]: + rule_scan += """{ + source: "%s" + stringData: %s + stringIdentifier: %s + stringOffset: "%s" + metadata: "%s" + }, """ % (match['source'], + json.dumps(match['string_data']), + json.dumps(match['string_identifier']), + match['string_offset'], + json.dumps(match['metadata']).replace('"', '\\"')) + rule_scan += "]}, " + mutation += rule_scan + + mutation += mutation_footer + return mutation + + def _get_config_option(self, option, default_value=None): + """ + Get the value of a config option or if it doesn't exist or is None, the default_value + """ + value = os.getenv(option.upper()) + if value is not None: + return self._parse_env_var(option.upper(), value) + value = self.config.get(option) + return value if value is not None else default_value + + @staticmethod + def _parse_env_var(env_var, value): + """ + Parse specific environment variables to make sure they have appropriate values + """ + logger.debug("Found environment variable: %s, value: %s", env_var, value) + # Parse these env vars as booleans + if env_var in ENV_VAR_TYPES['boolean']: + return value.lower() in ('true', 'yes', 't', 'y') + + # Parse these as lists by splitting at the commas + if env_var in ENV_VAR_TYPES['list']: + if value: + return value.split(',') if ',' in value else [value] + else: + return [] + + # Parse *_scan_since, can be either an int or a string (ie 
'last') + if env_var in ENV_VAR_TYPES['int_or_str']: + return int(value) if value.isdigit() else value + + # Parse these as ints + if env_var in ENV_VAR_TYPES['integer']: + try: + return int(value) + except ValueError as e: + logger.error("Problem parsing environment variable %s: %s", env_var, str(e)) + exit(constants.sig_kill_bad) + + # env_var value doesn't require parsing, just return it as is (ie. as a string) + return value + + @staticmethod + def _get_test_rule_location(rules_location): + test_rule = 'test-rule.yar' + # Nothing to do if the location already ends with test_rule + if rules_location.endswith(test_rule): + return rules_location + # Replace the last entry from the path with the test-rule + # A bit crude but it seems to work ok with both URL and file paths + return os.path.join(os.path.dirname(rules_location), test_rule) + + +# +# Utility functions +# Mainly for including / excluding certain directories for scanning +# And also for finding files that have been modified recently +# +def get_toplevel_dirs(): + """ + Returns a list of the top level directories directly under root (/), + """ + toplevel_dirs = sorted(filter(lambda x: not os.path.islink(x), map(lambda x: "/" + x, os.listdir('/')))) + return toplevel_dirs + + +def is_same_file_or_root(file1, file2): + # Catch possible permission denied error with fuse mounted filesystems. Yes, even for root! + try: + if os.path.samefile(file1, file2) or os.path.samefile(file1, '/'): + return True + except Exception as err: + logger.debug("Encountered exception running os.path.samefile('%s', '%s'): %s. Trying string comparison ...", + file1, file2, str(err)) + if file1 == file2 or file1 == '/': + return True + return False + + +def get_parent_dirs(item, parent_dir_list, base_case='/'): + """ + Get a list of parent directories of a particular filesystem item, stopping at base_case (root by default) + Eg for get_parent_dirs('/path/to/some/item', parent_dir_list) -> + parent_dir_list = ['/path', '/path/to', '/path/to/some', '/path/to/some/item'] + """ + if is_same_file_or_root(item, base_case): + return + get_parent_dirs(os.path.dirname(item), parent_dir_list, base_case) + parent_dir_list.append(item) + + +def process_include_items(include_items=[]): + """ + Process the include items to a get list of directories to be scanned + If there are no entries then get the list of top level directories under root (/), + :return: a list of directories to be scanned. It never returns an empty list. + """ + default_values = get_toplevel_dirs() + + logger.debug("Parsing include items ...") + parsed_list = [] + for item in include_items: + item = item.strip() + if not item or item.startswith('#'): + continue + include_item = os.path.normpath(item).replace('//', '/') + if os.path.exists(include_item): + # ignore the include_item if its not a full directory path + if not include_item.startswith('/'): + logger.debug("Skipping partial directory path '%s' ...", include_item) + continue + elif os.path.islink(include_item): + logger.debug("Skipping link '%s' ...", include_item) + continue + elif include_item == '/': + # Found / in include item list. No need to get the other items because / trumps all + logger.debug("Found root directory in list of items to scan. Ignoring the other items ...") + parsed_list = default_values + break + else: + parsed_list.append(include_item) + else: + logger.debug("Skipping missing item '%s' ...", include_item) + + if not parsed_list: + logger.debug("No items specified to be scanned. 
Using default values %s ...", default_values) + parsed_list = default_values + else: + # Remove any duplicates and any children of parent directories before returning + parsed_list = remove_child_items(sorted(list(set(parsed_list)))) + + logger.debug("Include items: %s", parsed_list) + return parsed_list + + +def process_exclude_items(exclude_items=[]): + """ + Process the exclude items to get list of directories to NOT be scanned + :return: a list of directories to not be scanned if any, otherwise an empty list + """ + logger.debug("Parsing exclude items ...") + parsed_list = [] + for item in exclude_items: + item = item.strip() + if not item or item.startswith('#'): + continue + exclude_item = os.path.normpath(item).replace('//', '/') + if os.path.exists(exclude_item): + # ignore the exclude_item if its not a full directory path + if exclude_item == '/': + # Found / in exclude list. No need to get the other items because / trumps all + logger.debug("Found root directory in the exclude list. Expanding it to all toplevel directories ...") + parsed_list = get_toplevel_dirs() + break + elif not exclude_item.startswith('/'): + logger.debug("Skipping partial directory path '%s' ...", exclude_item) + continue + else: + parsed_list.append(exclude_item) + else: + logger.debug("Skipping missing item '%s' ...", exclude_item) + + if not parsed_list: + logger.debug("No items specified to be excluded") + else: + # Remove any duplicates and any children of parent directories before returning + parsed_list = remove_child_items(sorted(list(set(parsed_list)))) + + logger.debug("Exclude items: %s", parsed_list) + return parsed_list + + +def remove_child_items(item_list): + """ + For a list of filesystem items, remove those items that are duplicates or children of other items + Eg, for remove_child_items['/path/to/some/item/child', '/path/to/another/item', '/path/to/some/item'] + returns ['/path/to/another/item', '/path/to/some/item'] + If one if the items is root, then it wins + Also, all items should be the full path starting at root (/). Any that aren't are removed + """ + if '/' in item_list: + return ['/'] + + # Remove duplicates and any non-full path items + item_list = sorted(list(set(filter(lambda x: x.startswith('/'), item_list)))) + remove_items = set([]) + for i, item1 in enumerate(item_list[:-1]): + for item2 in item_list[i + 1:]: + if item1 != item2 and item2.startswith(item1 + '/'): + remove_items.add(item2) + for remove_item in remove_items: + item_list.remove(remove_item) + return sorted(list(set(item_list))) + + +def remove_included_excluded_items(included_items, excluded_items): + """ + Go through the list of included items and remove any that are in the exclude list, + or are children of excluded items (no need to scan an included item if its parent is to be excluded) + """ + # Clean up the lists, just in case this hasn't been done already + included_items = remove_child_items(included_items) + excluded_items = remove_child_items(excluded_items) + + remove_items = set([]) + for included_item in included_items: + for excluded_item in excluded_items: + if excluded_item == included_item or included_item.startswith(excluded_item + '/'): + remove_items.add(included_item) + for remove_item in remove_items: + included_items.remove(remove_item) + return included_items + + +def process_include_exclude_items(include_items=[], exclude_items=[], exclude_mountpoints=[]): + """ + Process the include and exclude items, where the exclude items are effectively subtracted from the include_items. 
+ It builds a scan_dict dictionary of items to scan keyed by the filesystem top level directories. + Only the toplevel directories from items in the include_items list will be present in scan_dict. + scan_dict = {'/boot': {'include': ['/boot/include/me', ...], 'exclude: ['/boot/exclude/me', ...]}, + '/etc': {'include': ['/etc/include/me', ...], 'exclude: ['/etc/exclude/me', ...]}, + ... + :return: scan_dict + """ + # Get a list of excluded items from the exclude file and network filesystem mountpoints + initial_exclude_list = process_exclude_items(exclude_items) + final_exclude_list = remove_child_items(list(set(exclude_mountpoints) | set(initial_exclude_list))) + logger.debug("Final exclude items: %s", final_exclude_list) + + # Get a list of included items from the include file, minus the excluded items + initial_include_list = process_include_items(include_items) + if not initial_include_list: + logger.error("No filesystem items to scan because the include items doesn't contain any valid items") + return {} + final_include_list = remove_included_excluded_items(initial_include_list, final_exclude_list) + logger.debug("Final include items after removing exclude items: %s", final_include_list) + if not final_include_list: + logger.error("No filesystem items to scan because the specified exclude items cancel them out") + return {} + + # This is the dictionary that will hold all the items to scan (after processing the include and exclude items) + # It will be keyed by each of the toplevel directories containing items to scan + # yara will scan each of the toplevel dir's 'include' keys (if present), or just the toplevel dir itself + scan_dict = {} + + # Populate the scan_dict by creating keys for each toplevel directory of the items to include/scan + # Create an 'include' key for each toplevel directory containing items to include in that toplevel directory + logger.debug("Populating scan_dict's include items ...") + for include_item in final_include_list: + item_subpaths = [] + get_parent_dirs(include_item, item_subpaths) + include_item_toplevel_dir = item_subpaths[0] + if include_item_toplevel_dir not in scan_dict: + # Create an 'include' key if the item to scan isn't just the toplevel directory itself + scan_dict[include_item_toplevel_dir] = {'include': set([include_item])}\ + if include_item != include_item_toplevel_dir else {} + else: + scan_dict[include_item_toplevel_dir]['include'].add(include_item) + + logger.debug("Scan dict after adding include items: %s", scan_dict) + + # Populate an 'exclude' key for the toplevel dirs in the scan_dict that also have items to exclude + # Or remove the toplevel dirs from the scan dict where the toplevel dir itself is to be excluded + logger.debug("Populating scan_dict's exclude items ...") + for exclude_item in final_exclude_list: + item_subpaths = [] + get_parent_dirs(exclude_item, item_subpaths) + exclude_item_toplevel_dir = item_subpaths[0] + if exclude_item_toplevel_dir not in scan_dict: + # This exclude_item's toplevel dir isn't in the scan dict, so skip it (since its not being included) + continue + if 'exclude' not in scan_dict[exclude_item_toplevel_dir]: + # Create the 'exclude' key if it doesn't already exist + scan_dict[exclude_item_toplevel_dir]['exclude'] = {'items': [], 'subpaths': set([])} + + scan_dict[exclude_item_toplevel_dir]['exclude']['items'].append(exclude_item) + + # Add the list of subpaths leading to this exclude item. 
+ # The subpaths are needed later for listing the contents each subpath + scan_dict[exclude_item_toplevel_dir]['exclude']['subpaths'].update(item_subpaths) + + logger.debug("Scan dict after adding exclude items: %s", scan_dict) + + # For each toplevel dir with items to exclude, re-populate the include key with directory content listings + # of the subpaths, minus the items to exclude and only including items to include. Yep, its complicated. + # These directory listings will be used with yara's --scan-list option + logger.debug("Re-populating scan_dict's include items with directory content listings to pass to yara ...") + for toplevel_dir in scan_dict: + if 'exclude' not in scan_dict[toplevel_dir]: + continue + + # Get directory listings of each of the subpaths + if 'include' in scan_dict[toplevel_dir]: + scan_items = set(scan_dict[toplevel_dir]['include']) + else: + scan_items = set([]) + toplevel_dir_exclude = scan_dict[toplevel_dir]['exclude'] + for exclude_item in toplevel_dir_exclude['items']: + subpaths = [] + get_parent_dirs(exclude_item, subpaths) + for i, subpath in enumerate(subpaths[:-1]): + dir_list = os.listdir(subpath) + dir_list = sorted(map(lambda x: subpath + '/' + x, dir_list)) + dir_list.remove(subpaths[i + 1]) + scan_items.update(dir_list) + + # Go through the list of scan items and remove any exclude items or exclude item subpaths + for scan_item in list(scan_items): + for exclude_item in toplevel_dir_exclude['items']: + if scan_item == exclude_item or scan_item.startswith(exclude_item + '/'): + scan_items.remove(scan_item) + break + else: + for exclude_subpath in toplevel_dir_exclude['subpaths']: + if scan_item == exclude_subpath: + scan_items.remove(scan_item) + + # If there is an include list, make sure the scan_items only include items in the include list + if 'include' in scan_dict[toplevel_dir]: + for maybe_include in list(scan_items): + if os.path.islink(maybe_include) or (not os.path.isfile(maybe_include) and not os.path.isdir(maybe_include)): + scan_items.remove(maybe_include) + continue + if any([maybe_include == definitely_include or maybe_include.startswith(definitely_include + '/') + for definitely_include in scan_dict[toplevel_dir]['include']]): + continue + else: + scan_items.remove(maybe_include) + + # Overwrite the existing include key list with the new list of scan_items + scan_dict[toplevel_dir]['include'] = sorted(list(scan_items)) + + logger.debug("Final scan_dict: %s", scan_dict) + return scan_dict + + +def get_scan_since_timestamp(scan_since_option, since): + """ + Return a unix timestamp corresponding to how long ago to scan for files or processes (depending on scan_since_option) + Valid values of 'since' are integers > 0 meaning the number of days back in time from now, + or 'last' meaning get the timestamp of the last scan + If 'since' is not one of these valid values, then terminate + """ + now = time.time() + timestamp_file = LAST_FILESYSTEM_SCAN_FILE if scan_since_option == 'filesystem_scan_since' else LAST_PROCESSES_SCAN_FILE + + def get_lastscan_timestamp(scan_since_option, lastscan): + try: + # Convert the datetime string into a unix timestamp + lastscan_seconds = float(datetime.strptime(lastscan, '%Y-%m-%dT%H:%M:%S.%f').strftime('%s')) + if lastscan_seconds > now: + raise RuntimeError("Last scan time is in the future.") + except Exception as err: + logger.error("Error getting time of last malware scan: %s. 
Ignoring '%s: last' option ...", str(err), scan_since_option) + return None + return lastscan_seconds + + if isinstance(since, str) and since.lower().startswith('l'): + # Get the timestamp of the last scan + if os.path.isfile(timestamp_file): + with open(timestamp_file) as f: + lastscan = f.readline().strip() + return get_lastscan_timestamp(scan_since_option, lastscan) + else: + logger.info("File %s doesn't exist for '%s: last' option. Continuing ...", timestamp_file, scan_since_option) + return None + elif isinstance(since, str): + logger.error("Unknown value '%s' for %s option. Valid values are integers >= 1 and 'last'", since, scan_since_option) + exit(constants.sig_kill_bad) + + try: + since_int = int(since) + if since_int >= 1: + return now - (since_int * 86400) # 86400 seconds in a day + else: + raise ValueError("Invalid %s value %s. Valid values are integers >= 1 and 'last'" % (scan_since_option, since)) + except ValueError as e: + logger.error(str(e)) + exit(constants.sig_kill_bad) + + +def is_recent_mtime(item, timestamp): + """ + Return True if the given 'item' has a modification time that is newer than 'timestamp' + Return False otherwise, or if the the 'item' is a link or another non-file type (eg pipes) + """ + if os.path.exists(item) and not os.path.islink(item) and os.path.isfile(item): + return os.path.getmtime(item) > timestamp + return False + + +def find_modified_in_directory(directory, timestamp, output_file): + """ + Find files in 'directory' that have been created/modified since 'timestamp' + and write their names to 'output_file' + """ + for root, dirs, files in os.walk(directory): + for afile in files: + path = os.path.join(root, afile) + if is_recent_mtime(path, timestamp): + output_file.write(path + "\n") + + +def find_modified_include_items(item_list, timestamp, output_file): + """ + Find files in the given list of items (files/directories) that have been created/modified since 'timestamp' + and write their names to 'output_file' + """ + for item in item_list: + if os.path.isdir(item): + find_modified_in_directory(item, timestamp, output_file) + else: + if is_recent_mtime(item, timestamp): + output_file.write(item + '\n') diff --git a/insights/client/apps/manifests.py b/insights/client/apps/manifests.py new file mode 100644 index 0000000000..a221e590c5 --- /dev/null +++ b/insights/client/apps/manifests.py @@ -0,0 +1,78 @@ +""" +App manifests for use with the --collector APP option +Define the app manifest and add it to the manifests dict at the bottom of the file +""" + +malware_detection_manifest = """ +# Manifest file for malware detection app data collection +--- +# version is for the format of this file, not its contents. +version: 0 + +client: + context: + class: insights.core.context.HostContext + args: + timeout: 10 # timeout in seconds for commands. Doesn't apply to files. + + blacklist: + files: [] + commands: [] + patterns: [] + keywords: [] + + persist: + # determines what will appear in the archive + - name: insights.specs.datasources.malware_detection.MalwareDetectionSpecs + enabled: true + + run_strategy: + name: serial + args: + max_workers: null + +plugins: + # disable everything by default + # defaults to false if not specified. + default_component_enabled: false + packages: + # determines which packages are loaded. 
These will be namespaced to the relevant collector + - insights.specs.datasources.malware_detection + - insights.specs.default + configs: + # determines which specs get loaded + - name: insights.specs.datasources.malware_detection.MalwareDetectionSpecs + enabled: true + # Enable specs for collecting the system's canonical facts + - name: insights.specs.default.DefaultSpecs.mac_addresses + enabled: true + - name: insights.specs.Specs.mac_addresses + enabled: true + - name: insights.specs.default.DefaultSpecs.etc_machine_id + enabled: true + - name: insights.specs.Specs.etc_machine_id + enabled: true + - name: insights.specs.default.DefaultSpecs.hostname + enabled: true + - name: insights.specs.Specs.hostname + enabled: true + - name: insights.specs.default.DefaultSpecs.bios_uuid + enabled: true + - name: insights.specs.Specs.bios_uuid + enabled: true + - name: insights.specs.default.DefaultSpecs.machine_id + enabled: true + - name: insights.specs.Specs.machine_id + enabled: true + - name: insights.specs.default.DefaultSpecs.ip_addresses + enabled: true + - name: insights.specs.Specs.ip_addresses + enabled: true + - name: insights.specs.default.DefaultSpecs.subscription_manager_id + enabled: true + - name: insights.specs.Specs.subscription_manager_id + enabled: true +""".lstrip() + +manifests = {'malware-detection': malware_detection_manifest} +content_types = {'malware-detection': 'application/vnd.redhat.malware-detection.results+tgz'} diff --git a/insights/client/archive.py b/insights/client/archive.py index 5e3beee21b..d9d18b4fc4 100644 --- a/insights/client/archive.py +++ b/insights/client/archive.py @@ -2,6 +2,9 @@ Handle adding files and preparing the archive for upload """ from __future__ import absolute_import +import glob +from signal import SIGTERM, signal +import sys import time import os import shutil @@ -14,6 +17,7 @@ from .utilities import determine_hostname, _expand_paths, write_data_to_file from .insights_spec import InsightsFile, InsightsCommand +from .constants import InsightsConstants as constants logger = logging.getLogger(__name__) @@ -22,47 +26,87 @@ class InsightsArchive(object): """ This class is an interface for adding command output and files to the insights archive + + Attributes: + config - an InsightsConfig object + tmp_dir - a temporary directory in /var/tmp + archive_dir - location to collect archive data inside tmp_dir + archive_tmp_dir - a temporary directory to write the final archive file + archive_name - filename of the archive and archive_dir + cmd_dir - insights_commands directory inside archive_dir + compressor - tar compression flag to use + tar_file - path of the final archive file """ def __init__(self, config): """ Initialize the Insights Archive - Create temp dir, archive dir, and command dir """ self.config = config - self.tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/') - self.archive_tmp_dir = None - if not self.config.obfuscate: - self.archive_tmp_dir = tempfile.mkdtemp(prefix='/var/tmp/') - name = determine_hostname() + self.cleanup_previous_archive() + if not os.path.exists(constants.insights_tmp_path): + os.mkdir(constants.insights_tmp_path, 0o700) + # input this to core collector as `tmp_path` + self.tmp_dir = tempfile.mkdtemp(dir=constants.insights_tmp_path, prefix='insights-archive-') + + # we don't really need this anymore... 
+ self.archive_tmp_dir = tempfile.mkdtemp(dir=constants.insights_tmp_path, prefix='insights-archive-') + + # We should not hint the hostname in the archive if it has to be obfuscated + if config.obfuscate_hostname: + hostname = "localhost" + else: + hostname = determine_hostname() + self.archive_name = ("insights-%s-%s" % - (name, + (hostname, time.strftime("%Y%m%d%H%M%S"))) - self.archive_dir = self.create_archive_dir() - self.cmd_dir = self.create_command_dir() + + # lazy create these, only if needed when certain + # functions are called + # classic collection and compliance needs these + # core collection will set "archive_dir" on its own + self.archive_dir = None + self.cmd_dir = None + self.compressor = config.compressor + self.archive_stored = None self.tar_file = None + self.keep_archive_dir = '/var/cache/insights-client' atexit.register(self.cleanup_tmp) + signal(SIGTERM, self.sigterm_handler) def create_archive_dir(self): """ - Create the archive dir + Create the archive directory if it is undefined or does not exist. """ + if self.archive_dir and os.path.exists(self.archive_dir): + # attr defined and exists. move along + return self.archive_dir + archive_dir = os.path.join(self.tmp_dir, self.archive_name) - os.makedirs(archive_dir, 0o700) - return archive_dir + if not os.path.exists(archive_dir): + logger.debug('Creating archive directory %s...', archive_dir) + os.makedirs(archive_dir, 0o700) + self.archive_dir = archive_dir + return self.archive_dir def create_command_dir(self): """ - Create the "sos_commands" dir + Create the "insights_commands" dir """ + self.create_archive_dir() cmd_dir = os.path.join(self.archive_dir, "insights_commands") - os.makedirs(cmd_dir, 0o700) - return cmd_dir + logger.debug('Creating command directory %s...', cmd_dir) + if not os.path.exists(cmd_dir): + os.makedirs(cmd_dir, 0o700) + self.cmd_dir = cmd_dir + return self.cmd_dir def get_full_archive_path(self, path): """ Returns the full archive path """ + self.create_archive_dir() return os.path.join(self.archive_dir, path.lstrip('/')) def _copy_file(self, path): @@ -99,6 +143,7 @@ def copy_dir(self, path): """ Recursively copy directory """ + self.create_archive_dir() for directory in path: if os.path.isdir(path): full_path = os.path.join(self.archive_dir, directory.lstrip('/')) @@ -120,7 +165,7 @@ def create_tar_file(self): """ Create tar file to be compressed """ - if not self.archive_tmp_dir: + if not self.tmp_dir: # we should never get here but bail out if we do raise RuntimeError('Archive temporary directory not defined.') tar_file_name = os.path.join(self.archive_tmp_dir, self.archive_name) @@ -143,15 +188,17 @@ def delete_tmp_dir(self): """ Delete the entire tmp dir """ - logger.debug("Deleting: " + self.tmp_dir) - shutil.rmtree(self.tmp_dir, True) + if self.tmp_dir: + logger.debug("Deleting: " + self.tmp_dir) + shutil.rmtree(self.tmp_dir, True) def delete_archive_dir(self): """ Delete the entire archive dir """ - logger.debug("Deleting: " + self.archive_dir) - shutil.rmtree(self.archive_dir, True) + if self.archive_dir: + logger.debug("Deleting: " + self.archive_dir) + shutil.rmtree(self.archive_dir, True) def delete_archive_file(self): """ @@ -192,12 +239,40 @@ def cleanup_tmp(self): and tar_file exists. 
''' if self.config.keep_archive and self.tar_file: + self.storing_archive() if self.config.no_upload: - logger.info('Archive saved at %s', self.tar_file) + logger.info('Archive saved at %s', self.archive_stored) else: - logger.info('Insights archive retained in %s', self.tar_file) - if self.config.obfuscate: - return # return before deleting tmp_dir - else: - self.delete_archive_file() + logger.info('Insights archive retained in %s', self.archive_stored) self.delete_tmp_dir() + + def sigterm_handler(_signo, _stack_frame, _context): + sys.exit(1) + + def cleanup_previous_archive(self): + ''' + Used at the start, this will clean the temporary directory of previous killed runs + ''' + archive_glob = os.path.join(constants.insights_tmp_path, 'insights-archive-*') + for file in glob.glob(archive_glob): + os.path.join('', file) + logger.debug("Deleting previous archive %s", file) + shutil.rmtree(file, True) + + def storing_archive(self): + if not os.path.exists(self.keep_archive_dir): + try: + os.makedirs(self.keep_archive_dir) + except OSError: + logger.error('ERROR: Could not create %s', self.keep_archive_dir) + raise + + archive_name = os.path.basename(self.tar_file) + self.archive_stored = os.path.join(self.keep_archive_dir, archive_name) + logger.info('Copying archive from %s to %s', self.tar_file, self.archive_stored) + try: + shutil.copyfile(self.tar_file, self.archive_stored) + except OSError: + # file exists already + logger.error('ERROR: Could not stored archive to %s', self.archive_stored) + raise diff --git a/insights/client/auto_config.py b/insights/client/auto_config.py index 19e740124b..d284721828 100644 --- a/insights/client/auto_config.py +++ b/insights/client/auto_config.py @@ -5,6 +5,7 @@ import logging import os import requests +import re try: from urlparse import urlparse @@ -54,7 +55,7 @@ def verify_connectivity(config): return False -def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): +def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite, is_stage, rhsm_no_proxy=None): """ Set config based on discovered data """ @@ -62,6 +63,7 @@ def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): logger.debug("Attempting to auto configure hostname: %s", hostname) logger.debug("Attempting to auto configure CA cert: %s", ca_cert) logger.debug("Attempting to auto configure proxy: %s", proxy) + logger.debug("Attempting to auto configure no_proxy: %s", rhsm_no_proxy) saved_base_url = config.base_url if ca_cert is not None: saved_cert_verify = config.cert_verify @@ -69,13 +71,18 @@ def set_auto_configuration(config, hostname, ca_cert, proxy, is_satellite): if proxy is not None: saved_proxy = config.proxy config.proxy = proxy + if rhsm_no_proxy and rhsm_no_proxy != '': + config.no_proxy = rhsm_no_proxy if is_satellite: # satellite config.base_url = hostname + '/r/insights' logger.debug('Auto-configured base_url: %s', config.base_url) else: # connected directly to RHSM - config.base_url = hostname + '/r/insights' + if is_stage: + config.base_url = hostname + '/api' + else: + config.base_url = hostname + '/r/insights' logger.debug('Auto-configured base_url: %s', config.base_url) logger.debug('Not connected to Satellite, skipping branch_info') # direct connection to RHSM, skip verify_connectivity @@ -111,6 +118,7 @@ def _try_satellite6_configuration(config): key = open(rhsmCertificate.keypath(), 'r').read() rhsm = rhsmCertificate(key, cert) is_satellite = False + is_stage = False # This will throw an exception if we are not 
registered logger.debug('Checking if system is subscription-manager registered') @@ -123,6 +131,9 @@ def _try_satellite6_configuration(config): rhsm_proxy_port = rhsm_config.get('server', 'proxy_port').strip() rhsm_proxy_user = rhsm_config.get('server', 'proxy_user').strip() rhsm_proxy_pass = rhsm_config.get('server', 'proxy_password').strip() + rhsm_no_proxy = rhsm_config.get('server', 'no_proxy').strip() + if rhsm_no_proxy.lower() == 'none' or rhsm_no_proxy == '': + rhsm_no_proxy = None proxy = None @@ -151,9 +162,13 @@ def _try_satellite6_configuration(config): rhsm_hostname = 'cert-api.access.redhat.com' rhsm_ca = None elif _is_staging_rhsm(rhsm_hostname): - logger.debug('Connected to staging RHSM, using rhel-test') - rhsm_hostname = 'rhel-test.cloud.redhat.com' - rhsm_ca = False # NOT None + logger.debug('Connected to staging RHSM, using cert.cloud.stage.redhat.com') + rhsm_hostname = 'cert.cloud.stage.redhat.com' + # never use legacy upload for staging + config.legacy_upload = False + config.cert_verify = True + is_stage = True + rhsm_ca = None else: # Set the host path # 'rhsm_hostname' should really be named ~ 'rhsm_host_base_url' @@ -161,7 +176,7 @@ def _try_satellite6_configuration(config): is_satellite = True logger.debug("Trying to set auto_configuration") - set_auto_configuration(config, rhsm_hostname, rhsm_ca, proxy, is_satellite) + set_auto_configuration(config, rhsm_hostname, rhsm_ca, proxy, is_satellite, is_stage, rhsm_no_proxy=rhsm_no_proxy) return True except Exception as e: logger.debug(e) @@ -221,7 +236,7 @@ def _try_satellite5_configuration(config): else: proxy = proxy + proxy_host_port logger.debug("RHN Proxy: %s", proxy) - set_auto_configuration(config, hostname, rhn_ca, proxy, True) + set_auto_configuration(config, hostname, rhn_ca, proxy, True, False) else: logger.debug("Could not find hostname") return False @@ -238,6 +253,12 @@ def try_auto_configuration(config): if config.auto_config and not config.offline: if not _try_satellite6_configuration(config): _try_satellite5_configuration(config) - if not config.legacy_upload and 'cloud.redhat.com' not in config.base_url: + if not config.legacy_upload and re.match(r'(.+)?\/r\/insights', config.base_url): + # When to append /platform + # base url ~= console.redhat.com/r/insights + # base url ~= cert-api.access.redhat.com/r/insights + # base url ~= satellite.host.example.com/redhat_access/r/insights + # When not to append /platform + # base url ~= console.redhat.com/api config.base_url = config.base_url + '/platform' logger.debug('Updated base_url: %s', config.base_url) diff --git a/insights/client/client.py b/insights/client/client.py index 945c1baaf4..6a03bcff10 100644 --- a/insights/client/client.py +++ b/insights/client/client.py @@ -1,5 +1,6 @@ from __future__ import print_function from __future__ import absolute_import +from os.path import isfile import sys import json import logging @@ -7,25 +8,24 @@ import os import time import six +from distutils.version import LooseVersion from .utilities import (generate_machine_id, write_to_disk, write_registered_file, write_unregistered_file, - delete_registered_file, - delete_unregistered_file, delete_cache_files, determine_hostname, - read_pidfile, - systemd_notify) + get_version_info) from .collection_rules import InsightsUploadConf from .data_collector import DataCollector +from .core_collector import CoreCollector from .connection import InsightsConnection from .archive import InsightsArchive from .support import registration_check from .constants import 
InsightsConstants as constants -from .schedule import get_scheduler +NETWORK = constants.custom_network_log_level LOG_FORMAT = ("%(asctime)s %(levelname)8s %(name)s %(message)s") logger = logging.getLogger(__name__) @@ -36,14 +36,25 @@ def do_log_rotation(): def get_file_handler(config): + ''' + Sets up the logging file handler. + Returns: + RotatingFileHandler - client rpm version is older than 3.2.0. + FileHandler - client rpm version is 3.2.0 or newer. + ''' log_file = config.logging_file log_dir = os.path.dirname(log_file) if not log_dir: log_dir = os.getcwd() elif not os.path.exists(log_dir): os.makedirs(log_dir, 0o700) - file_handler = logging.handlers.RotatingFileHandler( - log_file, backupCount=3) + # ensure the legacy rotating file handler is only used in older client versions + # or if there is a problem retrieving the rpm version. + rpm_version = get_version_info()['client_version'] + if not rpm_version or (LooseVersion(rpm_version) < LooseVersion(constants.rpm_version_before_logrotate)): + file_handler = logging.handlers.RotatingFileHandler(log_file, backupCount=3) + else: + file_handler = logging.FileHandler(log_file) file_handler.setFormatter(logging.Formatter(LOG_FORMAT)) return file_handler @@ -53,6 +64,8 @@ def get_console_handler(config): target_level = logging.FATAL elif config.verbose: target_level = logging.DEBUG + elif config.net_debug: + target_level = NETWORK elif config.quiet: target_level = logging.ERROR else: @@ -68,6 +81,7 @@ def get_console_handler(config): def configure_level(config): + config_level = 'NETWORK' if config.net_debug else config.loglevel config_level = 'DEBUG' if config.verbose else config.loglevel init_log_level = logging.getLevelName(config_level) @@ -78,13 +92,12 @@ def configure_level(config): logger.setLevel(init_log_level) logging.root.setLevel(init_log_level) - net_debug_level = logging.INFO if config.net_debug else logging.ERROR - logging.getLogger('network').setLevel(net_debug_level) if not config.verbose: logging.getLogger('insights.core.dr').setLevel(logging.WARNING) def set_up_logging(config): + logging.addLevelName(NETWORK, "NETWORK") if len(logging.root.handlers) == 0: logging.root.addHandler(get_console_handler(config)) logging.root.addHandler(get_file_handler(config)) @@ -117,18 +130,17 @@ def _legacy_handle_registration(config, pconn): None - could not reach the API ''' logger.debug('Trying registration.') - # force-reregister -- remove machine-id files and registration files - # before trying to register again - if config.reregister: - delete_registered_file() - delete_unregistered_file() - write_to_disk(constants.machine_id_file, delete=True) - logger.debug('Re-register set, forcing registration.') - - logger.debug('Machine-id: %s', generate_machine_id(new=config.reregister)) # check registration with API check = get_registration_status(config, pconn) + machine_id_present = isfile(constants.machine_id_file) + + if machine_id_present and check['status'] is False: + logger.info("Machine-id found, insights-client can not be registered." + " Please, unregister insights-client first: `insights-client --unregister`") + return False + + logger.debug('Machine-id: %s', generate_machine_id()) for m in check['messages']: logger.debug(m) @@ -174,7 +186,7 @@ def _legacy_handle_registration(config, pconn): 're-register this machine.') else: # not yet registered - logger.info('This machine has not yet been registered.' + logger.info('This machine has not yet been registered. 
' 'Use --register to register this machine.') return False @@ -198,11 +210,19 @@ def get_registration_status(config, pconn): return registration_check(pconn) +def __cleanup_local_files(): + write_unregistered_file() + delete_cache_files() + write_to_disk(constants.machine_id_file, delete=True) + logger.debug('Unregistered and removed machine-id') + + # -LEGACY- def _legacy_handle_unregistration(config, pconn): """ returns (bool): True success, False failure """ + check = get_registration_status(config, pconn) for m in check['messages']: @@ -210,6 +230,9 @@ def _legacy_handle_unregistration(config, pconn): if check['unreachable']: # Run connection test and exit + if config.force: + __cleanup_local_files() + return True return None if check['status']: @@ -219,9 +242,8 @@ def _legacy_handle_unregistration(config, pconn): logger.info('This system is already unregistered.') if unreg: # only set if unreg was successful - write_unregistered_file() - get_scheduler(config).remove_scheduling() - delete_cache_files() + __cleanup_local_files() + logger.debug('Legacy unregistration') return unreg @@ -236,10 +258,9 @@ def handle_unregistration(config, pconn): return _legacy_handle_unregistration(config, pconn) unreg = pconn.unregister() - if unreg: - # only set if unreg was successful - write_unregistered_file() - delete_cache_files() + if unreg or config.force: + # only set if unreg was successful or --force was set + __cleanup_local_files() return unreg @@ -269,27 +290,29 @@ def get_branch_info(config): return config.branch_info -def collect(config, pconn): +def collect(config): """ All the heavy lifting done here """ branch_info = get_branch_info(config) pc = InsightsUploadConf(config) - output = None - collection_rules = pc.get_conf_file() rm_conf = pc.get_rm_conf() + blacklist_report = pc.create_report() if rm_conf: logger.warn("WARNING: Excluding data from files") - # defaults - mp = None archive = InsightsArchive(config) msg_name = determine_hostname(config.display_name) - dc = DataCollector(config, archive, mountpoint=mp) + if config.core_collect: + collection_rules = None + dc = CoreCollector(config, archive) + else: + collection_rules = pc.get_conf_file() + dc = DataCollector(config, archive) logger.info('Starting to collect Insights data for %s', msg_name) - dc.run_collection(collection_rules, rm_conf, branch_info) + dc.run_collection(collection_rules, rm_conf, branch_info, blacklist_report) output = dc.done(collection_rules, rm_conf) return output @@ -301,10 +324,13 @@ def get_connection(config): def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=None): logger.info('Uploading Insights data.') api_response = None - parent_pid = read_pidfile() for tries in range(config.retries): - systemd_notify(parent_pid) - upload = pconn.upload_archive(tar_file, '', collection_duration) + logger.debug("Legacy upload attempt %d of %d ...", tries + 1, config.retries) + try: + upload = pconn.upload_archive(tar_file, '', collection_duration) + except Exception as e: + display_upload_error_and_retry(config, tries, str(e)) + continue if upload.status_code in (200, 201): api_response = json.loads(upload.text) @@ -315,7 +341,9 @@ def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=No handler.write(upload.text) else: handler.write(upload.text.encode('utf-8')) + os.chmod(constants.last_upload_results_file, 0o644) write_to_disk(constants.lastupload_file) + os.chmod(constants.lastupload_file, 0o644) msg_name = determine_hostname(config.display_name) account_number = 
config.account_number @@ -324,22 +352,16 @@ def _legacy_upload(config, pconn, tar_file, content_type, collection_duration=No msg_name, account_number) else: logger.info("Successfully uploaded report for %s.", msg_name) + if config.register: + # direct to console after register + upload + logger.info('View the Red Hat Insights console at https://console.redhat.com/insights/') break elif upload.status_code in (412, 413): pconn.handle_fail_rcs(upload) raise RuntimeError('Upload failed.') else: - logger.error("Upload attempt %d of %d failed! Status Code: %s", - tries + 1, config.retries, upload.status_code) - if tries + 1 != config.retries: - logger.info("Waiting %d seconds then retrying", - constants.sleep_time) - time.sleep(constants.sleep_time) - else: - logger.error("All attempts to upload have failed!") - logger.error("Please see %s for additional information", config.logging_file) - raise RuntimeError('Upload failed.') + display_upload_error_and_retry(config, tries, "%s: %s" % (upload.status_code, upload.reason)) return api_response @@ -347,26 +369,41 @@ def upload(config, pconn, tar_file, content_type, collection_duration=None): if config.legacy_upload: return _legacy_upload(config, pconn, tar_file, content_type, collection_duration) logger.info('Uploading Insights data.') - parent_pid = read_pidfile() for tries in range(config.retries): - systemd_notify(parent_pid) - upload = pconn.upload_archive(tar_file, content_type, collection_duration) + logger.debug("Upload attempt %d of %d ...", tries + 1, config.retries) + try: + upload = pconn.upload_archive(tar_file, content_type, collection_duration) + except Exception as e: + display_upload_error_and_retry(config, tries, str(e)) + continue if upload.status_code in (200, 202): + write_to_disk(constants.lastupload_file) + os.chmod(constants.lastupload_file, 0o644) msg_name = determine_hostname(config.display_name) logger.info("Successfully uploaded report for %s.", msg_name) + if config.register: + # direct to console after register + upload + logger.info('View the Red Hat Insights console at https://console.redhat.com/insights/') return elif upload.status_code in (413, 415): pconn.handle_fail_rcs(upload) raise RuntimeError('Upload failed.') else: - logger.error("Upload attempt %d of %d failed! Status code: %s", - tries + 1, config.retries, upload.status_code) - if tries + 1 != config.retries: - logger.info("Waiting %d seconds then retrying", - constants.sleep_time) - time.sleep(constants.sleep_time) - else: - logger.error("All attempts to upload have failed!") - logger.error("Please see %s for additional information", config.logging_file) - raise RuntimeError('Upload failed.') + err_msg = "%s" % upload.status_code + if hasattr(upload, 'reason'): + err_msg += ": %s" % upload.reason + display_upload_error_and_retry(config, tries, err_msg) + + +def display_upload_error_and_retry(config, tries, error_message): + logger.error("Upload attempt %d of %d failed! 
Reason: %s", + tries + 1, config.retries, error_message) + if tries + 1 < config.retries: + logger.info("Waiting %d seconds then retrying", + constants.sleep_time) + time.sleep(constants.sleep_time) + else: + logger.error("All attempts to upload have failed!") + print("Please see %s for additional information" % config.logging_file) + raise RuntimeError('Upload failed.') diff --git a/insights/client/collection_rules.py b/insights/client/collection_rules.py index 5d09c0feaf..3e969e8859 100644 --- a/insights/client/collection_rules.py +++ b/insights/client/collection_rules.py @@ -16,12 +16,83 @@ from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile from .constants import InsightsConstants as constants +from .map_components import map_rm_conf_to_components APP_NAME = constants.app_name logger = logging.getLogger(__name__) -net_logger = logging.getLogger('network') +NETWORK = constants.custom_network_log_level -expected_keys = ('commands', 'files', 'patterns', 'keywords') + +def correct_format(parsed_data, expected_keys, filename): + ''' + Ensure the parsed file matches the needed format + Returns True, on error + Returns False, None on success + ''' + # validate keys are what we expect + def is_list_of_strings(data): + ''' + Helper function for correct_format() + ''' + if data is None: + # nonetype, no data to parse. treat as empty list + return True + if not isinstance(data, list): + return False + for l in data: + if not isinstance(l, six.string_types): + return False + return True + + keys = parsed_data.keys() + invalid_keys = set(keys).difference(expected_keys) + if invalid_keys: + return True, ('Unknown section(s) in %s: ' % filename + ', '.join(invalid_keys) + + '\nValid sections are ' + ', '.join(expected_keys) + '.') + + # validate format (lists of strings) + for k in expected_keys: + if k in parsed_data: + if k == 'patterns' and isinstance(parsed_data['patterns'], dict): + if 'regex' not in parsed_data['patterns']: + return True, 'Patterns section contains an object but the "regex" key was not specified.' + if 'regex' in parsed_data['patterns'] and len(parsed_data['patterns']) > 1: + return True, 'Unknown keys in the patterns section. Only "regex" is valid.' + if not is_list_of_strings(parsed_data['patterns']['regex']): + return True, 'regex section under patterns must be a list of strings.' + continue + if not is_list_of_strings(parsed_data[k]): + return True, '%s section must be a list of strings.' % k + return False, None + + +def load_yaml(filename): + try: + with open(filename) as f: + loaded_yaml = yaml.safe_load(f) + if loaded_yaml is None: + logger.debug('%s is empty.', filename) + return {} + except (yaml.YAMLError, yaml.parser.ParserError) as e: + # can't parse yaml from conf + raise RuntimeError('ERROR: Cannot parse %s.\n' + 'If using any YAML tokens such as [] in an expression, ' + 'be sure to wrap the expression in quotation marks.\n\nError details:\n%s\n' % (filename, e)) + if not isinstance(loaded_yaml, dict): + # loaded data should be a dict with at least one key + raise RuntimeError('ERROR: Invalid YAML loaded.') + return loaded_yaml + + +def verify_permissions(f): + ''' + Verify 600 permissions on a file + ''' + mode = stat.S_IMODE(os.stat(f).st_mode) + if not mode == 0o600: + raise RuntimeError("Invalid permissions on %s. 
" + "Expected 0600 got %s" % (f, oct(mode))) + logger.debug("Correct file permissions on %s", f) class InsightsUploadConf(object): @@ -36,16 +107,32 @@ def __init__(self, config, conn=None): self.config = config self.fallback_file = constants.collection_fallback_file self.remove_file = config.remove_file + self.redaction_file = config.redaction_file + self.content_redaction_file = config.content_redaction_file + self.tags_file = config.tags_file self.collection_rules_file = constants.collection_rules_file self.collection_rules_url = self.config.collection_rules_url self.gpg = self.config.gpg + + # initialize an attribute to store the content of uploader.json + # once it is loaded and verified + self.uploader_json = None + + # set rm_conf as a class attribute so we can observe it + # in create_report + self.rm_conf = None + + # attribute to set when using file-redaction.yaml instead of + # remove.conf, for reporting purposes. True by default + # since new format is favored. + self.using_new_format = True + if conn: if self.collection_rules_url is None: if config.legacy_upload: self.collection_rules_url = conn.base_url + '/v1/static/uploader.v2.json' else: self.collection_rules_url = conn.base_url.split('/platform')[0] + '/v1/static/uploader.v2.json' - # self.collection_rules_url = conn.base_url + '/static/uploader.v2.json' self.conn = conn def validate_gpg_sig(self, path, sig=None): @@ -103,9 +190,8 @@ def get_collection_rules(self, raw=False): logger.debug("Attemping to download collection rules from %s", self.collection_rules_url) - net_logger.info("GET %s", self.collection_rules_url) try: - req = self.conn.session.get( + req = self.conn.get( self.collection_rules_url, headers=({'accept': 'text/plain'})) if req.status_code == 200: @@ -140,9 +226,8 @@ def fetch_gpg(self): self.collection_rules_url + ".asc") headers = ({'accept': 'text/plain'}) - net_logger.info("GET %s", self.collection_rules_url + '.asc') - config_sig = self.conn.session.get(self.collection_rules_url + '.asc', - headers=headers) + config_sig = self.conn.get(self.collection_rules_url + '.asc', + headers=headers) if config_sig.status_code == 200: logger.debug("Successfully downloaded GPG signature") return config_sig.text @@ -175,6 +260,9 @@ def get_conf_file(self): """ Get config from local config file, first try cache, then fallback. """ + if self.uploader_json: + return self.uploader_json + for conf_file in [self.collection_rules_file, self.fallback_file]: logger.debug("trying to read conf from: " + conf_file) conf = self.try_disk(conf_file, self.gpg) @@ -189,13 +277,14 @@ def get_conf_file(self): conf['file'] = conf_file logger.debug("Success reading config") logger.debug(json.dumps(conf)) + self.uploader_json = conf return conf - raise ValueError("ERROR: Unable to download conf or read it from disk!") + raise RuntimeError("ERROR: Unable to download conf or read it from disk!") def get_conf_update(self): """ - Get updated config from URL, fallback to local file if download fails. + Get updated config from URL. """ dyn_conf = self.get_collection_rules() @@ -217,128 +306,194 @@ def get_rm_conf_old(self): Get excluded files config from remove_file. """ # Convert config object into dict - logger.debug('Trying to parse as INI file.') + self.using_new_format = False parsedconfig = ConfigParser.RawConfigParser() - + if not self.remove_file: + # no filename defined, return nothing + logger.debug('remove_file is undefined') + return None + if not os.path.isfile(self.remove_file): + logger.debug('%s not found. 
No data files, commands,' + ' or patterns will be ignored, and no keyword obfuscation will occur.', self.remove_file) + return None + try: + verify_permissions(self.remove_file) + except RuntimeError as e: + if self.config.validate: + # exit if permissions invalid and using --validate + raise RuntimeError('ERROR: %s' % e) + logger.warning('WARNING: %s', e) try: parsedconfig.read(self.remove_file) + sections = parsedconfig.sections() + + if not sections: + # file has no sections, skip it + logger.debug('Remove.conf exists but no parameters have been defined.') + return None + + if sections != ['remove']: + raise RuntimeError('ERROR: invalid section(s) in remove.conf. Only "remove" is valid.') + + expected_keys = ('commands', 'files', 'patterns', 'keywords') rm_conf = {} for item, value in parsedconfig.items('remove'): if item not in expected_keys: - raise RuntimeError('Unknown section in remove.conf: ' + item + - '\nValid sections are ' + ', '.join(expected_keys) + '.') + raise RuntimeError('ERROR: Unknown key in remove.conf: ' + item + + '\nValid keys are ' + ', '.join(expected_keys) + '.') if six.PY3: - rm_conf[item] = value.strip().encode('utf-8').decode('unicode-escape').split(',') + rm_conf[item] = [v.strip() for v in value.strip().encode('utf-8').decode('unicode-escape').split(',')] else: - rm_conf[item] = value.strip().decode('string-escape').split(',') - return rm_conf + rm_conf[item] = [v.strip() for v in value.strip().decode('string-escape').split(',')] + self.rm_conf = rm_conf except ConfigParser.Error as e: # can't parse config file at all logger.debug(e) - raise RuntimeError('ERROR: Cannot parse the remove.conf file as a YAML file ' - 'nor as an INI file. Please check the file formatting.\n' + logger.debug('To configure using YAML, please use file-redaction.yaml and file-content-redaction.yaml.') + raise RuntimeError('ERROR: Cannot parse the remove.conf file.\n' 'See %s for more information.' % self.config.logging_file) + logger.warning('WARNING: remove.conf is deprecated. Please use file-redaction.yaml and file-content-redaction.yaml. See https://access.redhat.com/articles/4511681 for details.') + return self.rm_conf - def get_rm_conf(self): + def load_redaction_file(self, fname): ''' - Load remove conf. If it's a YAML-formatted file, try to load - the "new" version of remove.conf + Load the YAML-style file-redaction.yaml + or file-content-redaction.yaml files ''' - def is_list_of_strings(data): - ''' - Helper function for correct_format() - ''' - if data is None: - # nonetype, no data to parse. treat as empty list - return True - if not isinstance(data, list): - return False - for l in data: - if not isinstance(l, six.string_types): - return False - return True - - def correct_format(parsed_data): - ''' - Ensure the parsed file matches the needed format - Returns True, on error - ''' - # validate keys are what we expect - keys = parsed_data.keys() - invalid_keys = set(keys).difference(expected_keys) - if invalid_keys: - return True, ('Unknown section(s) in remove.conf: ' + ', '.join(invalid_keys) + - '\nValid sections are ' + ', '.join(expected_keys) + '.') - - # validate format (lists of strings) - for k in expected_keys: - if k in parsed_data: - if k == 'patterns' and isinstance(parsed_data['patterns'], dict): - if 'regex' not in parsed_data['patterns']: - return True, 'Patterns section contains an object but the "regex" key was not specified.' - if 'regex' in parsed_data['patterns'] and len(parsed_data['patterns']) > 1: - return True, 'Unknown keys in the patterns section. 
Only "regex" is valid.' - if not is_list_of_strings(parsed_data['patterns']['regex']): - return True, 'regex section under patterns must be a list of strings.' - continue - if not is_list_of_strings(parsed_data[k]): - return True, '%s section must be a list of strings.' % k - return False, None - - if not os.path.isfile(self.remove_file): - logger.debug('No remove.conf defined. No files/commands will be ignored.') + if fname not in (self.redaction_file, self.content_redaction_file): + # invalid function use, should never get here in a production situation + return None + if not fname: + # no filename defined, return nothing + logger.debug('redaction_file or content_redaction_file is undefined') + return None + if not fname or not os.path.isfile(fname): + if fname == self.redaction_file: + logger.debug('%s not found. No files or commands will be skipped.', self.redaction_file) + elif fname == self.content_redaction_file: + logger.debug('%s not found. ' + 'No patterns will be skipped and no keyword obfuscation will occur.', self.content_redaction_file) return None try: - with open(self.remove_file) as f: - rm_conf = yaml.safe_load(f) - if rm_conf is None: - logger.warn('WARNING: Remove file %s is empty.', self.remove_file) - return {} - except (yaml.YAMLError, yaml.parser.ParserError) as e: - # can't parse yaml from conf, try old style - logger.debug('ERROR: Cannot parse remove.conf as a YAML file.\n' - 'If using any YAML tokens such as [] in an expression, ' - 'be sure to wrap the expression in quotation marks.\n\nError details:\n%s\n', e) - return self.get_rm_conf_old() - if not isinstance(rm_conf, dict): - # loaded data should be a dict with at least one key (commands, files, patterns, keywords) - logger.debug('ERROR: Invalid YAML loaded.') - return self.get_rm_conf_old() - err, msg = correct_format(rm_conf) + verify_permissions(fname) + except RuntimeError as e: + if self.config.validate: + # exit if permissions invalid and using --validate + raise RuntimeError('ERROR: %s' % e) + logger.warning('WARNING: %s', e) + loaded = load_yaml(fname) + if fname == self.redaction_file: + err, msg = correct_format(loaded, ('commands', 'files', 'components'), fname) + elif fname == self.content_redaction_file: + err, msg = correct_format(loaded, ('patterns', 'keywords'), fname) if err: # YAML is correct but doesn't match the format we need raise RuntimeError('ERROR: ' + msg) + return loaded + + def get_rm_conf(self): + ''' + Try to load the the "new" version of + remove.conf (file-redaction.yaml and file-redaction.yaml) + ''' + rm_conf = {} + redact_conf = self.load_redaction_file(self.redaction_file) + content_redact_conf = self.load_redaction_file(self.content_redaction_file) + + if redact_conf: + rm_conf.update(redact_conf) + if content_redact_conf: + rm_conf.update(content_redact_conf) + + if not redact_conf and not content_redact_conf: + # no file-redaction.yaml or file-content-redaction.yaml defined, + # try to use remove.conf + self.rm_conf = self.get_rm_conf_old() + if self.config.core_collect: + self.rm_conf = map_rm_conf_to_components(self.rm_conf, self.get_conf_file()) + return self.rm_conf + # remove Nones, empty strings, and empty lists filtered_rm_conf = dict((k, v) for k, v in rm_conf.items() if v) - return filtered_rm_conf + self.rm_conf = filtered_rm_conf + if self.config.core_collect: + self.rm_conf = map_rm_conf_to_components(self.rm_conf, self.get_conf_file()) + return self.rm_conf - def validate(self): + def get_tags_conf(self): ''' - Validate remove.conf + Try to load the 
tags.conf file ''' - if not os.path.isfile(self.remove_file): - logger.warn("WARNING: Remove file does not exist") - return False - # Make sure permissions are 600 - mode = stat.S_IMODE(os.stat(self.remove_file).st_mode) - if not mode == 0o600: - logger.error("WARNING: Invalid remove file permissions. " - "Expected 0600 got %s" % oct(mode)) - return False + if not os.path.isfile(self.tags_file): + logger.info("%s does not exist", self.tags_file) + return None else: - logger.debug("Correct file permissions") + try: + load_yaml(self.tags_file) + logger.info("%s loaded successfully", self.tags_file) + except RuntimeError: + logger.warning("Invalid YAML. Unable to load %s", self.tags_file) + return None + + def validate(self): + ''' + Validate remove.conf and tags.conf + ''' + self.get_tags_conf() success = self.get_rm_conf() - if success is None or success is False: - logger.error('Could not parse remove.conf') - return False + if not success: + logger.info('No contents in the blacklist configuration to validate.') + return None # Using print here as this could contain sensitive information - if self.config.verbose or self.config.validate: - print('Remove file parsed contents:') - print(success) - logger.info('Parsed successfully.') + print('Blacklist configuration parsed contents:') + print(json.dumps(success, indent=4)) + logger.info('Parsed successfully.') return True - -if __name__ == '__main__': - from .config import InsightsConfig - print(InsightsUploadConf(InsightsConfig().load_all())) + def create_report(self): + def length(lst): + ''' + Because of how the INI remove.conf is parsed, + an empty value in the conf will produce + the value [''] when parsed. Do not include + these in the report + ''' + if len(lst) == 1 and lst[0] == '': + return 0 + return len(lst) + + num_commands = 0 + num_files = 0 + num_components = 0 + num_patterns = 0 + num_keywords = 0 + using_regex = False + + if self.rm_conf: + for key in self.rm_conf: + if key == 'commands': + num_commands = length(self.rm_conf['commands']) + if key == 'files': + num_files = length(self.rm_conf['files']) + if key == 'components': + num_components = length(self.rm_conf['components']) + if key == 'patterns': + if isinstance(self.rm_conf['patterns'], dict): + num_patterns = length(self.rm_conf['patterns']['regex']) + using_regex = True + else: + num_patterns = length(self.rm_conf['patterns']) + if key == 'keywords': + num_keywords = length(self.rm_conf['keywords']) + + return { + 'obfuscate': self.config.obfuscate, + 'obfuscate_hostname': self.config.obfuscate_hostname, + 'commands': num_commands, + 'files': num_files, + 'components': num_components, + 'patterns': num_patterns, + 'keywords': num_keywords, + 'using_new_format': self.using_new_format, + 'using_patterns_regex': using_regex + } diff --git a/insights/client/config.py b/insights/client/config.py index e0a96f395c..949a148a94 100644 --- a/insights/client/config.py +++ b/insights/client/config.py @@ -6,6 +6,9 @@ import six import sys from six.moves import configparser as ConfigParser +from distutils.version import LooseVersion +from .utilities import get_version_info +from insights.client.apps.manifests import manifests, content_types try: from .constants import InsightsConstants as constants @@ -14,6 +17,24 @@ logger = logging.getLogger(__name__) + +def _core_collect_default(): + ''' + Core collection should be disabled by default, unless + the RPM version 3.1 or above + ''' + rpm_version = get_version_info()['client_version'] + if not rpm_version: + # problem getting the 
version, default to False + return False + if LooseVersion(rpm_version) < LooseVersion(constants.core_collect_rpm_version): + # rpm version is older than the core collection release + return False + else: + # rpm version is equal to or newer than the core collection release + return True + + DEFAULT_OPTS = { 'analyze_container': { 'default': False, @@ -42,6 +63,12 @@ 'const': True, 'nargs': '?', }, + 'ansible_host': { + 'default': None, + 'opt': ['--ansible-host'], + 'help': 'Set an Ansible hostname for this system. ', + 'action': 'store' + }, 'authmethod': { # non-CLI 'default': 'BASIC' @@ -73,8 +100,16 @@ 'check_results': { 'default': False, 'opt': ['--check-results'], - 'help': "Check for insights results", - 'action': "store_true" + 'help': argparse.SUPPRESS, + 'action': "store_true", + 'group': 'actions' + }, + 'checkin': { + 'default': False, + 'opt': ['--checkin'], + 'help': 'Do a lightweight check-in instead of full upload', + 'action': "store_true", + 'group': 'actions' }, 'cmd_timeout': { # non-CLI @@ -84,11 +119,28 @@ # non-CLI 'default': None }, + 'app': { + 'default': None, + 'opt': ['--collector'], + 'help': 'Run the specified app and upload its results archive', + 'action': 'store', + 'group': 'actions', + 'dest': 'app' + }, + 'manifest': { + 'default': None, + 'opt': ['--manifest'], + 'help': 'Collect using the provided manifest', + 'action': 'store', + 'group': 'actions', + 'dest': 'manifest' + }, 'compliance': { 'default': False, 'opt': ['--compliance'], 'help': 'Scan the system using openscap and upload the report', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'compressor': { 'default': 'gz', @@ -102,6 +154,9 @@ 'help': 'Pass a custom config file', 'action': 'store' }, + 'core_collect': { + 'default': False + }, 'egg_path': { # non-CLI 'default': None @@ -117,7 +172,8 @@ 'default': False, 'opt': ['--disable-schedule'], 'help': 'Disable automatic scheduling', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'display_name': { 'default': None, @@ -130,6 +186,7 @@ 'opt': ['--enable-schedule'], 'help': 'Enable automatic scheduling for collection to run', 'action': 'store_true', + 'group': 'actions' }, 'gpg': { 'default': True, @@ -143,27 +200,36 @@ # non-CLI 'default': None }, + 'force': { + 'default': False, + 'opt': ['--force'], + 'help': argparse.SUPPRESS, + 'action': 'store_true' + }, 'group': { 'default': None, 'opt': ['--group'], - 'help': 'Group to add this system to during registration', + 'help': 'Group to add to this system', 'action': 'store', }, 'http_timeout': { # non-CLI 'default': 120.0 }, - 'insecure_connection': { - # non-CLI - 'default': False - }, 'keep_archive': { 'default': False, 'opt': ['--keep-archive'], - 'help': 'Do not delete archive after upload', + 'help': 'Store archive in /var/cache/insights-client/ after upload', 'action': 'store_true', 'group': 'debug' }, + 'list_specs': { + 'default': False, + 'opt': ['--list-specs'], + 'help': 'Show insights-client collection specs', + 'action': 'store_true', + 'group': 'actions' + }, 'logging_file': { 'default': constants.default_log_file, 'opt': ['--logging-file'], @@ -185,6 +251,10 @@ # non-CLI 'default': False, # legacy }, + 'no_proxy': { + # non-CLI + 'default': None + }, 'no_upload': { 'default': False, 'opt': ['--no-upload'], @@ -192,6 +262,12 @@ 'action': 'store_true', 'group': 'debug' }, + 'module': { + 'default': None, + 'opt': ['--module', '-m'], + 'help': argparse.SUPPRESS, + 'action': 'store' + }, 'obfuscate': { # non-CLI 'default': False 
@@ -236,16 +312,31 @@ 'default': False, 'opt': ['--register'], 'help': 'Register system to the Red Hat Insights Service', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions', }, 'remove_file': { # non-CLI 'default': os.path.join(constants.default_conf_dir, 'remove.conf') }, + 'tags_file': { + # non-CLI + 'default': os.path.join(constants.default_conf_dir, 'tags.yaml') + }, + 'redaction_file': { + # non-CLI + 'default': os.path.join(constants.default_conf_dir, 'file-redaction.yaml') + }, + 'content_redaction_file': { + # non-CLI + 'default': os.path.join(constants.default_conf_dir, 'file-content-redaction.yaml') + }, 'reregister': { 'default': False, 'opt': ['--force-reregister'], - 'help': 'Forcefully reregister this machine to Red Hat. Use only as directed.', + 'help': ('This flag is deprecated and it will be removed in a future release.' + 'Forcefully reregister this machine to Red Hat.' + 'Please use `insights-client --unregister && insights-client --register `instead'), 'action': 'store_true', 'group': 'debug', 'dest': 'reregister' @@ -263,7 +354,8 @@ 'default': False, 'opt': ['--show-results'], 'help': "Show insights about this host", - 'action': "store_true" + 'action': "store_true", + 'group': 'actions' }, 'silent': { 'default': False, @@ -292,7 +384,8 @@ 'default': False, 'opt': ['--test-connection'], 'help': 'Test connectivity to Red Hat', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'debug' }, 'to_json': { 'default': False, @@ -304,7 +397,8 @@ 'default': False, 'opt': ['--unregister'], 'help': 'Unregister system from the Red Hat Insights Service', - 'action': 'store_true' + 'action': 'store_true', + 'group': 'actions' }, 'upload_url': { # non-CLI @@ -331,15 +425,15 @@ 'validate': { 'default': False, 'opt': ['--validate'], - 'help': 'Validate remove.conf', - 'action': 'store_true' + 'help': 'Validate remove.conf and tags.yaml', + 'action': 'store_true', + 'group': 'actions' }, 'verbose': { 'default': False, 'opt': ['--verbose'], 'help': "DEBUG output to stdout", - 'action': "store_true", - 'group': 'debug' + 'action': "store_true" }, 'version': { 'default': False, @@ -347,9 +441,6 @@ 'help': "Display version", 'action': "store_true" }, - - # platform options - # hide help messages with SUPPRESS until we're ready to make them public 'legacy_upload': { # True: upload to insights classic API # False: upload to insights platform API @@ -358,45 +449,23 @@ 'payload': { 'default': None, 'opt': ['--payload'], - # 'help': 'Use Insights client to upload an archive', - 'help': argparse.SUPPRESS, + 'help': 'Use the Insights Client to upload an archive', 'action': 'store', - 'group': 'platform' + 'group': 'actions' }, 'content_type': { 'default': None, 'opt': ['--content-type'], - # 'help': 'Content type of the archive specified with --payload', - 'help': argparse.SUPPRESS, - 'action': 'store', - 'group': 'platform' + 'help': 'Content type of the archive specified with --payload', + 'action': 'store' }, 'diagnosis': { 'default': None, 'opt': ['--diagnosis'], - 'help': argparse.SUPPRESS, + 'help': 'Retrieve a diagnosis for this system', 'const': True, 'nargs': '?', - 'group': 'platform' - }, - # AWS options - 'portal_access': { - 'default': False, - 'opt': ['--portal-access'], - 'group': 'platform', - 'action': 'store_true', - 'help': 'Entitle an AWS instance with Red Hat and register with Red Hat Insights' - }, - 'portal_access_no_insights': { - 'default': False, - 'opt': ['--portal-access-no-insights'], - 'group': 'platform', - 'action': 'store_true', - 
'help': 'Entitle an AWS instance with Red Hat, but do not register with Red Hat Insights' - }, - 'portal_access_hydra_url': { - # non-CLI - 'default': constants.default_portal_access_hydra_url + 'group': 'actions' } } @@ -409,6 +478,7 @@ class InsightsConfig(object): ''' Insights client configuration ''' + def __init__(self, *args, **kwargs): # this is only used to print configuration errors upon initial load self._print_errors = False @@ -417,12 +487,18 @@ def __init__(self, *args, **kwargs): self._init_attrs = copy.copy(dir(self)) self._update_dict(DEFAULT_KVS) + + # initialize the real default for core_collect here + # instead of inside DEFAULT_KVS because calling + # this function at the module scope ignores unit test mocks + self.core_collect = _core_collect_default() + if args: self._update_dict(args[0]) self._update_dict(kwargs) + self._cli_opts = None self._imply_options() self._validate_options() - self._cli_opts = None def __str__(self): _str = ' ' @@ -483,7 +559,7 @@ def _boolify(v): return v # put this warning here so the error msg only prints once - if os.environ.get('HTTP_PROXY') and self._print_errors: + if os.environ.get('HTTP_PROXY') and not os.environ.get('HTTPS_PROXY') and self._print_errors: sys.stdout.write('WARNING: HTTP_PROXY is unused by insights-client. Please use HTTPS_PROXY.\n') # ignore these env as they are not config vars @@ -514,23 +590,26 @@ def _load_command_line(self, conf_only=False): self._update_dict(self._cli_opts) return parser = argparse.ArgumentParser() - debug_grp = parser.add_argument_group('Debug options') - platf_grp = parser.add_argument_group('Platform options') + arg_groups = { + "actions": parser.add_argument_group("actions"), + "debug": parser.add_argument_group("optional debug arguments") + } cli_options = dict((k, v) for k, v in DEFAULT_OPTS.items() if ( 'opt' in v)) - for _, o in cli_options.items(): - group = o.pop('group', None) - if group == 'debug': - g = debug_grp - elif group == 'platform': - g = platf_grp + for _, _o in cli_options.items(): + # cli_options contains references to DEFAULT_OPTS, so + # make a copy so we don't mutate DEFAULT_OPTS + o = copy.copy(_o) + group_name = o.pop('group', None) + if group_name is None: + group = parser else: - g = parser + group = arg_groups[group_name] optnames = o.pop('opt') # use argparse.SUPPRESS as CLI defaults so it won't parse # options that weren't specified o['default'] = argparse.SUPPRESS - g.add_argument(*optnames, **o) + group.add_argument(*optnames, **o) options = parser.parse_args() @@ -614,6 +693,11 @@ def _validate_options(self): if self.analyze_container: raise ValueError( '--analyze-container is no longer supported.') + if self.reregister: + raise ValueError( + "`force-reregistration` has been deprecated. 
Please use `insights-client " + "--unregister && insights-client --register` instead", + ) if self.use_atomic: raise ValueError( '--use-atomic is no longer supported.') @@ -626,15 +710,9 @@ def _validate_options(self): if self.enable_schedule and self.disable_schedule: raise ValueError( 'Conflicting options: --enable-schedule and --disable-schedule') - if self.portal_access and self.portal_access_no_insights: - raise ValueError('Conflicting options: --portal-access and --portal-access-no-insights') if self.payload and not self.content_type: raise ValueError( '--payload requires --content-type') - if not self.legacy_upload: - if self.group: - raise ValueError( - '--group is not supported at this time.') if self.offline: if self.to_json: raise ValueError('Cannot use --to-json in offline mode.') @@ -642,6 +720,14 @@ raise ValueError('Cannot check registration status in offline mode.') if self.test_connection: raise ValueError('Cannot run connection test in offline mode.') + if self.checkin: + raise ValueError('Cannot check-in in offline mode.') + if self.unregister: + raise ValueError('Cannot unregister in offline mode.') + if self.check_results: + raise ValueError('Cannot check results in offline mode.') + if self.diagnosis: + raise ValueError('Cannot retrieve a diagnosis in offline mode.') if self.output_dir and self.output_file: raise ValueError('Specify only one: --output-dir or --output-file.') if self.output_dir == '': @@ -675,6 +761,11 @@ if self.obfuscate: if self._print_errors: sys.stdout.write('WARNING: SOSCleaner reports will be created alongside the output archive.\n') + if self.module and not self.module.startswith('insights.client.apps.'): + raise ValueError('You can only run modules within the namespace insights.client.apps.*') + if self.app and not self.manifest: + raise ValueError("Unable to find app: %s\nList of available apps: %s" + % (self.app, ', '.join(sorted(manifests.keys())))) def _imply_options(self): ''' @@ -688,16 +779,16 @@ self.analyze_image_id): self.analyze_container = True self.to_json = self.to_json or self.analyze_container - self.register = (self.register or self.reregister) and not self.offline + self.register = self.register and not self.offline self.keep_archive = self.keep_archive or self.no_upload if self.to_json and self.quiet: self.diagnosis = True - if self.payload or self.diagnosis or self.compliance or self.show_results or self.check_results: + if self.test_connection: + self.net_debug = True + if self.payload or self.diagnosis or self.compliance or self.check_results or self.checkin: self.legacy_upload = False if self.payload and (self.logging_file == constants.default_log_file): self.logging_file = constants.default_payload_log - if os.path.exists(constants.register_marker_file): - self.register = True if self.output_dir or self.output_file: # do not upload in this case self.no_upload = True @@ -709,6 +800,13 @@ if self._print_errors: sys.stdout.write('The compressor {0} is not supported.
Using default: gz\n'.format(self.compressor)) self.compressor = 'gz' + if self.app: + # Get the manifest for the specified app + self.manifest = manifests.get(self.app) + self.content_type = content_types.get(self.app) + self.core_collect = True + self.legacy_upload = False + self._set_app_config() if self.output_dir: # get full path self.output_dir = os.path.abspath(self.output_dir) @@ -716,6 +814,49 @@ def _imply_options(self): # get full path self.output_file = os.path.abspath(self.output_file) self._determine_filename_and_extension() + if self._cli_opts and "ansible_host" in self._cli_opts and not self.register: + # Specific use case, explained here: + # + # Ansible hostname is, more or less, a second display name. + # However, there is no method in the legacy API to handle + # changes to the ansible hostname. So, if a user specifies + # --ansible-hostname on the CLI to change it like they would + # --display-name, in order to actually change it, we need to + # force disable legacy_upload to make the proper HTTP requests. + # + # As of now, registration still needs to be tied to the legacy + # API, so if the user has legacy upload enabled (the default), + # we can't force disable it when registering. Thus, if + # specifying --ansible-hostname alongside --register, all the + # necessary legacy API calls will still be made, the + # ansible-hostname will be packed into the archive, and the + # rest will be handled by ingress. Incidentally, if legacy + # upload *is* disabled, the ansible hostname will also be + # included in the upload metadata. + # + # The reason to explicitly look for ansible_host in the CLI + # parameters *only* is because, due to a customer request from + # long ago, a display_name specified in the config file should + # be applied as part of the upload, and conversely, specifying + # it on the command line (WITHOUT --register) should be a + # "once and done" option that does a single HTTP call to modify + # it. We are going to mimic that behavior with the Ansible + # hostname. + # + # Therefore, only force legacy_upload to False when attempting + # to change Ansible hostname from the CLI, when not registering. 
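
As an aside for reviewers, the rule spelled out in the comment above can be reduced to a few lines. The helper below is only an illustrative sketch and is not code from this patch; the names cli_opts and register stand in for the surrounding config attributes.

def ansible_host_forces_platform_upload(cli_opts, register):
    # force the non-legacy (platform) upload path only when --ansible-host
    # was passed on the command line and --register was not
    return bool(cli_opts) and 'ansible_host' in cli_opts and not register

# example: ansible_host_forces_platform_upload({'ansible_host': 'web01'}, register=False) -> True
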
+ self.legacy_upload = False + + def _set_app_config(self): + ''' + Set App specific insights config values that differ from the default values + Config values may have been set manually however, so need to take that into consideration + ''' + if self.app == 'malware-detection': + # Add extra retries for malware, mainly because it could take a long time to run + # and the results archive shouldn't be discarded after a single failed upload attempt + if self.retries < 3: + self.retries = 3 def _determine_filename_and_extension(self): ''' diff --git a/insights/client/connection.py b/insights/client/connection.py index 3270767f6a..b96cc5c993 100644 --- a/insights/client/connection.py +++ b/insights/client/connection.py @@ -8,10 +8,10 @@ import six import json import logging -import pkg_resources import platform import xml.etree.ElementTree as ET import warnings +import errno # import io from tempfile import TemporaryFile # from datetime import datetime, timedelta @@ -26,20 +26,20 @@ from .utilities import (determine_hostname, generate_machine_id, write_unregistered_file, - write_registered_file) + write_registered_file, + os_release_info, + largest_spec_in_archive, + size_in_mb) from .cert_auth import rhsmCertificate from .constants import InsightsConstants as constants from .url_cache import URLCache from insights import package_info -from insights.core.context import Context -from insights.parsers.os_release import OsRelease -from insights.parsers.redhat_release import RedhatRelease from insights.util.canonical_facts import get_canonical_facts warnings.simplefilter('ignore') APP_NAME = constants.app_name +NETWORK = constants.custom_network_log_level logger = logging.getLogger(__name__) -net_logger = logging.getLogger("network") """ urllib3's logging is chatty @@ -49,6 +49,8 @@ URLLIB3_LOGGER = logging.getLogger('requests.packages.urllib3.connectionpool') URLLIB3_LOGGER.setLevel(logging.WARNING) +REQUEST_FAILED_EXCEPTIONS = (requests.ConnectionError, requests.Timeout) + # TODO: Document this, or turn it into a real option if os.environ.get('INSIGHTS_DEBUG_HTTP'): import httplib @@ -60,6 +62,16 @@ requests_log.propagate = True +def _host_not_found(): + raise Exception("Error: failed to find host with matching machine-id. Run insights-client --status to check registration status") + + +def _api_request_failed(exception, message='The Insights API could not be reached.'): + logger.error(exception) + if message: + logger.error(message) + + class InsightsConnection(object): """ @@ -88,11 +100,6 @@ def __init__(self, config): self.cert_verify = True protocol = "https://" - insecure_connection = self.config.insecure_connection - if insecure_connection: - # This really should not be used. - protocol = "http://" - self.cert_verify = False self.auto_config = self.config.auto_config @@ -106,7 +113,7 @@ def __init__(self, config): self.base_url = protocol + constants.base_url else: self.base_url = protocol + self.config.base_url - # end hack. in the future, make cloud.redhat.com the default + # end hack. 
in the future, make console.redhat.com the default self.upload_url = self.config.upload_url if self.upload_url is None: @@ -121,6 +128,8 @@ def __init__(self, config): # workaround for a workaround for a workaround base_url_base = self.base_url.split('/platform')[0] self.branch_info_url = base_url_base + '/v1/branch_info' + self.inventory_url = self.api_url + "/inventory/v1" + self.authmethod = self.config.authmethod self.systemid = self.config.systemid or None self.get_proxies() @@ -151,7 +160,7 @@ def _init_session(self): # HACKY try: # Need to make a request that will fail to get proxies set up - net_logger.info("GET %s", self.base_url) + logger.log(NETWORK, "GET %s", self.base_url) session.request( "GET", self.base_url, timeout=self.config.http_timeout) except requests.ConnectionError: @@ -168,11 +177,62 @@ def _init_session(self): connection.proxy_headers = auth_map return session + def _http_request(self, url, method, log_response_text=True, **kwargs): + ''' + Perform an HTTP request, net logging, and error handling + Parameters + url - URL to perform the request against + method - HTTP method, used for logging + kwargs - Rest of the args to pass to the request function + Returns + HTTP response object + ''' + logger.log(NETWORK, "%s %s", method, url) + try: + res = self.session.request(url=url, method=method, timeout=self.config.http_timeout, **kwargs) + except Exception: + raise + logger.log(NETWORK, "HTTP Status: %d %s", res.status_code, res.reason) + if log_response_text or res.status_code != 200: + logger.log(NETWORK, "HTTP Response Text: %s", res.text) + return res + + def get(self, url, **kwargs): + try: + return self._http_request(url, 'GET', **kwargs) + except Exception: + raise + + def post(self, url, **kwargs): + try: + return self._http_request(url, 'POST', **kwargs) + except Exception: + raise + + def put(self, url, **kwargs): + try: + return self._http_request(url, 'PUT', **kwargs) + except Exception: + raise + + def patch(self, url, **kwargs): + try: + return self._http_request(url, 'PATCH', **kwargs) + except Exception: + raise + + def delete(self, url, **kwargs): + try: + return self._http_request(url, 'DELETE', **kwargs) + except Exception: + raise + @property def user_agent(self): """ Generates and returns a string suitable for use as a request user-agent """ + import pkg_resources core_version = "insights-core" pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse(core_version)) if pkg is not None: @@ -180,10 +240,17 @@ def user_agent(self): else: core_version = "Core %s" % package_info["VERSION"] - client_version = "insights-client" - pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse(client_version)) - if pkg is not None: - client_version = "%s/%s" % (pkg.project_name, pkg.version) + try: + from insights_client import constants as insights_client_constants + client_version = "insights-client/{0}".format(insights_client_constants.InsightsConstants.version) + except ImportError: + client_version = "insights-client" + + if os.path.isfile(constants.ppidfile): + with open(constants.ppidfile, 'r') as f: + parent_process = f.read() + else: + parent_process = "unknown" requests_version = None pkg = pkg_resources.working_set.find(pkg_resources.Requirement.parse("requests")) @@ -192,32 +259,13 @@ def user_agent(self): python_version = "%s %s" % (platform.python_implementation(), platform.python_version()) - os_family = "Unknown" - os_release = "" - for p in ["/etc/os-release", "/etc/redhat-release"]: - try: - with open(p) as f: - data = 
f.readlines() - - ctx = Context(content=data, path=p, relative_path=p) - if p == "/etc/os-release": - rls = OsRelease(ctx) - os_family = rls.data.get("NAME") - os_release = rls.data.get("VERSION_ID") - elif p == "/etc/redhat-release": - rls = RedhatRelease(ctx) - os_family = rls.product - os_release = rls.version - break - except IOError: - continue - except Exception as e: - logger.warning("Failed to detect OS version: %s", e) + os_family, os_release = os_release_info() kernel_version = "%s %s" % (platform.system(), platform.release()) - ua = "{client_version} ({core_version}; {requests_version}) {os_family} {os_release} ({python_version}; {kernel_version})".format( + ua = "{client_version} ({core_version}; {requests_version}) {os_family} {os_release} ({python_version}; {kernel_version}); {parent_process}".format( client_version=client_version, core_version=core_version, + parent_process=parent_process, python_version=python_version, os_family=os_family, os_release=os_release, @@ -227,6 +275,48 @@ def user_agent(self): return ua + def get_proxy(self, proxy_info, no_proxy_info, environment): + proxies = None + proxy_auth = None + if '@' in proxy_info: + scheme = proxy_info.split(':')[0] + '://' + logger.debug("Proxy Scheme: %s", scheme) + location = proxy_info.split('@')[1] + logger.debug("Proxy Location: %s", location) + username = proxy_info.split( + '@')[0].split(':')[1].replace('/', '') + logger.debug("Proxy User: %s", username) + password = proxy_info.split('@')[0].split(':')[2] + proxy_auth = requests.auth._basic_auth_str(username, password) + proxy_info = scheme + location + logger.debug("%s Proxy: %s", environment, proxy_info) + proxies = {"https": proxy_info} + if no_proxy_info: + insights_service_host = urlparse(self.base_url).hostname + logger.debug('Found NO_PROXY set. Checking NO_PROXY %s against base URL %s.', no_proxy_info, insights_service_host) + # Split the no_proxy entries on ',', then strip any leading and trailing whitespace. Create a clean list for the + # for loop. 
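
For reference, the matching rules applied to each cleaned NO_PROXY entry can be exercised in isolation. This is a minimal sketch, assuming only the three cases get_proxy() handles (a '*' wildcard, a leading '.' or '*' suffix match, and an exact hostname match); it is not the client's implementation.

def bypass_proxy(no_proxy, hostname):
    for entry in [h.strip() for h in no_proxy.split(',')]:
        if entry == '*':
            return True  # wildcard disables all proxies
        if entry.startswith(('.', '*')) and hostname.endswith(entry.lstrip('*')):
            return True  # suffix (domain range) match
        if entry == hostname:
            return True  # exact match
    return False

# bypass_proxy('.redhat.com, localhost', 'cert-api.access.redhat.com') -> True
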
+ no_proxy_info = [host.strip() for host in no_proxy_info.split(',')] + for no_proxy_host in no_proxy_info: + logger.debug('Checking %s against %s', no_proxy_host, insights_service_host) + if no_proxy_host == '*': + proxies = None + proxy_auth = None + logger.debug('Found NO_PROXY asterisk(*) wildcard, disabling all proxies.') + break + elif no_proxy_host.startswith('.') or no_proxy_host.startswith('*'): + if insights_service_host.endswith(no_proxy_host.replace('*', '')): + proxies = None + proxy_auth = None + logger.debug('Found NO_PROXY range %s matching %s', no_proxy_host, insights_service_host) + break + elif no_proxy_host == insights_service_host: + proxies = None + proxy_auth = None + logger.debug('Found NO_PROXY %s exactly matching %s', no_proxy_host, insights_service_host) + break + return proxies, proxy_auth + def get_proxies(self): """ Determine proxy configuration @@ -234,72 +324,26 @@ # Get proxy from ENV or Config proxies = None proxy_auth = None - no_proxy = os.environ.get('NO_PROXY') - logger.debug("NO PROXY: %s", no_proxy) # CONF PROXY TAKES PRECEDENCE OVER ENV PROXY conf_proxy = self.config.proxy - if ((conf_proxy is not None and - conf_proxy.lower() != 'None'.lower() and - conf_proxy != "")): - if '@' in conf_proxy: - scheme = conf_proxy.split(':')[0] + '://' - logger.debug("Proxy Scheme: %s", scheme) - location = conf_proxy.split('@')[1] - logger.debug("Proxy Location: %s", location) - username = conf_proxy.split( - '@')[0].split(':')[1].replace('/', '') - logger.debug("Proxy User: %s", username) - password = conf_proxy.split('@')[0].split(':')[2] - proxy_auth = requests.auth._basic_auth_str(username, password) - conf_proxy = scheme + location - logger.debug("CONF Proxy: %s", conf_proxy) - proxies = {"https": conf_proxy} + conf_no_proxy = self.config.no_proxy + + if conf_proxy: + proxies, proxy_auth = self.get_proxy(conf_proxy, conf_no_proxy, "CONF") # HANDLE NO PROXY CONF PROXY EXCEPTION VERBIAGE + no_proxy = os.environ.get('NO_PROXY') if no_proxy and conf_proxy: logger.debug("You have environment variable NO_PROXY set " "as well as 'proxy' set in your configuration file. " "NO_PROXY environment variable will be ignored.") - # IF NO CONF PROXY, GET ENV PROXY AND NO PROXY - if proxies is None: + # IF NO CONF PROXY and NO_PROXY none in conf, GET ENV PROXY AND NO PROXY + if proxies is None and conf_no_proxy is None: env_proxy = os.environ.get('HTTPS_PROXY') if env_proxy: - if '@' in env_proxy: - scheme = env_proxy.split(':')[0] + '://' - logger.debug("Proxy Scheme: %s", scheme) - location = env_proxy.split('@')[1] - logger.debug("Proxy Location: %s", location) - username = env_proxy.split('@')[0].split(':')[1].replace('/', '') - logger.debug("Proxy User: %s", username) - password = env_proxy.split('@')[0].split(':')[2] - proxy_auth = requests.auth._basic_auth_str(username, password) - env_proxy = scheme + location - logger.debug("ENV Proxy: %s", env_proxy) - proxies = {"https": env_proxy} - if no_proxy: - insights_service_host = urlparse(self.base_url).hostname - logger.debug('Found NO_PROXY set.
Checking NO_PROXY %s against base URL %s.', no_proxy, insights_service_host) - for no_proxy_host in no_proxy.split(','): - logger.debug('Checking %s against %s', no_proxy_host, insights_service_host) - if no_proxy_host == '*': - proxies = None - proxy_auth = None - logger.debug('Found NO_PROXY asterisk(*) wildcard, disabling all proxies.') - break - elif no_proxy_host.startswith('.') or no_proxy_host.startswith('*'): - if insights_service_host.endswith(no_proxy_host.replace('*', '')): - proxies = None - proxy_auth = None - logger.debug('Found NO_PROXY range %s matching %s', no_proxy_host, insights_service_host) - break - elif no_proxy_host == insights_service_host: - proxies = None - proxy_auth = None - logger.debug('Found NO_PROXY %s exactly matching %s', no_proxy_host, insights_service_host) - break - + proxies, proxy_auth = self.get_proxy(env_proxy, no_proxy, "ENV") self.proxies = proxies self.proxy_auth = proxy_auth @@ -315,15 +359,11 @@ def _legacy_test_urls(self, url, method): paths = (url.path + '/', '', '/r', '/r/insights') for ext in paths: try: - logger.debug("Testing: %s", test_url + ext) - if method is "POST": - test_req = self.session.post( - test_url + ext, timeout=self.config.http_timeout, data=test_flag) - elif method is "GET": - test_req = self.session.get(test_url + ext, timeout=self.config.http_timeout) - logger.info("HTTP Status Code: %d", test_req.status_code) - logger.info("HTTP Status Text: %s", test_req.reason) - logger.info("HTTP Response Text: %s", test_req.text) + logger.log(NETWORK, "Testing: %s", test_url + ext) + if method == "POST": + test_req = self.post(test_url + ext, data=test_flag) + elif method == "GET": + test_req = self.get(test_url + ext) # Strata returns 405 on a GET sometimes, this isn't a big deal if test_req.status_code in (200, 201): logger.info( @@ -347,19 +387,16 @@ def _test_urls(self, url, method): if self.config.legacy_upload: return self._legacy_test_urls(url, method) try: - logger.debug('Testing %s', url) - if method is 'POST': + logger.log(NETWORK, 'Testing %s', url) + if method == 'POST': test_tar = TemporaryFile(mode='rb', suffix='.tar.gz') test_files = { 'file': ('test.tar.gz', test_tar, 'application/vnd.redhat.advisor.collection+tgz'), 'metadata': '{\"test\": \"test\"}' } - test_req = self.session.post(url, timeout=self.config.http_timeout, files=test_files) - elif method is "GET": - test_req = self.session.get(url, timeout=self.config.http_timeout) - logger.info("HTTP Status Code: %d", test_req.status_code) - logger.info("HTTP Status Text: %s", test_req.reason) - logger.info("HTTP Response Text: %s", test_req.text) + test_req = self.post(url, files=test_files) + elif method == "GET": + test_req = self.get(url) if test_req.status_code in (200, 201, 202): logger.info( "Successfully connected to: %s", url) @@ -394,17 +431,16 @@ def test_connection(self, rc=0): "SUCCESS" if api_success else "FAILURE") if upload_success and api_success: logger.info("Connectivity tests completed successfully") - logger.info("See %s for more details.", self.config.logging_file) + print("See %s for more details." % self.config.logging_file) else: logger.info("Connectivity tests completed with some errors") - logger.info("See %s for more details.", self.config.logging_file) + print("See %s for more details." % self.config.logging_file) rc = 1 except requests.ConnectionError as exc: print(exc) logger.error('Connectivity test failed! 
' 'Please check your network configuration') - logger.error('Additional information may be in' - ' /var/log/' + APP_NAME + "/" + APP_NAME + ".log") + print('Additional information may be in %s' % self.config.logging_file) return 1 return rc @@ -413,30 +449,22 @@ def handle_fail_rcs(self, req): Bail out if we get a 401 and leave a message """ - try: - logger.debug("HTTP Status Code: %s", req.status_code) - logger.debug("HTTP Response Text: %s", req.text) - logger.debug("HTTP Response Reason: %s", req.reason) - logger.debug("HTTP Response Content: %s", req.content) - except: - logger.error("Malformed HTTP Request.") - # attempt to read the HTTP response JSON message try: - logger.debug("HTTP Response Message: %s", req.json()["message"]) + logger.log(NETWORK, "HTTP Response Message: %s", req.json()["message"]) except: logger.debug("No HTTP Response message present.") # handle specific status codes if req.status_code >= 400: - logger.info("Debug Information:\nHTTP Status Code: %s", + logger.debug("Debug Information:\nHTTP Status Code: %s", req.status_code) - logger.info("HTTP Status Text: %s", req.reason) + logger.debug("HTTP Status Text: %s", req.reason) if req.status_code == 401: - logger.error("Authorization Required.") - logger.error("Please ensure correct credentials " - "in " + constants.default_conf_file) - logger.debug("HTTP Response Text: %s", req.text) + logger.error("Please ensure that the system is registered " + "with RHSM for CERT auth, or that correct " + "credentials are set in %s for BASIC auth.", self.config.conf) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 402: # failed registration because of entitlement limit hit logger.debug('Registration failed by 402 error.') @@ -444,10 +472,10 @@ def handle_fail_rcs(self, req): logger.error(req.json()["message"]) except LookupError: logger.error("Got 402 but no message") - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) except: logger.error("Got 402 but no message") - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 403 and self.auto_config: # Insights disabled in satellite rhsm_hostname = urlparse(self.base_url).hostname @@ -462,10 +490,10 @@ def handle_fail_rcs(self, req): write_unregistered_file(unreg_date) except LookupError: unreg_date = "412, but no unreg_date or message" - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) except: unreg_date = "412, but no unreg_date or message" - logger.debug("HTTP Response Text: %s", req.text) + logger.log(NETWORK, "HTTP Response Text: %s", req.text) if req.status_code == 413: logger.error('Archive is too large to upload.') if req.status_code == 415: @@ -514,10 +542,7 @@ def get_branch_info(self): logger.debug(u'Obtaining branch information from %s', self.branch_info_url) - net_logger.info(u'GET %s', self.branch_info_url) - response = self.session.get(self.branch_info_url, - timeout=self.config.http_timeout) - logger.debug(u'GET branch_info status: %s', response.status_code) + response = self.get(self.branch_info_url) if response.status_code != 200: logger.debug("There was an error obtaining branch information.") logger.debug(u'Bad status from server: %s', response.status_code) @@ -528,8 +553,8 @@ def get_branch_info(self): logger.debug(u'Branch information: %s', json.dumps(branch_info)) # Determine if we are connected to Satellite 5 - if 
((branch_info[u'remote_branch'] is not -1 and - branch_info[u'remote_leaf'] is -1)): + if ((branch_info[u'remote_branch'] != -1 and + branch_info[u'remote_leaf'] == -1)): self.get_satellite5_info(branch_info) # logger.debug(u'Saving branch info to file.') @@ -565,10 +590,9 @@ def create_system(self, new_machine_id=False): post_system_url = self.api_url + '/v1/systems' logger.debug("POST System: %s", post_system_url) logger.debug(data) - net_logger.info("POST %s", post_system_url) - return self.session.post(post_system_url, - headers={'Content-Type': 'application/json'}, - data=data) + return self.post(post_system_url, + headers={'Content-Type': 'application/json'}, + data=data) # -LEGACY- def group_systems(self, group_name, systems): @@ -584,35 +608,24 @@ def group_systems(self, group_name, systems): group_path = self.api_url + '/v1/groups' group_get_path = group_path + ('?display_name=%s' % quote(group_name)) - logger.debug("GET group: %s", group_get_path) - net_logger.info("GET %s", group_get_path) - get_group = self.session.get(group_get_path) - logger.debug("GET group status: %s", get_group.status_code) + get_group = self.get(group_get_path) if get_group.status_code == 200: api_group_id = get_group.json()['id'] if get_group.status_code == 404: # Group does not exist, POST to create - logger.debug("POST group") data = json.dumps({'display_name': group_name}) - net_logger.info("POST", group_path) - post_group = self.session.post(group_path, - headers=headers, - data=data) - logger.debug("POST group status: %s", post_group.status_code) - logger.debug("POST Group: %s", post_group.json()) + post_group = self.post(group_path, + headers=headers, + data=data) self.handle_fail_rcs(post_group) api_group_id = post_group.json()['id'] - logger.debug("PUT group") data = json.dumps(systems) - net_logger.info("PUT %s", group_path + ('/%s/systems' % api_group_id)) - put_group = self.session.put(group_path + - ('/%s/systems' % api_group_id), - headers=headers, - data=data) - logger.debug("PUT group status: %d", put_group.status_code) - logger.debug("PUT Group: %s", put_group.json()) + self.put(group_path + + ('/%s/systems' % api_group_id), + headers=headers, + data=data) # -LEGACY- # Keeping this function around because it's not private and I don't know if anything else uses it @@ -628,13 +641,16 @@ def do_group(self): def _legacy_api_registration_check(self): ''' Check registration status through API + True system exists in inventory + False connection or parsing response error + None system is not yet registered + string system is unregistered ''' logger.debug('Checking registration status...') machine_id = generate_machine_id() try: url = self.api_url + '/v1/systems/' + machine_id - net_logger.info("GET %s", url) - res = self.session.get(url, timeout=self.config.http_timeout) + res = self.get(url) except requests.ConnectionError: # can't connect, run connection test logger.error('Connection timed out. 
Running connection test...') @@ -646,23 +662,31 @@ def _legacy_api_registration_check(self): # True for registered # False for unregistered # None for system 404 - try: - # check the 'unregistered_at' key of the response - unreg_status = json.loads(res.content).get('unregistered_at', 'undefined') - # set the global account number - self.config.account_number = json.loads(res.content).get('account_number', 'undefined') - except ValueError: - # bad response, no json object + if res.status_code != 200: + self.handle_fail_rcs(res) + if res.status_code not in (200, 404): + # Network error returns False return False - if unreg_status == 'undefined': - # key not found, machine not yet registered - return None - elif unreg_status is None: - # unregistered_at = null, means this machine IS registered - return True else: - # machine has been unregistered, this is a timestamp - return unreg_status + try: + # check the 'unregistered_at' key of the response + unreg_status = json.loads(res.content).get('unregistered_at', 'undefined') + # set the global account number + self.config.account_number = json.loads(res.content).get('account_number', 'undefined') + except ValueError: + # bad response, no json object + return False + if unreg_status == 'undefined': + # key not found, machine not yet registered + return None + elif unreg_status is None: + # unregistered_at = null, means this machine IS registered + return True + else: + # machine has been unregistered, this is a timestamp + # This is done for legacy servers that responded with the timestamp of disconnection + # TODO: consider to remove this condition + return unreg_status def _fetch_system_by_machine_id(self): ''' @@ -678,12 +702,10 @@ def _fetch_system_by_machine_id(self): if self.config.legacy_upload: url = self.base_url + '/platform/inventory/v1/hosts?insights_id=' + machine_id else: - url = self.base_url + '/inventory/v1/hosts?insights_id=' + machine_id - net_logger.info("GET %s", url) - res = self.session.get(url, timeout=self.config.http_timeout) - except (requests.ConnectionError, requests.Timeout) as e: - logger.error(e) - logger.error('The Insights API could not be reached.') + url = self.inventory_url + '/hosts?insights_id=' + machine_id + res = self.get(url) + except REQUEST_FAILED_EXCEPTIONS as e: + _api_request_failed(e) return None try: if (self.handle_fail_rcs(res)): @@ -730,8 +752,7 @@ def _legacy_unregister(self): try: logger.debug("Unregistering %s", machine_id) url = self.api_url + "/v1/systems/" + machine_id - net_logger.info("DELETE %s", url) - self.session.delete(url) + self.delete(url) logger.info( "Successfully unregistered from the Red Hat Insights Service") return True @@ -748,11 +769,13 @@ def unregister(self): return self._legacy_unregister() results = self._fetch_system_by_machine_id() + if not results: + logger.info('This host could not be found.') + return False try: logger.debug("Unregistering host...") - url = self.api_url + "/inventory/v1/hosts/" + results[0]['id'] - net_logger.info("DELETE %s", url) - response = self.session.delete(url) + url = self.inventory_url + "/hosts/" + results[0]['id'] + response = self.delete(url) response.raise_for_status() logger.info( "Successfully unregistered from the Red Hat Insights Service") @@ -807,6 +830,32 @@ def register(self): else: return (message, client_hostname, "None", "") + def _archive_too_big(self, archive_file): + ''' + Some helpful messaging for when the archive is too large for ingress + ''' + archive_filesize = size_in_mb( + os.stat(archive_file).st_size) + 
logger.info("Archive is {fsize} MB which is larger than the maximum allowed size of {flimit} MB.".format( + fsize=archive_filesize, flimit=constants.archive_filesize_max)) + + if not self.config.core_collect: + logger.error("Cannot estimate the spec with largest filesize because core collection is not enabled. " + "Enable core collection by setting core_collect=True in %s, and attempt the upload again.", self.config.conf) + return + + biggest_file = largest_spec_in_archive(archive_file) + logger.info("The largest file in the archive is %s at %s MB.", biggest_file[0], size_in_mb(biggest_file[1])) + logger.info("Please add the following spec to /etc/insights-client/file-redaction.yaml." + "According to the documentation https://access.redhat.com/articles/4511681\n\n" + "**** /etc/insights-client/file-redaction.yaml ****\n" + "# file-redaction.yaml\n" + "# Omit entire output of files\n" + "# Files can be specified either by full filename or\n" + "# by the 'symbolic_name' listed in .cache.json\n" + "files:\n" + "- %s \n**** ****", biggest_file[2]) + # -LEGACY- def _legacy_upload_archive(self, data_collected, duration): ''' @@ -832,15 +881,18 @@ def _legacy_upload_archive(self, data_collected, duration): logger.debug("Uploading %s to %s", data_collected, upload_url) headers = {'x-rh-collection-time': str(duration)} - net_logger.info("POST %s", upload_url) - upload = self.session.post(upload_url, files=files, headers=headers) + try: + upload = self.post(upload_url, files=files, headers=headers) + except Exception: + raise - logger.debug("Upload status: %s %s %s", - upload.status_code, upload.reason, upload.text) if upload.status_code in (200, 201): the_json = json.loads(upload.text) else: logger.error("Upload archive failed with status code %s", upload.status_code) + if upload.status_code == 413: + # let the user know what file is bloating the archive + self._archive_too_big(data_collected) return upload try: self.config.account_number = the_json["upload"]["account_number"] @@ -849,7 +901,7 @@ def _legacy_upload_archive(self, data_collected, duration): logger.debug("Upload duration: %s", upload.elapsed) return upload - def upload_archive(self, data_collected, content_type, duration): + def upload_archive(self, data_collected, content_type, duration=None): """ Do an HTTPS Upload of the archive """ @@ -866,6 +918,9 @@ def upload_archive(self, data_collected, content_type, duration): if self.config.display_name: # add display_name to canonical facts c_facts['display_name'] = self.config.display_name + if self.config.ansible_host: + # add ansible_host to canonical facts + c_facts['ansible_host'] = self.config.ansible_host if self.config.branch_info: c_facts["branch_info"] = self.config.branch_info c_facts["satellite_id"] = self.config.branch_info["remote_leaf"] @@ -876,23 +931,33 @@ def upload_archive(self, data_collected, content_type, duration): 'file': (file_name, open(data_collected, 'rb'), content_type), 'metadata': c_facts } + logger.debug('content-type: %s', content_type) logger.debug("Uploading %s to %s", data_collected, upload_url) + try: + upload = self.post(upload_url, files=files, headers={}) + except Exception: + raise - net_logger.info("POST %s", upload_url) - upload = self.session.post(upload_url, files=files, headers={}) - - logger.debug("Upload status: %s %s %s", - upload.status_code, upload.reason, upload.text) logger.debug('Request ID: %s', upload.headers.get('x-rh-insights-request-id', None)) if upload.status_code in (200, 202): # 202 from platform, no json response 
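
The 413 branches here and _archive_too_big() hinge on a simple size calculation. The helper below is a hedged sketch of that arithmetic against the 100 MB archive_filesize_max constant introduced elsewhere in this patch; it is not the utilities (size_in_mb, largest_spec_in_archive) the client actually imports.

import os

ARCHIVE_FILESIZE_MAX_MB = 100  # mirrors constants.archive_filesize_max

def archive_exceeds_limit(path):
    size_mb = os.stat(path).st_size / (1024.0 * 1024.0)  # bytes -> MB
    return size_mb > ARCHIVE_FILESIZE_MAX_MB
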
logger.debug(upload.text) # upload = registration on platform - write_registered_file() + try: + write_registered_file() + except OSError as e: + if e.errno == errno.EACCES and os.getuid() != 0: + # if permissions error as non-root, ignore + pass + else: + logger.error('Could not update local registration record: %s', str(e)) else: logger.debug( "Upload archive failed with status code %s", upload.status_code) + if upload.status_code == 413: + # let the user know what file is bloating the archive + self._archive_too_big(data_collected) return upload logger.debug("Upload duration: %s", upload.elapsed) return upload @@ -903,19 +968,16 @@ def _legacy_set_display_name(self, display_name): try: url = self.api_url + '/v1/systems/' + machine_id - net_logger.info("GET %s", url) - res = self.session.get(url, timeout=self.config.http_timeout) + res = self.get(url) old_display_name = json.loads(res.content).get('display_name', None) if display_name == old_display_name: logger.debug('Display name unchanged: %s', old_display_name) return True - net_logger.info("PUT %s", url) - res = self.session.put(url, - timeout=self.config.http_timeout, - headers={'Content-Type': 'application/json'}, - data=json.dumps( - {'display_name': display_name})) + res = self.put(url, + headers={'Content-Type': 'application/json'}, + data=json.dumps( + {'display_name': display_name})) if res.status_code == 200: logger.info('System display name changed from %s to %s', old_display_name, @@ -929,8 +991,8 @@ def _legacy_set_display_name(self, display_name): logger.error('Unable to set display name: %s %s', res.status_code, res.text) return False - except (requests.ConnectionError, requests.Timeout, ValueError) as e: - logger.error(e) + except REQUEST_FAILED_EXCEPTIONS + (ValueError,) as e: + _api_request_failed(e, None) # can't connect, run connection test return False @@ -946,13 +1008,11 @@ def set_display_name(self, display_name): return system inventory_id = system[0]['id'] - req_url = self.base_url + '/inventory/v1/hosts/' + inventory_id + req_url = self.inventory_url + '/hosts/' + inventory_id try: - net_logger.info("PATCH %s", req_url) - res = self.session.patch(req_url, json={'display_name': display_name}) - except (requests.ConnectionError, requests.Timeout) as e: - logger.error(e) - logger.error('The Insights API could not be reached.') + res = self.patch(req_url, json={'display_name': display_name}) + except REQUEST_FAILED_EXCEPTIONS as e: + _api_request_failed(e) return False if (self.handle_fail_rcs(res)): logger.error('Could not update display name.') @@ -960,6 +1020,27 @@ def set_display_name(self, display_name): logger.info('Display name updated to ' + display_name + '.') return True + def set_ansible_host(self, ansible_host): + ''' + Set Ansible hostname of a system independently of upload. + ''' + system = self._fetch_system_by_machine_id() + if not system: + return system + inventory_id = system[0]['id'] + + req_url = self.inventory_url + '/hosts/' + inventory_id + try: + res = self.patch(req_url, json={'ansible_host': ansible_host}) + except REQUEST_FAILED_EXCEPTIONS as e: + _api_request_failed(e) + return False + if (self.handle_fail_rcs(res)): + logger.error('Could not update Ansible hostname.') + return False + logger.info('Ansible hostname updated to ' + ansible_host + '.') + return True + def get_diagnosis(self, remediation_id=None): ''' Reach out to the platform and fetch a diagnosis. @@ -972,11 +1053,9 @@ def get_diagnosis(self, remediation_id=None): # validate this? 
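
For context, set_display_name() and set_ansible_host() both reduce to a single PATCH against the inventory hosts endpoint. The snippet below only illustrates that request shape with requests and assumes the certificate/auth handling; the real client goes through its session, proxy, and error-handling wrappers shown above.

import requests

def patch_host_field(inventory_url, host_id, field, value, cert=None):
    # e.g. field='ansible_host' or 'display_name'; auth setup is assumed
    return requests.patch(inventory_url + '/hosts/' + host_id,
                          json={field: value}, cert=cert, timeout=120)
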
params['remediation'] = remediation_id try: - net_logger.info("GET %s", diag_url) - res = self.session.get(diag_url, params=params, timeout=self.config.http_timeout) + res = self.get(diag_url, params=params) except (requests.ConnectionError, requests.Timeout) as e: - logger.error(e) - logger.error('The Insights API could not be reached.') + _api_request_failed(e) return False if (self.handle_fail_rcs(res)): logger.error('Unable to get diagnosis data: %s %s', @@ -984,7 +1063,7 @@ def get_diagnosis(self, remediation_id=None): return None return res.json() - def _get(self, url): + def _cached_get(self, url): ''' Submits a GET request to @url, caching the result, and returning the response body, if any. It makes the response status code opaque @@ -999,8 +1078,7 @@ def _get(self, url): if item is not None: headers["If-None-Match"] = item.etag - net_logger.info("GET %s", url) - res = self.session.get(url, headers=headers) + res = self.get(url, headers=headers) if res.status_code in [requests.codes.OK, requests.codes.NOT_MODIFIED]: if res.status_code == requests.codes.OK: @@ -1019,16 +1097,16 @@ def get_advisor_report(self): ''' Retrieve advisor report ''' - url = self.base_url + "/inventory/v1/hosts?insights_id=%s" % generate_machine_id() - content = self._get(url) + url = self.inventory_url + "/hosts?insights_id=%s" % generate_machine_id() + content = self._cached_get(url) if content is None: return None host_details = json.loads(content) if host_details["total"] < 1: - raise Exception("Error: failed to find host with matching machine-id. Run insights-client --status to check registration status") + _host_not_found() if host_details["total"] > 1: - raise Exception("Error: multiple hosts detected (insights_id = %s)" % generate_machine_id()) + raise Exception("Error: multiple hosts detected (insights_id = %s). To fix this error, run command: insights-client --unregister && insights-client --register" % generate_machine_id()) if not os.path.exists("/var/lib/insights"): os.makedirs("/var/lib/insights", mode=0o755) @@ -1039,7 +1117,7 @@ def get_advisor_report(self): host_id = host_details["results"][0]["id"] url = self.base_url + "/insights/v1/system/%s/reports/" % host_id - content = self._get(url) + content = self._cached_get(url) if content is None: return None @@ -1048,3 +1126,38 @@ def get_advisor_report(self): logger.debug("Wrote \"/var/lib/insights/insights-details.json\"") return json.loads(content) + + def checkin(self): + ''' + Sends an ultralight check-in request containing only the Canonical Facts. + ''' + logger.info("Checking in...") + + try: + canonical_facts = get_canonical_facts() + except Exception as e: + logger.debug('Error getting canonical facts: %s', e) + logger.debug('Falling back to only machine ID.') + insights_id = generate_machine_id() + canonical_facts = {"insights_id": str(insights_id)} + + url = self.inventory_url + "/hosts/checkin" + logger.debug("Sending check-in request to %s with %s" % (url, canonical_facts)) + try: + response = self.post(url, headers={"Content-Type": "application/json"}, data=json.dumps(canonical_facts)) + # Change to POST when the API is fixed. + except REQUEST_FAILED_EXCEPTIONS as exception: + _api_request_failed(exception) + return None + logger.debug("Check-in response status code %d" % response.status_code) + + if response.status_code == requests.codes.CREATED: + # Remove OK when the API is fixed. 
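
The _cached_get() helper earlier in this file follows a standard conditional-GET pattern. Below is a minimal standalone sketch, assuming a plain dict in place of the URLCache class the client uses; it is illustrative rather than the patch's implementation.

import requests

_cache = {}  # url -> (etag, body)

def cached_get(url):
    headers = {}
    if url in _cache:
        headers['If-None-Match'] = _cache[url][0]
    res = requests.get(url, headers=headers, timeout=120)
    if res.status_code == requests.codes.NOT_MODIFIED:
        return _cache[url][1]  # server says the cached copy is still current
    if res.status_code == requests.codes.OK:
        _cache[url] = (res.headers.get('ETag'), res.content)
        return res.content
    return None
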
+ logger.info("Successfully checked in!") + return True + elif response.status_code == requests.codes.NOT_FOUND: + # Remove BAD_REQUEST when the API is fixed. + _host_not_found() + else: + logger.debug("Check-in response body %s" % response.text) + raise RuntimeError("Unknown check-in API response") diff --git a/insights/client/constants.py b/insights/client/constants.py index b249bed014..f348bfe2b6 100644 --- a/insights/client/constants.py +++ b/insights/client/constants.py @@ -1,8 +1,41 @@ import os +_user_home = os.path.expanduser('~') +_app_name = 'insights-client' +_uid = os.getuid() +_user_cache = os.getenv('XDG_CACHE_HOME', default=os.path.join(_user_home, '.cache')) + + +def _log_dir(): + ''' + Get the insights-client log dir + + Default: /var/log/insights-client + Non-root user: $XDG_CACHE_HOME/insights-client || $HOME/.cache/insights-client/log + ''' + if _uid == 0: + insights_log_dir = os.path.join(os.sep, 'var', 'log', _app_name) + else: + insights_log_dir = os.path.join(_user_cache, _app_name, 'log') + return insights_log_dir + + +def _lib_dir(): + ''' + Get the insights-client egg cache dir + + Default: /var/lib/insights + Non-root user: $XDG_CACHE_HOME/insights-client || $HOME/.cache/insights-client/lib + ''' + if _uid == 0: + insights_lib_dir = os.path.join(os.sep, 'var', 'lib', 'insights') + else: + insights_lib_dir = os.path.join(_user_cache, _app_name, 'lib') + return insights_lib_dir + class InsightsConstants(object): - app_name = 'insights-client' + app_name = _app_name auth_method = 'BASIC' package_path = os.path.dirname( os.path.dirname(os.path.abspath(__file__))) @@ -10,10 +43,12 @@ class InsightsConstants(object): command_blacklist = ('rm', 'kill', 'reboot', 'shutdown') default_conf_dir = os.getenv('INSIGHTS_CONF_DIR', default='/etc/insights-client') default_conf_file = os.path.join(default_conf_dir, 'insights-client.conf') - log_dir = os.path.join(os.sep, 'var', 'log', app_name) + default_tags_file = os.path.join(default_conf_dir, 'tags.yaml') + log_dir = _log_dir() simple_find_replace_dir = '/etc/redhat-access-insights' default_log_file = os.path.join(log_dir, app_name + '.log') default_payload_log = os.path.join(log_dir, app_name + '-payload.log') + custom_network_log_level = 11 default_sed_file = os.path.join(default_conf_dir, '.exp.sed') base_url = 'cert-api.access.redhat.com/r/insights/platform' legacy_base_url = 'cert-api.access.redhat.com/r/insights' @@ -32,18 +67,26 @@ class InsightsConstants(object): core_etag_file = os.path.join(default_conf_dir, '.insights-core.etag') core_gpg_sig_etag_file = os.path.join(default_conf_dir, '.insights-core-gpg-sig.etag') last_upload_results_file = os.path.join(default_conf_dir, '.last-upload.results') - insights_core_lib_dir = os.path.join('/', 'var', 'lib', 'insights') + insights_core_lib_dir = _lib_dir() insights_core_rpm = os.path.join(default_conf_dir, 'rpm.egg') insights_core_last_stable = os.path.join(insights_core_lib_dir, 'last_stable.egg') insights_core_last_stable_gpg_sig = os.path.join(insights_core_lib_dir, 'last_stable.egg.asc') insights_core_newest = os.path.join(insights_core_lib_dir, 'newest.egg') insights_core_gpg_sig_newest = os.path.join(insights_core_lib_dir, 'newest.egg.asc') + module_router_path = "/module-update-router/v1/channel?module=insights-core" sig_kill_ok = 100 sig_kill_bad = 101 cached_branch_info = os.path.join(default_conf_dir, '.branch_info') pidfile = os.path.join(os.sep, 'var', 'run', 'insights-client.pid') - # this file is used to attempt registration when the client starts, implies 
--register - register_marker_file = os.path.join(os.sep, 'var', 'run', 'insights-client-try-register') - # default Hydra endpoint for posting entitlements information for AWS - default_portal_access_hydra_url = 'https://access.redhat.com/hydra/rest/accounts/entitle' + insights_tmp_path = os.path.join(os.sep, 'var', 'tmp', 'insights-client') + egg_release_file = os.path.join(insights_tmp_path, 'insights-client-egg-release') + ppidfile = os.path.join(os.sep, 'tmp', 'insights-client.ppid') valid_compressors = ("gz", "xz", "bz2", "none") + # RPM version in which core collection was released + core_collect_rpm_version = '3.1.0' + # RPM version in which logrotate was released + rpm_version_before_logrotate = '3.2.0' + rhsm_facts_dir = os.path.join(os.sep, 'etc', 'rhsm', 'facts') + rhsm_facts_file = os.path.join(os.sep, 'etc', 'rhsm', 'facts', 'insights-client.facts') + # In MB + archive_filesize_max = 100 diff --git a/insights/client/core_collector.py b/insights/client/core_collector.py new file mode 100644 index 0000000000..e1791dc89f --- /dev/null +++ b/insights/client/core_collector.py @@ -0,0 +1,99 @@ +""" +Collect all the interesting data for analysis - Core version +""" +from __future__ import absolute_import +import os +import six +import logging +from insights import collect + +from .constants import InsightsConstants as constants +from .data_collector import DataCollector +from .utilities import systemd_notify_init_thread + +APP_NAME = constants.app_name +logger = logging.getLogger(__name__) + + +class CoreCollector(DataCollector): + def __init__(self, *args, **kwargs): + super(CoreCollector, self).__init__(*args, **kwargs) + + def run_collection(self, conf, rm_conf, branch_info, blacklist_report): + ''' + Initialize core collection here and generate the + output directory with collected data. 
+ ''' + # initialize systemd-notify thread + systemd_notify_init_thread() + + if rm_conf is None: + rm_conf = {} + + # add tokens to limit regex handling + # core parses blacklist for files and commands as regex + if 'files' in rm_conf: + for idx, f in enumerate(rm_conf['files']): + rm_conf['files'][idx] = '^' + f + '$' + + if 'commands' in rm_conf: + for idx, c in enumerate(rm_conf['commands']): + rm_conf['commands'][idx] = '^' + c + '$' + + logger.debug('Beginning to run collection...') + + # only load files, keywords, components into core + core_blacklist = { + 'commands': rm_conf.get('commands', []), + 'files': rm_conf.get('files', []), + 'components': rm_conf.get('components', []) + } + + manifest = collect.default_manifest + if hasattr(self.config, 'manifest') and self.config.manifest: + if self.config.app is None: + with open(self.config.manifest, 'r') as f: + manifest = f.read() + else: + manifest = self.config.manifest + collected_data_path, exceptions = collect.collect( + manifest=manifest, + tmp_path=self.archive.tmp_dir, + rm_conf=core_blacklist, + client_timeout=self.config.cmd_timeout + ) + + # update the archive dir with the reported data location from Insights Core + if not collected_data_path: + raise RuntimeError('Error running collection: no output path defined.') + self.archive.archive_dir = collected_data_path + self.archive.archive_name = os.path.basename(collected_data_path) + + if not six.PY3: + # collect.py returns a unicode string, and these must be bytestrings + # when we call the tar command in 2.6 + self.archive.archive_dir = self.archive.archive_dir.encode('utf-8') + self.archive.archive_name = self.archive.archive_name.encode('utf-8') + + # set hostname_path for soscleaner + if os.path.exists(os.path.join(self.archive.archive_dir, 'data', 'insights_commands', 'hostname_-f')): + self.hostname_path = 'data/insights_commands/hostname_-f' + else: + # fall back to hostname if hostname -f not available + self.hostname_path = 'data/insights_commands/hostname' + + logger.debug('Collection finished.') + + self.redact(rm_conf) + + # collect metadata + logger.debug('Collecting metadata...') + self._write_branch_info(branch_info) + self._write_display_name() + self._write_ansible_host() + self._write_version_info() + self._write_tags() + self._write_blacklist_report(blacklist_report) + self._write_blacklisted_specs() + self._write_egg_release() + logger.debug('Metadata collection finished.') diff --git a/insights/client/data_collector.py b/insights/client/data_collector.py index f0a57c4fbc..4caa37f4e2 100644 --- a/insights/client/data_collector.py +++ b/insights/client/data_collector.py @@ -10,13 +10,15 @@ import glob import six import shlex +import re from itertools import chain from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile +from insights.core.blacklist import BLACKLISTED_SPECS from insights.util import mangle from ..contrib.soscleaner import SOSCleaner -from .utilities import _expand_paths, get_version_info, read_pidfile, get_tags +from .utilities import _expand_paths, get_version_info, systemd_notify_init_thread, get_tags from .constants import InsightsConstants as constants from .insights_spec import InsightsFile, InsightsCommand from .archive import InsightsArchive @@ -31,6 +33,41 @@ SOSCLEANER_LOGGER.setLevel(logging.ERROR) +def _process_content_redaction(filepath, exclude, regex=False): + ''' + Redact content from a file, based on + /etc/insights-client/.exp.sed and and the contents of "exclude" + + filepath file to modify + 
exclude list of strings to redact + regex whether exclude is a list of regular expressions + + Returns the file contents with the specified data removed + ''' + logger.debug('Processing %s...', filepath) + + # password removal + sedcmd = Popen(['sed', '-rf', constants.default_sed_file, filepath], stdout=PIPE) + # patterns removal + if exclude: + exclude_file = NamedTemporaryFile() + exclude_file.write("\n".join(exclude).encode('utf-8')) + exclude_file.flush() + if regex: + flag = '-E' + else: + flag = '-F' + grepcmd = Popen(['grep', '-v', flag, '-f', exclude_file.name], stdin=sedcmd.stdout, stdout=PIPE) + sedcmd.stdout.close() + stdout, stderr = grepcmd.communicate() + logger.debug('Process status: %s', grepcmd.returncode) + else: + stdout, stderr = sedcmd.communicate() + logger.debug('Process status: %s', sedcmd.returncode) + logger.debug('Process stderr: %s', stderr) + return stdout + + class DataCollector(object): ''' Run commands and collect files @@ -55,6 +92,12 @@ def _write_display_name(self): self.archive.add_metadata_to_archive( self.config.display_name, '/display_name') + def _write_ansible_host(self): + if self.config.ansible_host: + logger.debug("Writing ansible_host to archive...") + self.archive.add_metadata_to_archive( + self.config.ansible_host, '/ansible_host') + def _write_version_info(self): logger.debug("Writing version information to archive...") version_info = get_version_info() @@ -85,6 +128,90 @@ def f(k, v): t = list(chain.from_iterable(t)) self.archive.add_metadata_to_archive(json.dumps(t), '/tags.json') + def _write_blacklist_report(self, blacklist_report): + logger.debug("Writing blacklist report to archive...") + self.archive.add_metadata_to_archive( + json.dumps(blacklist_report), '/blacklist_report') + + def _write_blacklisted_specs(self): + logger.debug("Writing blacklisted specs to archive...") + + if BLACKLISTED_SPECS: + self.archive.add_metadata_to_archive( + json.dumps({"specs": BLACKLISTED_SPECS}), '/blacklisted_specs') + + def _write_egg_release(self): + logger.debug("Writing egg release to archive...") + egg_release = '' + try: + with open(constants.egg_release_file) as fil: + egg_release = fil.read() + except (IOError, MemoryError) as e: + logger.debug('Could not read the egg release file: %s', str(e)) + try: + os.remove(constants.egg_release_file) + except OSError as e: + logger.debug('Could not remove the egg release file: %s', str(e)) + + try: + self.archive.add_metadata_to_archive( + egg_release, '/egg_release') + except OSError as e: + logger.debug('Could not add the egg release file to the archive: %s', str(e)) + self.archive.add_metadata_to_archive( + '', '/egg_release') + + def _write_collection_stats(self, collection_stats): + logger.debug("Writing collection stats to archive...") + self.archive.add_metadata_to_archive( + json.dumps(collection_stats), '/collection_stats') + + def _write_rhsm_facts(self, hashed_fqdn, ip_csv): + logger.info('Writing RHSM facts to %s...', constants.rhsm_facts_file) + ips_list = '' + with open(ip_csv) as fil: + # create IP list as JSON block with format + # [ + # { + # original: + # obfuscated: + # } + # ] + + ips_list = fil.readlines() + headings = ips_list[0].strip().split(',') + # set the indices for the IPs + if 'original' in headings[0].lower(): + # soscleaner 0.4.4, original first + org = 0 + obf = 1 + else: + # soscleaner 0.2.2, obfuscated first + org = 1 + obf = 0 + + ip_block = [] + for line in ips_list[1:]: + ipset = line.strip().split(',') + ip_block.append( + { + 'original': ipset[org], + 
'obfuscated': ipset[obf] + }) + + facts = { + 'insights_client.obfuscate_hostname_enabled': self.config.obfuscate_hostname, + 'insights_client.hostname': hashed_fqdn, + 'insights_client.obfuscate_ip_enabled': self.config.obfuscate, + 'insights_client.ips': json.dumps(ip_block) + } + + try: + with open(constants.rhsm_facts_file, 'w') as fil: + json.dump(facts, fil) + except (IOError, OSError) as e: + logger.error('Could not write to %s: %s', constants.rhsm_facts_file, str(e)) + def _run_pre_command(self, pre_cmd): ''' Run a pre command to get external args for a command @@ -182,73 +309,159 @@ def _parse_command_spec(self, spec, precmds): else: return [spec] - def run_collection(self, conf, rm_conf, branch_info): + def run_collection(self, conf, rm_conf, branch_info, blacklist_report): ''' Run specs and collect all the data ''' - parent_pid = read_pidfile() + # initialize systemd-notify thread + systemd_notify_init_thread() + + self.archive.create_archive_dir() + self.archive.create_command_dir() + + collection_stats = {} + if rm_conf is None: rm_conf = {} logger.debug('Beginning to run collection spec...') - exclude = None - if rm_conf: - try: - exclude = rm_conf['patterns'] - # handle the None or empty case of the sub-object - if 'regex' in exclude and not exclude['regex']: - raise LookupError - logger.warn("WARNING: Skipping patterns found in remove.conf") - except LookupError: - logger.debug('Patterns section of remove.conf is empty.') + + rm_commands = rm_conf.get('commands', []) + rm_files = rm_conf.get('files', []) for c in conf['commands']: # remember hostname archive path if c.get('symbolic_name') == 'hostname': self.hostname_path = os.path.join( 'insights_commands', mangle.mangle_command(c['command'])) - rm_commands = rm_conf.get('commands', []) if c['command'] in rm_commands or c.get('symbolic_name') in rm_commands: logger.warn("WARNING: Skipping command %s", c['command']) + BLACKLISTED_SPECS.append(c['symbolic_name']) elif self.mountpoint == "/" or c.get("image"): cmd_specs = self._parse_command_spec(c, conf['pre_commands']) for s in cmd_specs: if s['command'] in rm_commands: logger.warn("WARNING: Skipping command %s", s['command']) + BLACKLISTED_SPECS.append(s['symbolic_name']) continue - cmd_spec = InsightsCommand(self.config, s, exclude, self.mountpoint, parent_pid) + cmd_spec = InsightsCommand(self.config, s, self.mountpoint) self.archive.add_to_archive(cmd_spec) + collection_stats[s['command']] = { + 'return_code': cmd_spec.return_code, + 'exec_time': cmd_spec.exec_time, + 'output_size': cmd_spec.output_size + } for f in conf['files']: - rm_files = rm_conf.get('files', []) if f['file'] in rm_files or f.get('symbolic_name') in rm_files: logger.warn("WARNING: Skipping file %s", f['file']) + BLACKLISTED_SPECS.append(f['symbolic_name']) else: file_specs = self._parse_file_spec(f) for s in file_specs: # filter files post-wildcard parsing if s['file'] in rm_conf.get('files', []): logger.warn("WARNING: Skipping file %s", s['file']) + BLACKLISTED_SPECS.append(s['symbolic_name']) else: - file_spec = InsightsFile(s, exclude, self.mountpoint, parent_pid) + file_spec = InsightsFile(s, self.mountpoint) self.archive.add_to_archive(file_spec) + collection_stats[s['file']] = { + 'exec_time': file_spec.exec_time, + 'output_size': file_spec.output_size + } if 'globs' in conf: for g in conf['globs']: - glob_specs = self._parse_glob_spec(g) - for g in glob_specs: - if g['file'] in rm_conf.get('files', []): - logger.warn("WARNING: Skipping file %s", g) - else: - glob_spec = InsightsFile(g, 
exclude, self.mountpoint, parent_pid) - self.archive.add_to_archive(glob_spec) + if g.get('symbolic_name') in rm_files: + # ignore glob via symbolic name + logger.warn("WARNING: Skipping file %s", g['glob']) + BLACKLISTED_SPECS.append(g['symbolic_name']) + else: + glob_specs = self._parse_glob_spec(g) + for g in glob_specs: + if g['file'] in rm_files: + logger.warn("WARNING: Skipping file %s", g['file']) + BLACKLISTED_SPECS.append(g['symbolic_name']) + else: + glob_spec = InsightsFile(g, self.mountpoint) + self.archive.add_to_archive(glob_spec) + collection_stats[g['file']] = { + 'exec_time': glob_spec.exec_time, + 'output_size': glob_spec.output_size + } logger.debug('Spec collection finished.') + self.redact(rm_conf) + # collect metadata logger.debug('Collecting metadata...') self._write_branch_info(branch_info) self._write_display_name() + self._write_ansible_host() self._write_version_info() self._write_tags() + self._write_blacklist_report(blacklist_report) + self._write_blacklisted_specs() + self._write_egg_release() + self._write_collection_stats(collection_stats) logger.debug('Metadata collection finished.') + def redact(self, rm_conf): + ''' + Perform data redaction (password sed command and patterns), + write data to the archive in place + ''' + logger.debug('Running content redaction...') + + if not re.match(r'/var/tmp/.+/insights-.+', self.archive.archive_dir): + # sanity check to make sure we're only modifying + # our own stuff in temp + # we should never get here but just in case + raise RuntimeError('ERROR: invalid Insights archive temp path') + + if rm_conf is None: + rm_conf = {} + exclude = None + regex = False + if rm_conf: + try: + exclude = rm_conf['patterns'] + if isinstance(exclude, dict) and exclude['regex']: + # if "patterns" is a dict containing a non-empty "regex" list + logger.debug('Using regular expression matching for patterns.') + exclude = exclude['regex'] + regex = True + logger.warn("WARNING: Skipping patterns defined in blacklist configuration") + except LookupError: + # either "patterns" was undefined in rm conf, or + # "regex" was undefined in "patterns" + exclude = None + if not exclude: + logger.debug('Patterns section of blacklist configuration is empty.') + + # TODO: consider implementing redact() in CoreCollector class rather than + # special handling here + if self.config.core_collect: + # redact only from the 'data' directory + searchpath = os.path.join(self.archive.archive_dir, 'data') + if not (os.path.isdir(searchpath) and + re.match(r'/var/tmp/.+/insights-.+/data', searchpath)): + # abort if the dir does not exist and isn't the correct format + # we should never get here but just in case + raise RuntimeError('ERROR: invalid Insights archive temp path') + else: + searchpath = self.archive.archive_dir + + for dirpath, dirnames, filenames in os.walk(searchpath): + for f in filenames: + fullpath = os.path.join(dirpath, f) + if (fullpath.endswith('etc/insights-client/machine-id') or + fullpath.endswith('etc/machine-id') or + fullpath.endswith('insights_commands/subscription-manager_identity')): + # do not redact the ID files + continue + redacted_contents = _process_content_redaction(fullpath, exclude, regex) + with open(fullpath, 'wb') as dst: + dst.write(redacted_contents) + def done(self, conf, rm_conf): """ Do finalization stuff @@ -266,13 +479,18 @@ def done(self, conf, rm_conf): and archive files. 
""" if self.config.obfuscate: + if rm_conf and rm_conf.get('keywords'): + logger.warn("WARNING: Skipping keywords defined in blacklist configuration") cleaner = SOSCleaner(quiet=True) clean_opts = CleanOptions( self.config, self.archive.tmp_dir, rm_conf, self.hostname_path) cleaner.clean_report(clean_opts, self.archive.archive_dir) if clean_opts.keyword_file is not None: os.remove(clean_opts.keyword_file.name) - logger.warn("WARNING: Skipping keywords found in remove.conf") + + # generate RHSM facts at this point + self._write_rhsm_facts(cleaner.hashed_fqdn, cleaner.ip_report) + if self.config.output_dir: # return the entire soscleaner dir # see additions to soscleaner.SOSCleaner.clean_report @@ -301,6 +519,7 @@ def __init__(self, config, tmp_dir, rm_conf, hostname_path): self.keyword_file = None self.keywords = None self.no_tar_file = config.output_dir + self.core_collect = config.core_collect if rm_conf: try: diff --git a/insights/client/insights_spec.py b/insights/client/insights_spec.py index af6e997c05..be1caed139 100644 --- a/insights/client/insights_spec.py +++ b/insights/client/insights_spec.py @@ -4,12 +4,14 @@ import shlex import logging import six +import time +import sys from subprocess import Popen, PIPE, STDOUT from tempfile import NamedTemporaryFile -from insights.util import mangle +from insights.util import mangle, which from .constants import InsightsConstants as constants -from .utilities import determine_hostname, systemd_notify +from .utilities import determine_hostname logger = logging.getLogger(__name__) @@ -18,34 +20,20 @@ class InsightsSpec(object): ''' A spec loaded from the uploader.json ''' - def __init__(self, config, spec, exclude, parent_pid=None): + def __init__(self, config, spec): self.config = config - - # exclusions patterns for this spec - # if exclude is an array of strings, it's old style - # if it's an object or an array of dicts, it's new style - # use regex if it's defined - self.regex = False - self.exclude = None - if exclude and isinstance(exclude, dict): - if 'regex' in exclude and exclude['regex']: - logger.debug('Using regular expression matching in remove.conf.') - self.regex = True - self.exclude = exclude['regex'] - else: - self.exclude = exclude - # pattern for spec collection self.pattern = spec['pattern'] if spec['pattern'] else None - # PID of parent insights-client process, to notify systemd watchdog - self.parent_pid = parent_pid + self.return_code = None + self.exec_time = None + self.output_size = None class InsightsCommand(InsightsSpec): ''' A command spec ''' - def __init__(self, config, spec, exclude, mountpoint, parent_pid=None): - InsightsSpec.__init__(self, config, spec, exclude, parent_pid) + def __init__(self, config, spec, mountpoint): + super(InsightsCommand, self).__init__(config, spec) self.command = spec['command'].replace( '{CONTAINER_MOUNT_POINT}', mountpoint) self.archive_path = mangle.mangle_command(self.command) @@ -58,28 +46,37 @@ def get_output(self): Execute a command through system shell. First checks to see if the requested command is executable. 
Returns (returncode, stdout, 0) ''' - # let systemd know we're still going - systemd_notify(self.parent_pid) - if self.is_hostname: # short circuit for hostame with internal method return determine_hostname() # all commands should timeout after a long interval so the client does not hang # prepend native nix 'timeout' implementation - timeout_command = 'timeout -s KILL %s %s' % ( - self.config.cmd_timeout, self.command) + + # use TERM for rpm/yum commands, KILL for everything else + if (self.command.startswith('/bin/rpm') or + self.command.startswith('yum') or + self.command.startswith('/usr/bin/yum')): + signal = 'TERM' + else: + signal = 'KILL' # ensure consistent locale for collected command output cmd_env = {'LC_ALL': 'C', 'PATH': '/sbin:/bin:/usr/sbin:/usr/bin', 'PYTHONPATH': os.getenv('PYTHONPATH')} + + timeout = which('timeout', env=cmd_env) + timeout_command = '%s -s %s %s %s' % ( + timeout, signal, self.config.cmd_timeout, self.command) + args = shlex.split(timeout_command) # never execute this stuff if set.intersection(set(args), constants.command_blacklist): raise RuntimeError("Command Blacklist: " + self.command) + exec_start = time.time() try: logger.debug('Executing: %s', args) proc0 = Popen(args, shell=False, stdout=PIPE, stderr=STDOUT, @@ -91,37 +88,10 @@ def get_output(self): else: raise err - dirty = False - - cmd = "sed -rf " + constants.default_sed_file - sedcmd = Popen(shlex.split(cmd), - stdin=proc0.stdout, - stdout=PIPE) - proc0.stdout.close() - proc0 = sedcmd - - if self.exclude is not None: - exclude_file = NamedTemporaryFile() - exclude_file.write("\n".join(self.exclude).encode('utf-8')) - exclude_file.flush() - if self.regex: - cmd = "grep -E -v -f %s" % exclude_file.name - else: - cmd = "grep -F -v -f %s" % exclude_file.name - proc1 = Popen(shlex.split(cmd), - stdin=proc0.stdout, - stdout=PIPE) - proc0.stdout.close() - stderr = None - if self.pattern is None or len(self.pattern) == 0: - stdout, stderr = proc1.communicate() - - # always log return codes for debug - logger.debug('Proc1 Status: %s', proc1.returncode) - logger.debug('Proc1 stderr: %s', stderr) - proc0 = proc1 + if proc0.returncode == 126 or proc0.returncode == 127: + stdout = "Could not find cmd: %s", self.command - dirty = True + dirty = False if self.pattern is not None and len(self.pattern): pattern_file = NamedTemporaryFile() @@ -144,13 +114,11 @@ def get_output(self): if not dirty: stdout, stderr = proc0.communicate() - # Required hack while we still pass shell=True to Popen; a Popen - # call with shell=False for a non-existant binary will raise OSError. 
- if proc0.returncode == 126 or proc0.returncode == 127: - stdout = "Could not find cmd: %s", self.command - logger.debug("Proc0 Status: %s", proc0.returncode) logger.debug("Proc0 stderr: %s", stderr) + self.return_code = proc0.returncode + self.exec_time = time.time() - exec_start + self.output_size = sys.getsizeof(stdout) return stdout.decode('utf-8', 'ignore').strip() @@ -158,8 +126,8 @@ class InsightsFile(InsightsSpec): ''' A file spec ''' - def __init__(self, spec, exclude, mountpoint, parent_pid=None): - InsightsSpec.__init__(self, None, spec, exclude, parent_pid) + def __init__(self, spec, mountpoint): + super(InsightsFile, self).__init__(None, spec) # substitute mountpoint for collection self.real_path = os.path.join(mountpoint, spec['file'].lstrip('/')) self.archive_path = spec['file'] @@ -168,40 +136,16 @@ def get_output(self): ''' Get file content, selecting only lines we are interested in ''' - # let systemd know we're still going - systemd_notify(self.parent_pid) - if not os.path.isfile(self.real_path): logger.debug('File %s does not exist', self.real_path) return - cmd = [] - cmd.append('sed') - cmd.append('-rf') - cmd.append(constants.default_sed_file) - cmd.append(self.real_path) - sedcmd = Popen(cmd, - stdout=PIPE) - - if self.exclude is not None: - exclude_file = NamedTemporaryFile() - exclude_file.write("\n".join(self.exclude).encode('utf-8')) - exclude_file.flush() - - if self.regex: - cmd = "grep -E -v -f %s" % exclude_file.name - else: - cmd = "grep -F -v -f %s" % exclude_file.name - args = shlex.split(cmd) - proc = Popen(args, stdin=sedcmd.stdout, stdout=PIPE) - sedcmd.stdout.close() - stdin = proc.stdout - if self.pattern is None: - output = proc.communicate()[0] - else: - sedcmd = proc + exec_start = time.time() + sedcmd = Popen(['sed', '', self.real_path], stdout=PIPE) - if self.pattern is not None: + if self.pattern is None: + output = sedcmd.communicate()[0] + else: pattern_file = NamedTemporaryFile() pattern_file.write("\n".join(self.pattern).encode('utf-8')) pattern_file.flush() @@ -211,12 +155,7 @@ def get_output(self): proc1 = Popen(args, stdin=sedcmd.stdout, stdout=PIPE) sedcmd.stdout.close() - if self.exclude is not None: - stdin.close() - output = proc1.communicate()[0] - - if self.pattern is None and self.exclude is None: - output = sedcmd.communicate()[0] - + self.exec_time = time.time() - exec_start + self.output_size = sys.getsizeof(output) return output.decode('utf-8', 'ignore').strip() diff --git a/insights/client/map_components.py b/insights/client/map_components.py new file mode 100644 index 0000000000..e36efb3ee3 --- /dev/null +++ b/insights/client/map_components.py @@ -0,0 +1,168 @@ +from __future__ import absolute_import +import six +import logging +import textwrap + +from .constants import InsightsConstants as constants + +APP_NAME = constants.app_name +logger = logging.getLogger(__name__) + + +def map_rm_conf_to_components(rm_conf, uploader_json): + ''' + In order to maximize compatibility between "classic" remove.conf + configurations and core collection, do the following mapping + strategy: + 1. If remove.conf entry matches a symbolic name, disable the + corresponding core component. + 2. If remove.conf entry is a raw command or file, do a reverse + lookup on the symbolic name based on stored uploader.json data, + then continue as in step 1. + 3. If neither conditions 1 or 2 are matched it is either + a) a mistyped command/file, or + b) an arbitrary file. + For (a), classic remove.conf configs require an exact match to + uploader.json. 
We can carry that condition into our + compatibility with core. + For (b), classic collection had the ability to skip arbitrary + files based on filepaths in uploader.json post-expansion + (i.e. a specific repo file in /etc/yum.repos.d). + Core checks all files collected against the file + blacklist filters, so these files will be omitted + just by the nature of core collection. + ''' + updated_commands = [] + updated_files = [] + updated_components = [] + + if not rm_conf: + return rm_conf + + logger.warning("If possible, commands and files specified in the blacklist configuration will be converted to Insights component specs that will be disabled as needed.") + + # save matches to a dict for informative logging + conversion_map = {} + longest_key_len = 0 + + for section in ['commands', 'files']: + if section not in rm_conf: + continue + for key in rm_conf[section]: + if section == 'commands': + symbolic_name = _search_uploader_json(uploader_json, ['commands'], key) + elif section == 'files': + # match both files and globs to rm_conf files + symbolic_name = _search_uploader_json(uploader_json, ['files', 'globs'], key) + + component = _get_component_by_symbolic_name(symbolic_name) + if component: + conversion_map[key] = component + if len(key) > longest_key_len: + longest_key_len = len(key) + updated_components.append(component) + else: + if section == 'commands': + updated_commands.append(key) + elif section == 'files': + updated_files.append(key) + + _log_conversion_table(conversion_map, longest_key_len) + + if 'components' in rm_conf: + # update components list if there already is one + original_comp_set = set(rm_conf['components']) + updated_comp_set = set(dict.fromkeys(updated_components)) + # avoid duplicates + rm_conf['components'] += list(updated_comp_set - original_comp_set) + else: + # otherwise create it + rm_conf['components'] = list(dict.fromkeys(updated_components)) + + rm_conf['commands'] = updated_commands + rm_conf['files'] = updated_files + + return rm_conf + + +def _search_uploader_json(uploader_json, headings, key): + ''' + Search an uploader.json block for a command/file from "name" + and return the symbolic name if it exists + + headings - list of headings to search inside uploader.json + key - raw command/file or symbolic name to search + conversion_map - list of names to found components for logging + longest_key_len - length of longest name for logging + ''' + for heading in headings: + # keys inside the dicts are the heading, but singular + singular = heading.rstrip('s') + + for spec in uploader_json[heading]: + if key == spec['symbolic_name'] or (key == spec[singular] and heading != 'globs'): + # matches to a symbolic name or raw command, cache the symbolic name + # only match symbolic name for globs + sname = spec['symbolic_name'] + if not six.PY3: + sname = sname.encode('utf-8') + return sname + # no match + return None + + +def _get_component_by_symbolic_name(sname): + # match a component to a symbolic name + # some symbolic names need to be renamed to fit specs + if sname is None: + # toss back bad input + return None + + spec_prefix = "insights.specs.default.DefaultSpecs." 
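+    # Symbolic names whose core spec is named differently. A value of None
+    # means there is no core-collection equivalent and the entry is dropped;
+    # already fully qualified values (see ps_auxwww) are returned verbatim.
+    # e.g. _get_component_by_symbolic_name('machine_id1')
+    #      returns 'insights.specs.default.DefaultSpecs.machine_id'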
+ spec_conversion = { + 'getconf_pagesize': 'getconf_page_size', + 'netstat__agn': 'netstat_agn', + 'rpm__V_packages': 'rpm_V_packages', + + 'machine_id1': 'machine_id', + 'machine_id2': 'machine_id', + 'machine_id3': 'machine_id', + 'limits_d': 'limits_conf', + 'modprobe_conf': 'modprobe', + 'modprobe_d': 'modprobe', + 'ps_auxwww': 'insights.specs.sos_archive.SosSpecs.ps_auxww', # special case + 'rh_mongodb26_conf': 'mongod_conf', + 'sysconfig_rh_mongodb26': 'sysconfig_mongod', + 'redhat_access_proactive_log': None, + + 'krb5_conf_d': 'krb5' + } + + if sname in spec_conversion: + if spec_conversion[sname] is None: + return None + if sname == 'ps_auxwww': + return spec_conversion[sname] + return spec_prefix + spec_conversion[sname] + return spec_prefix + sname + + +def _log_conversion_table(conversion_map, longest_key_len): + ''' + Handle wrapping & logging the conversions + ''' + max_log_len = 48 + + for n in conversion_map: + spec_name_no_prefix = conversion_map[n].rsplit('.', 1)[-1] + + # for specs exceeding a max length, wrap them past the first line + if longest_key_len > max_log_len: + log_len = max_log_len + else: + log_len = longest_key_len + + wrapped_spec = textwrap.wrap(n, max_log_len) + # log the conversion on the first line of the "wrap" + wrapped_spec[0] = '- {0:{1}} => {2}'.format(wrapped_spec[0], log_len, spec_name_no_prefix) + logger.warning('\n '.join(wrapped_spec)) diff --git a/insights/client/phase/v1.py b/insights/client/phase/v1.py index 4cc7b86761..1f4c3aa5a4 100644 --- a/insights/client/phase/v1.py +++ b/insights/client/phase/v1.py @@ -1,19 +1,19 @@ from __future__ import print_function import functools +from os.path import isfile import json import logging import os import sys -import atexit +import runpy from insights.client import InsightsClient from insights.client.config import InsightsConfig from insights.client.constants import InsightsConstants as constants from insights.client.support import InsightsSupport -from insights.client.utilities import validate_remove_file, print_egg_versions, write_to_disk +from insights.client.utilities import validate_remove_file, print_egg_versions from insights.client.schedule import get_scheduler from insights.client.apps.compliance import ComplianceClient -from insights.client.apps.aws import aws_main logger = logging.getLogger(__name__) @@ -23,10 +23,10 @@ def phase(func): def _f(): try: config = InsightsConfig().load_all() - except ValueError as e: + client = InsightsClient(config) + except (ValueError, OSError) as e: sys.stderr.write('ERROR: ' + str(e) + '\n') sys.exit(constants.sig_kill_bad) - client = InsightsClient(config) if config.debug: logger.info("Core path: %s", os.path.dirname(__file__)) try: @@ -64,27 +64,18 @@ def pre_update(client, config): # validate the remove file if config.validate: try: - if validate_remove_file(config): - sys.exit(constants.sig_kill_ok) - else: - sys.exit(constants.sig_kill_bad) + validate_remove_file(config) + sys.exit(constants.sig_kill_ok) except RuntimeError as e: logger.error(e) sys.exit(constants.sig_kill_bad) - if os.path.isfile(config.remove_file): - if os.stat(config.remove_file).st_size != 0: - try: - validate_remove_file(config) - except RuntimeError as e: - logger.error(e) - sys.exit(constants.sig_kill_bad) - # handle cron stuff if config.enable_schedule: # enable automatic scheduling logger.debug('Updating config...') - updated = get_scheduler(config).set_daily() + scheduler = get_scheduler(config) + updated = scheduler.schedule() if updated: logger.info('Automatic 
scheduling for Insights has been enabled.') sys.exit(constants.sig_kill_ok) @@ -121,6 +112,18 @@ def pre_update(client, config): print(json.dumps(resp)) sys.exit(constants.sig_kill_ok) + if config.checkin: + try: + checkin_success = client.checkin() + except Exception as e: + print(e) + sys.exit(constants.sig_kill_bad) + + if checkin_success: + sys.exit(constants.sig_kill_ok) + else: + sys.exit(constants.sig_kill_bad) + @phase def update(client, config): @@ -133,15 +136,12 @@ def update(client, config): @phase def post_update(client, config): - # create a machine id first thing. we'll need it for all uploads - logger.debug('Machine ID: %s', client.get_machine_id()) - logger.debug("CONFIG: %s", config) + print_egg_versions() - # --registering an AWS machine - if config.portal_access or config.portal_access_no_insights: - logger.debug('Entitling an AWS host. Bypassing registration check.') - return + if config.list_specs: + client.list_specs() + sys.exit(constants.sig_kill_ok) if config.show_results: try: @@ -173,12 +173,19 @@ def post_update(client, config): # put this first to avoid conflicts with register if config.unregister: if client.unregister(): + get_scheduler(config).remove_scheduling() sys.exit(constants.sig_kill_ok) else: sys.exit(constants.sig_kill_bad) - if config.offline: - logger.debug('Running client in offline mode. Bypassing registration.') + if config.offline or config.no_upload: + # create a machine id first thing. we'll need it for all uploads + logger.debug('Machine ID: %s', client.get_machine_id()) + logger.debug("CONFIG: %s", config) + if config.offline: + logger.debug('Running client in offline mode. Bypassing registration.') + else: + logger.debug("Running client without uploading. Bypassing registration.") return if config.display_name and not config.register: @@ -198,35 +205,41 @@ def post_update(client, config): elif reg is False: # unregistered sys.exit(constants.sig_kill_bad) - if config.register: - if (not config.disable_schedule and - get_scheduler(config).set_daily()): + if config.register and not config.disable_schedule: + scheduler = get_scheduler(config) + updated = scheduler.schedule() + if updated: logger.info('Automatic scheduling for Insights has been enabled.') return # -------delete everything above this line------- - if config.offline: - logger.debug('Running client in offline mode. Bypassing registration.') - return - - # --payload short circuits registration check - if config.payload: - logger.debug('Uploading a specified archive. Bypassing registration.') + if config.offline or config.no_upload or config.payload: + # create a machine id first thing. we'll need it for all uploads + logger.debug('Machine ID: %s', client.get_machine_id()) + logger.debug("CONFIG: %s", config) + if config.offline: + logger.debug('Running client in offline mode. Bypassing registration.') + elif config.no_upload: + logger.debug("Running client without uploading. Bypassing registration.") + else: + logger.debug('Uploading a specified archive. 
Bypassing registration.') return # check registration status before anything else - reg_check = client.get_registration_status() - if reg_check is None: - sys.exit(constants.sig_kill_bad) + if isfile(constants.machine_id_file): + reg_check = client.get_registration_status() + if reg_check is None: + sys.exit(constants.sig_kill_bad) + else: + reg_check = False # --status if config.status: if reg_check: logger.info('This host is registered.') - sys.exit(constants.sig_kill_ok) else: logger.info('This host is unregistered.') - sys.exit(constants.sig_kill_bad) + sys.exit(constants.sig_kill_ok) # put this first to avoid conflicts with register if config.unregister: @@ -247,20 +260,22 @@ def post_update(client, config): 'Use --register to register this host.') sys.exit(constants.sig_kill_bad) - # --force-reregister, clear machine-id - if config.reregister: - reg_check = False - client.clear_local_registration() - # --register was called if config.register: # don't actually need to make a call to register() since # system creation and upload are a single event on the platform + if reg_check is False and isfile(constants.machine_id_file): + # Do not register if a machine_id file is found + logger.info("Machine-id found, insights-client can not be registered." + " Please, unregister insights-client first: `insights-client --unregister`") + sys.exit(constants.sig_kill_bad) if reg_check: logger.info('This host has already been registered.') - if (not config.disable_schedule and - get_scheduler(config).set_daily()): - logger.info('Automatic scheduling for Insights has been enabled.') + if not config.disable_schedule: + scheduler = get_scheduler(config) + updated = scheduler.schedule() + if updated: + logger.info('Automatic scheduling for Insights has been enabled.') # set --display-name independent of register # only do this if set from the CLI. normally display_name is sent on upload @@ -270,17 +285,30 @@ def post_update(client, config): else: sys.exit(constants.sig_kill_bad) + # set --ansible-hostname independent of register + # only do this if set from the CLI. normally display_name is sent on upload + if 'ansible_host' in config._cli_opts and not config.register: + if client.set_ansible_host(config.ansible_host): + sys.exit(constants.sig_kill_ok) + else: + sys.exit(constants.sig_kill_bad) + + # create a machine id first thing. 
we'll need it for all uploads + logger.debug('Machine ID: %s', client.get_machine_id()) + logger.debug("CONFIG: %s", config) + @phase def collect_and_output(client, config): - # last phase, delete PID file on exit - atexit.register(write_to_disk, constants.pidfile, delete=True) - # register cloud (aws) - if config.portal_access or config.portal_access_no_insights: - if aws_main(config): - sys.exit(constants.sig_kill_ok) - else: + # run a specified module + if config.module: + try: + runpy.run_module(config.module) + except ImportError as e: + logger.error(e) sys.exit(constants.sig_kill_bad) + sys.exit(constants.sig_kill_ok) + # --compliance was called if config.compliance: config.payload, config.content_type = ComplianceClient(config).oscap_scan() @@ -294,7 +322,8 @@ def collect_and_output(client, config): except RuntimeError as e: logger.error(e) sys.exit(constants.sig_kill_bad) - config.content_type = 'application/vnd.redhat.advisor.collection+tgz' + if not config.content_type: + config.content_type = 'application/vnd.redhat.advisor.collection+tgz' if config.no_upload: # output options for which upload is not performed @@ -308,14 +337,23 @@ def collect_and_output(client, config): # no archive to upload, something went wrong sys.exit(constants.sig_kill_bad) resp = None + content_type = None + if config.content_type in ['gz', 'bz2', 'xz']: + content_type = 'application/vnd.redhat.advisor.collection+' + config.content_type + extension = os.path.splitext(insights_archive)[1][1:] + compression_type = content_type.split('+')[1] + if extension not in compression_type: + logger.error("Content type different from compression") + sys.exit(constants.sig_kill_bad) try: - resp = client.upload(payload=insights_archive, content_type=config.content_type) + resp = client.upload(payload=insights_archive, content_type=(content_type if content_type else config.content_type)) except (IOError, ValueError, RuntimeError) as e: logger.error(str(e)) sys.exit(constants.sig_kill_bad) if resp: if config.to_json: print(json.dumps(resp)) + client.show_inventory_deep_link() client.delete_cached_branch_info() diff --git a/insights/client/schedule.py b/insights/client/schedule.py index 7649b62465..64800a4202 100644 --- a/insights/client/schedule.py +++ b/insights/client/schedule.py @@ -28,7 +28,7 @@ def active(self): return os.path.isfile(self.target) return False - def set_daily(self): + def schedule(self): logger.debug('Scheduling cron.daily') try: if not os.path.exists(self.target): @@ -51,43 +51,91 @@ def remove_scheduling(self): class InsightsSchedulerSystemd(object): - - @property - def active(self): + ALL_TIMERS = ("insights-client", "insights-client-checkin") + + def __init__(self): + """ + Looks for loaded timers using `systemctl show`, stores their names in self.loaded_timers. No loaded timers + produce (). If an error occurs, self.loaded_timers becomes None and all methods (schedule, remove_scheduling, + active) then return None. + """ + results = self._run_systemctl_commands(self.ALL_TIMERS, "show", "--property", "LoadState") + if not results: + self.loaded_timers = None # Command failed. 
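+            # None propagates: schedule(), remove_scheduling() and active will also return None.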
+ else: + self.loaded_timers = tuple( + timer + for timer, result in results.items() + if result["status"] == 0 and result["output"] == "LoadState=loaded\n" + ) + if not self.loaded_timers: + logger.warning("No loaded timers found") + + @staticmethod + def _run_systemctl_command(*args): + cmd_args = " ".join(args) + command = "systemctl %s" % cmd_args + logger.debug("Running command %s", command) try: - systemctl_status = run_command_get_output('systemctl is-enabled insights-client.timer') - return systemctl_status['status'] == 0 + result = run_command_get_output(command) except OSError: - logger.exception('Could not get systemd status') - return False + logger.exception("Could not run %s", command) + return None + else: + logger.debug("Status: %s", result["status"]) + logger.debug("Output: %s", result["output"]) + return result + + @classmethod + def _run_systemctl_commands(cls, timers, *args): + if timers is None: + return None # Could not list loaded timers on init. + + results = {} + + for timer in timers: + unit = "%s.timer" % timer + command_args = args + (unit,) + result = cls._run_systemctl_command(*command_args) + if not result: + return None # Command failed. + results[timer] = result + + return results - def set_daily(self): - logger.debug('Starting systemd timer') - try: - # Start timers in the case of rhel 7 running systemd - systemctl_timer = run_command_get_output('systemctl start insights-client.timer') - systemctl_timer = run_command_get_output('systemctl enable insights-client.timer') - logger.debug("Starting Insights Client systemd timer.") - logger.debug("Status: %s", systemctl_timer['status']) - logger.debug("Output: %s", systemctl_timer['output']) - return self.active - except OSError: - logger.exception('Could not start systemd timer') - return False + @property + def active(self): + """ + Runs systemctl is-enabled for each loaded timers. Returns True if all loaded timers are enabled, None if any + systemctl command fails - here or in init. + """ + results = self._run_systemctl_commands(self.loaded_timers, "is-enabled") + return results and all(result["status"] == 0 for result in results.values()) + + def schedule(self): + """ + Runs systemctl enable --now for each loaded timers. Returns True if all loaded timers are successfully enabled, + False if any of them remains inactive. If no timer is loaded, returns {}, if any systemctl command fails - here + or in init - returns None. Both falsey as nothing has been actually enabled. + """ + logger.debug("Starting systemd timers") + results = self._run_systemctl_commands(self.loaded_timers, "enable", "--now") + return results and self.active def remove_scheduling(self): - logger.debug('Stopping all systemd timers') - try: - # Stop timers in the case of rhel 7 running systemd - systemctl_timer = run_command_get_output('systemctl disable insights-client.timer') - systemctl_timer = run_command_get_output('systemctl stop insights-client.timer') - logger.debug("Stopping Insights Client systemd timer.") - logger.debug("Status: %s", systemctl_timer['status']) - logger.debug("Output: %s", systemctl_timer['output']) - return not self.active - except OSError: - logger.exception('Could not stop systemd timer') - return False + """ + Runs systemctl disable --now for each loaded timers. Returns True if all loaded timers are successfully + disabled, False if any of them remains active. If no timer is loaded, returns {}, if any systemctl command + fails - here or in init - returns None. Both falsey as nothing has been actually disabled. 
+ """ + logger.debug("Stopping all systemd timers") + results = self._run_systemctl_commands(self.loaded_timers, "disable", "--now") + + if results: + active = self.active + return None if active is None else not active + else: + return results def get_scheduler(config, source=None, target='/etc/cron.daily/' + APP_NAME): diff --git a/insights/client/support.py b/insights/client/support.py index bcef7f6262..de8508c435 100644 --- a/insights/client/support.py +++ b/insights/client/support.py @@ -15,13 +15,13 @@ from .constants import InsightsConstants as constants from .connection import InsightsConnection -from .utilities import write_registered_file, write_unregistered_file +from .utilities import write_registered_file, write_unregistered_file, write_to_disk APP_NAME = constants.app_name logger = logging.getLogger(__name__) -def _legacy_registration_check(pconn): +def _legacy_registration_check(api_reg_status): # check local registration record unreg_date = None unreachable = False @@ -35,7 +35,6 @@ def _legacy_registration_check(pconn): with open(constants.unregistered_files[0]) as reg_file: local_record += ' Unregistered at ' + reg_file.readline() - api_reg_status = pconn.api_registration_check() logger.debug('Registration status: %s', api_reg_status) if type(api_reg_status) is bool: if api_reg_status: @@ -58,13 +57,25 @@ def _legacy_registration_check(pconn): def registration_check(pconn): - if pconn.config.legacy_upload: - return _legacy_registration_check(pconn) status = pconn.api_registration_check() - if status: - write_registered_file() + # Legacy code + if pconn.config.legacy_upload: + status = _legacy_registration_check(status) + if isinstance(status, dict): + reg_status = status['status'] + if status['unreachable'] is True: + reg_status = None + # --- end legacy --- else: + reg_status = status + if reg_status: + write_registered_file() + elif reg_status is False: write_unregistered_file() + write_to_disk(constants.machine_id_file, delete=True) + if pconn.config.legacy_upload: + status['messages'].append("System unregistered locally via .unregistered file") + return status diff --git a/insights/client/url_cache.py b/insights/client/url_cache.py index f75f370655..78976bba09 100644 --- a/insights/client/url_cache.py +++ b/insights/client/url_cache.py @@ -17,6 +17,7 @@ class URLCache(object): URLCache is a simple pickle cache, intended to be used as an HTTP response cache. """ + def __init__(self, path=None): """ Initialize a URLCache, loading entries from @path, if provided. @@ -36,7 +37,7 @@ def get(self, url): try: item = self._cache[url] if item.cached_at + _KEEPTIME <= time.time(): - del (self._cache, url) + del self._cache[url] return None return self._cache[url] except KeyError: diff --git a/insights/client/utilities.py b/insights/client/utilities.py index b25837dab5..688ecd9fc5 100644 --- a/insights/client/utilities.py +++ b/insights/client/utilities.py @@ -11,17 +11,30 @@ import shlex import re import sys +import threading +import time +import json +import tarfile from subprocess import Popen, PIPE, STDOUT import yaml try: - from yaml import CLoader as Loader, CDumper as Dumper + from yaml import CDumper as Dumper except ImportError: - from yaml import Loader, Dumper + from yaml import Dumper from .. 
import package_info from .constants import InsightsConstants as constants -from .collection_rules import InsightsUploadConf +from .collection_rules import InsightsUploadConf, load_yaml + +from insights.core.context import Context +from insights.parsers.os_release import OsRelease +from insights.parsers.redhat_release import RedhatRelease + +try: + from insights_client.constants import InsightsConstants as wrapper_constants +except ImportError: + wrapper_constants = None logger = logging.getLogger(__name__) @@ -96,10 +109,6 @@ def delete_registered_file(): def delete_unregistered_file(): for f in constants.unregistered_files: write_to_disk(f, delete=True) - # this function only called when machine is registered, - # so while registering, delete this file too. we only - # need it around until we're registered - write_to_disk(constants.register_marker_file, delete=True) def delete_cache_files(): @@ -141,10 +150,10 @@ def generate_machine_id(new=False, write_to_disk(destination_file, content=machine_id) try: - uuid.UUID(machine_id, version=4) - return str(machine_id).strip() - except ValueError: + return str(uuid.UUID(str(machine_id).strip(), version=4)) + except ValueError as e: logger.error("Invalid machine ID: %s", machine_id) + logger.error("Error details: %s", str(e)) logger.error("Remove %s and a new one will be generated.\nRerun the client with --register", destination_file) sys.exit(constants.sig_kill_bad) @@ -171,7 +180,7 @@ def _expand_paths(path): def validate_remove_file(config): """ - Validate the remove file + Validate the remove file and tags file """ return InsightsUploadConf(config).validate() @@ -228,16 +237,14 @@ def get_version_info(): ''' Get the insights client and core versions for archival ''' - cmd = 'rpm -q --qf "%{VERSION}-%{RELEASE}" insights-client' + try: + client_version = wrapper_constants.version + except AttributeError: + # wrapper_constants is None or has no attribute "version" + client_version = None version_info = {} version_info['core_version'] = '%s-%s' % (package_info['VERSION'], package_info['RELEASE']) - rpm_proc = run_command_get_output(cmd) - if rpm_proc['status'] != 0: - # Unrecoverable error - logger.debug('Error occurred while running rpm -q. 
Details:\n%s' % rpm_proc['output']) - version_info['client_version'] = None - else: - version_info['client_version'] = rpm_proc['output'] + version_info['client_version'] = client_version return version_info @@ -291,31 +298,53 @@ def read_pidfile(): return pid -def systemd_notify(pid): +def _systemd_notify(pid): ''' Ping the systemd watchdog with the main PID so that the watchdog doesn't kill the process ''' - if not os.getenv('NOTIFY_SOCKET'): - # running standalone, not via systemd job - return + try: + proc = Popen(['/usr/bin/systemd-notify', '--pid=' + str(pid), 'WATCHDOG=1']) + except OSError as e: + logger.debug('Could not launch systemd-notify: %s', str(e)) + return False + stdout, stderr = proc.communicate() + if proc.returncode != 0: + logger.debug('systemd-notify returned %s', proc.returncode) + return False + return True + + +def systemd_notify_init_thread(): + ''' + Use a thread to periodically ping systemd instead + of calling it on a per-command basis + ''' + pid = read_pidfile() if not pid: logger.debug('No PID specified.') return + if not os.getenv('NOTIFY_SOCKET'): + # running standalone, not via systemd job + return if not os.path.exists('/usr/bin/systemd-notify'): # RHEL 6, no systemd return - try: - proc = Popen(['/usr/bin/systemd-notify', '--pid=' + str(pid), 'WATCHDOG=1']) - except OSError: - logger.debug('Could not launch systemd-notify.') - return - stdout, stderr = proc.communicate() - if proc.returncode != 0: - logger.debug('systemd-notify returned %s', proc.returncode) + def _sdnotify_loop(): + while True: + # run sdnotify every 30 seconds + if not _systemd_notify(pid): + # end the loop if something goes wrong + break + time.sleep(30) + + sdnotify_thread = threading.Thread(target=_sdnotify_loop, args=()) + sdnotify_thread.daemon = True + sdnotify_thread.start() -def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.conf")): + +def get_tags(tags_file_path=constants.default_tags_file): ''' Load tag data from the tags file. @@ -323,17 +352,19 @@ def get_tags(tags_file_path=os.path.join(constants.default_conf_dir, "tags.conf" ''' tags = None - try: - with open(tags_file_path) as f: - data = f.read() - tags = yaml.load(data, Loader=Loader) - except EnvironmentError as e: - logger.debug("tags file does not exist: %s", os.strerror(e.errno)) + if os.path.isfile(tags_file_path): + try: + tags = load_yaml(tags_file_path) + except RuntimeError: + logger.error("Invalid YAML. Unable to load %s", tags_file_path) + return None + else: + logger.debug("%s does not exist", tags_file_path) return tags -def write_tags(tags, tags_file_path=os.path.join(constants.default_conf_dir, "tags.conf")): +def write_tags(tags, tags_file_path=constants.default_tags_file): """ Writes tags to tags_file_path @@ -346,3 +377,108 @@ def write_tags(tags, tags_file_path=os.path.join(constants.default_conf_dir, "ta with open(tags_file_path, mode="w+") as f: data = yaml.dump(tags, Dumper=Dumper, default_flow_style=False) f.write(data) + + +def migrate_tags(): + ''' + We initially released the tags feature with the tags file set as + tags.conf, but soon after switched it over to tags.yaml. There may be + installations out there with tags.conf files, so rename the files. 
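+    If tags.yaml already exists, nothing is renamed and any old tags.conf
+    is left in place.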
+ ''' + tags_conf = os.path.join(constants.default_conf_dir, 'tags.conf') + tags_yaml = os.path.join(constants.default_conf_dir, 'tags.yaml') + + if os.path.exists(tags_yaml): + # current default file exists, do nothing + return + if os.path.exists(tags_conf): + # old file exists and current does not + logger.info('Tags file %s detected. This filename is deprecated; please use %s. The file will be renamed automatically.', + tags_conf, tags_yaml) + try: + os.rename(tags_conf, tags_yaml) + except OSError as e: + logger.error(e) + + +def get_parent_process(): + ''' + Get parent process of the client + + Returns: string + ''' + ppid = os.getppid() + output = run_command_get_output('cat /proc/%s/status' % ppid) + if output['status'] == 0: + name = output['output'].splitlines()[0].split('\t')[1] + return name + else: + return "unknown" + + +def os_release_info(): + ''' + Use insights-core to fetch the os-release or redhat-release info + + Returns a tuple of OS name and version + ''' + os_family = "Unknown" + os_release = "" + for p in ["/etc/os-release", "/etc/redhat-release"]: + try: + with open(p) as f: + data = f.readlines() + + ctx = Context(content=data, path=p, relative_path=p) + if p == "/etc/os-release": + rls = OsRelease(ctx) + os_family = rls.data.get("NAME") + os_release = rls.data.get("VERSION_ID") + elif p == "/etc/redhat-release": + rls = RedhatRelease(ctx) + os_family = rls.product + os_release = rls.version + break + except IOError: + continue + except Exception as e: + logger.warning("Failed to detect OS version: %s", e) + return (os_family, os_release) + + +def largest_spec_in_archive(archive_file): + logger.info("Checking for large files...") + tar_file = tarfile.open(archive_file, 'r') + largest_fsize = 0 + largest_file_name = "" + largest_spec = "" + # get the name of the archive + name = os.path.basename(archive_file).split(".tar.gz")[0] + # get the archives from inside meta_data directory + metadata_top = os.path.join(name, "meta_data/") + data_top = os.path.join(name, "data") + for file in tar_file.getmembers(): + if metadata_top in file.name: + file_extract = tar_file.extractfile(file.name) + specs_metadata = json.load(file_extract) + results = specs_metadata.get("results", []) + if not results: + continue + if not isinstance(results, list): + # specs with only one resulting file are not in list form + results = [results] + for result in results: + # get the path of the spec result and check its filesize + fname = result.get("object", {}).get("relative_path") + abs_fname = os.path.join('.', data_top, fname) + # get the archives from inside data directory + data_file = tar_file.getmember(abs_fname) + if (data_file.size > largest_fsize): + largest_fsize = data_file.size + largest_file_name = fname + largest_spec = specs_metadata["name"] + return (largest_file_name, largest_fsize, largest_spec) + + +def size_in_mb(num_bytes): + return float(num_bytes) / (1024 * 1024) diff --git a/insights/collect.py b/insights/collect.py index 38ddaf7b39..5d9f420d27 100755 --- a/insights/collect.py +++ b/insights/collect.py @@ -8,17 +8,19 @@ ``insights.specs.Specs``. 
""" from __future__ import print_function -from contextlib import contextmanager import argparse import logging import os +import sys import tempfile import yaml from datetime import datetime -from insights import apply_configs, apply_default_enabled, dr -from insights.core import blacklist +from insights import apply_configs, apply_default_enabled, get_pool +from insights.core import blacklist, dr, filters +from insights.core.blacklist import BLACKLISTED_SPECS +from insights.core.exceptions import CalledProcessError from insights.core.serde import Hydration from insights.util import fs from insights.util.subproc import call @@ -66,7 +68,7 @@ enabled: true run_strategy: - name: parallel + name: serial args: max_workers: null @@ -78,6 +80,7 @@ # packages and modules to load packages: - insights.specs.default + - insights.specs.datasources # configuration of loaded components. names are prefixes, so any component with # a fully qualified name that starts with a key will get the associated @@ -85,6 +88,9 @@ # datasources. Can specify metadata, which must be a dictionary and will be # merged with the components' default metadata. configs: + - name: insights.specs.datasources + enabled: true + - name: insights.specs.Specs enabled: true @@ -94,20 +100,173 @@ - name: insights.parsers.hostname enabled: true - - name: insights.parsers.facter - enabled: true - - name: insights.parsers.systemid enabled: true - name: insights.combiners.hostname enabled: true + # needed for the CloudProvider combiner + - name: insights.parsers.installed_rpms + enabled: true + + - name: insights.parsers.dmidecode + enabled: true + + - name: insights.parsers.yum + enabled: true + + - name: insights.parsers.rhsm_conf + enabled: true + + - name: insights.combiners.cloud_provider + enabled: true + + # needed for the cloud related specs + - name: insights.components.cloud_provider.IsAWS + enabled: true + + - name: insights.components.cloud_provider.IsAzure + enabled: true + + - name: insights.components.cloud_provider.IsGCP + enabled: true + + # needed for the ceph related specs + - name: insights.components.ceph.IsCephMonitor + enabled: true + + # needed for the Services combiner + - name: insights.parsers.chkconfig + enabled: true + + - name: insights.parsers.systemd.unitfiles + enabled: true + + - name: insights.combiners.services + enabled: true + + # needed for the 'teamdctl_state_dump' spec + - name: insights.parsers.nmcli.NmcliConnShow + enabled: true + + # needed for multiple Datasouce specs + - name: insights.parsers.ps.PsAuxcww + enabled: true + + - name: insights.parsers.ps.PsAuxww + enabled: true + + - name: insights.combiners.ps + enabled: true + + # needed for httpd_certificate + - name: insights.combiners.httpd_conf.HttpdConfTree + enabled: true + + - name: insights.combiners.httpd_conf._HttpdConf + enabled: true + + # needed for httpd_on_nfs + - name: insights.parsers.mount.ProcMounts + enabled: true + + # needed for nginx_ssl_cert_enddate + - name: insights.combiners.nginx_conf.NginxConfTree + enabled: true + + - name: insights.combiners.nginx_conf._NginxConf + enabled: true + + # needed for mssql_tls_cert_enddate + - name: insights.parsers.mssql_conf.MsSQLConf + enabled: true + + # needed to collect the sap_hdb_version spec that uses the Sap combiner + - name: insights.parsers.lssap + enabled: true + + - name: insights.parsers.saphostctrl + enabled: true + + - name: insights.combiners.sap + enabled: true + + # needed for fw_devices and fw_security specs + - name: insights.parsers.dmidecode.DMIDecode + 
enabled: true + + - name: insights.parsers.virt_what.VirtWhat + enabled: true + + - name: insights.combiners.virt_what.VirtWhat + enabled: true + + - name: insights.components.virtualization.IsBareMetal + enabled: true + + # needed for the 'pre-check' of the 'ss' spec and the 'modinfo_filtered_modules' spec + - name: insights.parsers.lsmod.LsMod + enabled: true + + # needed for the 'pre-check' of the 'is_satellite_server' spec + - name: insights.combiners.satellite_version.SatelliteVersion + enabled: true + - name: insights.components.satellite.IsSatellite + enabled: true + + # needed for the 'pre-check' of the 'is_satellite_capsule' spec + - name: insights.combiners.satellite_version.CapsuleVersion + enabled: true + - name: insights.components.satellite.IsCapsule + enabled: true + + # needed for the 'pre-check' of the 'satellite_provision_param_settings' spec + - name: insights.components.satellite.IsSatellite611 + enabled: true + + # needed for the 'pre-check' of the 'corosync_cmapctl_cmd_list' spec + - name: insights.combiners.redhat_release.RedHatRelease + enabled: true + - name: insights.parsers.uname.Uname + enabled: true + - name: insights.parsers.redhat_release.RedhatRelease + enabled: true + - name: insights.components.rhel_version.IsRhel7 + enabled: true + - name: insights.components.rhel_version.IsRhel8 + enabled: true + - name: insights.components.rhel_version.IsRhel9 + enabled: true + + # needed for the 'pmlog_summary' spec + - name: insights.parsers.ros_config.RosConfig + enabled: true + + # needed for the 'container' specs + - name: insights.parsers.podman_list.PodmanListContainers + enabled: true + + - name: insights.parsers.docker_list.DockerListContainers + enabled: true + # needed because some specs aren't given names before they're used in DefaultSpecs - name: insights.core.spec_factory enabled: true + + # needed by the 'luks_data_sources' spec + - name: insights.parsers.blkid.BlockIDInfo + enabled: true + + - name: insights.components.cryptsetup + enabled: true """.strip() +EXCEPTIONS_TO_REPORT = set([ + OSError +]) +"""Exception types that should be reported on after core collection.""" + def load_manifest(data): """ Helper for loading a manifest yaml doc. """ @@ -195,26 +354,7 @@ def create_archive(path, remove_path=True): return archive_path -@contextmanager -def get_pool(parallel, kwargs): - """ - Yields: - a ThreadPoolExecutor if parallel is True and `concurrent.futures` exists. - `None` otherwise. - """ - - if parallel: - try: - from concurrent.futures import ThreadPoolExecutor - with ThreadPoolExecutor(thread_name_prefix="insights-collector-pool", **kwargs) as pool: - yield pool - except ImportError: - yield None - else: - yield None - - -def collect(manifest=default_manifest, tmp_path=None, compress=False): +def collect(manifest=default_manifest, tmp_path=None, compress=False, rm_conf=None, client_timeout=None): """ This is the collection entry point. It accepts a manifest, a temporary directory in which to store output, and a boolean for optional compression. @@ -228,9 +368,15 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False): compress (boolean): True to create a tar.gz and remove the original workspace containing output. False to leave the workspace without creating a tar.gz - + rm_conf (dict): Client-provided python dict containing keys + "commands", "files", and "keywords", to be injected + into the manifest blacklist. 
+ client_timeout (int): Client-provided command timeout value Returns: - The full path to the created tar.gz or workspace. + (str, dict): The full path to the created tar.gz or workspace. + And a dictionary with relevant exceptions captured by the broker during + core collection, this dictionary has the following structure: + ``{ exception_type: [ (exception_obj, component), (exception_obj, component) ]}``. """ manifest = load_manifest(manifest) @@ -244,9 +390,38 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False): apply_blacklist(client.get("blacklist", {})) + # insights-client + if client_timeout: + try: + client['context']['args']['timeout'] = client_timeout + except LookupError: + log.warning('Could not set timeout option.') + rm_conf = rm_conf or {} + apply_blacklist(rm_conf) + for component in rm_conf.get('components', []): + if not dr.get_component_by_name(component): + log.warning('WARNING: Unknown component in blacklist: %s' % component) + else: + dr.set_enabled(component, enabled=False) + BLACKLISTED_SPECS.append(component.split('.')[-1]) + log.warning('WARNING: Skipping component: %s', component) + to_persist = get_to_persist(client.get("persist", set())) - hostname = call("hostname -f", env=SAFE_ENV).strip() + try: + filters.load() + except IOError as e: + # could not load filters file + log.debug("No filters available: %s", str(e)) + except AttributeError as e: + # problem parsing the filters + log.debug("Could not parse filters: %s", str(e)) + + try: + hostname = call("hostname -f", env=SAFE_ENV).strip() + except CalledProcessError: + # problem calling hostname -f + hostname = call("hostname", env=SAFE_ENV).strip() suffix = datetime.utcnow().strftime("%Y%m%d%H%M%S") relative_path = "insights-%s-%s" % (hostname, suffix) tmp_path = tmp_path or tempfile.gettempdir() @@ -260,17 +435,53 @@ def collect(manifest=default_manifest, tmp_path=None, compress=False): parallel = run_strategy.get("name") == "parallel" pool_args = run_strategy.get("args", {}) - with get_pool(parallel, pool_args) as pool: + with get_pool(parallel, "insights-collector-pool", pool_args) as pool: h = Hydration(output_path, pool=pool) broker.add_observer(h.make_persister(to_persist)) dr.run_all(broker=broker, pool=pool) + collect_errors = _parse_broker_exceptions(broker, EXCEPTIONS_TO_REPORT) + if compress: - return create_archive(output_path) - return output_path + return create_archive(output_path), collect_errors + return output_path, collect_errors + + +def _parse_broker_exceptions(broker, exceptions_to_report): + """ + Parse the exceptions captured in the broker during core collection + and keep only exception types configured in the ``exceptions_to_report``. + + Args: + broker (Broker): Broker object used for core collection. + exceptions_to_report (set): Exception types to retrieve from the broker. + + Returns: + (dict): A dictionary with relevant exceptions captured by the broker. 
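+            The dictionary maps each exception type to a list of
+            ``(exception, component)`` tuples.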
+ """ + errors = {} + try: + if broker.exceptions: + for component, exceptions in broker.exceptions.items(): + for ex in exceptions: + ex_type = type(ex) + if ex_type in exceptions_to_report: + if ex_type in errors.keys(): + errors[ex_type].append((ex, component)) + else: + errors[ex_type] = [(ex, component)] + except Exception as e: + log.warning("Could not parse exceptions from the broker.: %s", str(e)) + return errors def main(): + # Remove command line args so that they are not parsed by any called modules + # The main fxn is only invoked as a cli, if calling from another cli then + # use the collect function instead + collect_args = [arg for arg in sys.argv[1:]] if len(sys.argv) > 1 else [] + sys.argv = [sys.argv[0], ] if sys.argv else sys.argv + p = argparse.ArgumentParser() p.add_argument("-m", "--manifest", help="Manifest yaml.") p.add_argument("-o", "--out_path", help="Path to write output data.") @@ -278,7 +489,7 @@ def main(): p.add_argument("-v", "--verbose", help="Verbose output.", action="store_true") p.add_argument("-d", "--debug", help="Debug output.", action="store_true") p.add_argument("-c", "--compress", help="Compress", action="store_true") - args = p.parse_args() + args = p.parse_args(args=collect_args) level = logging.WARNING if args.verbose: @@ -297,7 +508,7 @@ def main(): manifest = default_manifest out_path = args.out_path or tempfile.gettempdir() - archive = collect(manifest, out_path, compress=args.compress) + archive, errors = collect(manifest, out_path, compress=args.compress) print(archive) diff --git a/insights/combiners/ansible_info.py b/insights/combiners/ansible_info.py new file mode 100644 index 0000000000..256a3532f6 --- /dev/null +++ b/insights/combiners/ansible_info.py @@ -0,0 +1,98 @@ +""" +Ansible Info +============ +Provide information about the Ansible packages installed on a system. +""" +from insights.core.plugins import combiner +from insights.parsers.installed_rpms import InstalledRpms + +ANSIBLE_TOWER_PKG = "ansible-tower" +ANSIBLE_AUTOMATION_HUB_PKG = "automation-hub" +ANSIBLE_CATALOG_WORKER_PKG = "catalog-worker" +ANSIBLE_AUTOMATION_CONTROLLER_PKG = "automation-controller" +ANSIBLE_PACKAGES = [ + ANSIBLE_TOWER_PKG, + ANSIBLE_AUTOMATION_HUB_PKG, + ANSIBLE_CATALOG_WORKER_PKG, + ANSIBLE_AUTOMATION_CONTROLLER_PKG, +] + + +@combiner(InstalledRpms) +class AnsibleInfo(dict): + """ + Provides information related to Ansible based on the RPMs installed. + + Provides properties to determine the Ansible specific system characteristics. The + base class of the combiner is ``dict`` with dictionary keys being the Ansible + package names, and data values being + :py:class:`insights.parsers.installed_rpms.InstalledRpm` objects. + See the :py:class:`insights.parsers.installed_rpms.InstalledRpm` + class for more information on object methods and values. + + Properties are provided to aid in fingerprinting of the system. 
+ + Examples: + >>> type(info) + + >>> info.is_tower + True + >>> info.tower_version + '1.0.0' + >>> info.is_controller + True + >>> info.controller_version + '1.0.0' + """ + def __init__(self, rpms): + pkg_versions = dict([(pkg, rpms.get_max(pkg)) for pkg in ANSIBLE_PACKAGES if rpms.get_max(pkg) is not None]) + self.update(pkg_versions) + + @property + def is_tower(self): + """ bool: Whether or not this system has ``ansible-tower`` installed """ + return ANSIBLE_TOWER_PKG in self + + @property + def tower_version(self): + """ str: Version of ansible-tower installed or ``None``""" + return self[ANSIBLE_TOWER_PKG].version if ANSIBLE_TOWER_PKG in self else None + + @property + def is_controller(self): + """ + bool: Whether or not this system has ``ansible-tower`` or + ``automation-controller`` installed + """ + return ANSIBLE_TOWER_PKG in self or ANSIBLE_AUTOMATION_CONTROLLER_PKG in self + + @property + def controller_version(self): + """ + str: Version of ansible-tower installed, or if it's not installed + the version of automation-controller installed or ``None`` + """ + if ANSIBLE_TOWER_PKG in self: + return self[ANSIBLE_TOWER_PKG].version + elif ANSIBLE_AUTOMATION_CONTROLLER_PKG in self: + return self[ANSIBLE_AUTOMATION_CONTROLLER_PKG].version + + @property + def is_hub(self): + """ bool: Whether or not this system has ``automation-hub`` installed """ + return ANSIBLE_AUTOMATION_HUB_PKG in self + + @property + def hub_version(self): + """ str: Version of automation-hub installed or ``None``""" + return self[ANSIBLE_AUTOMATION_HUB_PKG].version if ANSIBLE_AUTOMATION_HUB_PKG in self else None + + @property + def is_catalog_worker(self): + """ bool: Whether or not this system has ``catalog-worker`` installed """ + return ANSIBLE_CATALOG_WORKER_PKG in self + + @property + def catalog_worker_version(self): + """ str: Version of catalog-worker installed or ``None``""" + return self[ANSIBLE_CATALOG_WORKER_PKG].version if ANSIBLE_CATALOG_WORKER_PKG in self else None diff --git a/insights/combiners/ceph_version.py b/insights/combiners/ceph_version.py index 65c77db65c..1d6ff38065 100644 --- a/insights/combiners/ceph_version.py +++ b/insights/combiners/ceph_version.py @@ -6,30 +6,13 @@ the ``CephVersion``, ``CephInsights`` and ``CephReport`` parsers. The order from most preferred to least preferred is `CephVersion``, ``CephInsights``, ``CephReport``. -Examples: - >>> type(cv) - - >>> cv.version - '3.2' - >>> cv.major - '3' - >>> cv.minor - '2' - >>> cv.downstream_release - '0' - >>> cv.upstream_version["release"] - 12 - >>> cv.upstream_version["major"] - 2 - >>> cv.upstream_version["minor"] - 8 """ from insights import combiner -from insights.parsers.ceph_version import CephVersion as CephV +from insights.parsers.ceph_version import (CephVersion as CephV, + get_community_version, get_ceph_version) from insights.parsers.ceph_insights import CephInsights from insights.parsers.ceph_cmd_json_parsing import CephReport -from insights.core.context import Context @combiner([CephV, CephInsights, CephReport]) @@ -37,7 +20,37 @@ class CephVersion(object): """ Combiner for Ceph Version information. It uses the results of the ``CephVersion``, ``CephInsights`` and ``CephReport`` parsers. - The order from most preferred to least preferred is `CephVersion``, ``CephInsights``, ``CephReport``. + + The prefered parsing order is `CephVersion``, ``CephInsights``, ``CephReport``. 
+ + Attributes: + version (str): The Red Hat release version + major (str): The major version of Red Hat release version + minor (str): The minor version of Red Hat release version + is_els (boolean): If the verion in 'Extended life cycle support (ELS) add-on' phase + downstream_release (str): The downstream release info + upstream_version (dict): The detailed upstream version info with the + following keys `release (int)`, `major (int)` and `minor (int)`. + + Examples: + >>> type(cv) + + >>> cv.version + '3.2' + >>> cv.major + '3' + >>> cv.minor + '2' + >>> cv.is_els + False + >>> cv.downstream_release + '0' + >>> cv.upstream_version["release"] + 12 + >>> cv.upstream_version["major"] + 2 + >>> cv.upstream_version["minor"] + 8 """ def __init__(self, cv, ci, cr): @@ -45,21 +58,23 @@ def __init__(self, cv, ci, cr): self.version = cv.version self.major = cv.major self.minor = cv.minor + self.is_els = cv.is_els self.downstream_release = cv.downstream_release self.upstream_version = cv.upstream_version elif ci: - context = Context(content=ci.data["version"]["full"].strip().splitlines()) - cv = CephV(context) - self.version = cv.version - self.major = cv.major - self.minor = cv.minor - self.downstream_release = cv.downstream_release - self.upstream_version = cv.upstream_version + community_full = get_community_version(ci.data["version"]["full"].strip()) + cv = get_ceph_version(community_full) + self.version = cv.get('version') + self.major = cv.get('major') + self.minor = cv.get('minor') + self.is_els = cv.get('els', False) + self.downstream_release = cv.get('downstream_release') + self.upstream_version = cv.get('upstream_version') else: - context = Context(content=cr["version"].strip().splitlines()) - cv = CephV(context) - self.version = cv.version - self.major = cv.major - self.minor = cv.minor - self.downstream_release = cv.downstream_release - self.upstream_version = cv.upstream_version + cv = get_ceph_version(cr["version"].strip()) + self.version = cv.get('version') + self.major = cv.get('major') + self.minor = cv.get('minor') + self.is_els = cv.get('els', False) + self.downstream_release = cv.get('downstream_release') + self.upstream_version = cv.get('upstream_version') diff --git a/insights/combiners/cloud_instance.py b/insights/combiners/cloud_instance.py new file mode 100644 index 0000000000..4037334b19 --- /dev/null +++ b/insights/combiners/cloud_instance.py @@ -0,0 +1,88 @@ +""" +Cloud Instance +============== + +Combiner for the basic information of a cloud instance. 
It combines the +results of the following combiners and parsers: + +* :py:class:`insights.combiners.cloud_provider.CloudProvider` +* :py:class:`insights.parsers.aws_instance_id.AWSInstanceIdDoc` +* :py:class:`insights.parsers.azure_instance.AzureInstanceID` +* :py:class:`insights.parsers.azure_instance.AzureInstanceType` +* :py:class:`insights.parsers.gcp_instance_type.GCPInstanceType` +* :py:class:`insights.parsers.subscription_manager.SubscriptionManagerFacts` + +""" +from insights.combiners.cloud_provider import CloudProvider +from insights.core.exceptions import ContentException, SkipComponent +from insights.core.plugins import combiner +from insights.parsers.aws_instance_id import AWSInstanceIdDoc +from insights.parsers.azure_instance import AzureInstanceID, AzureInstanceType +from insights.parsers.gcp_instance_type import GCPInstanceType +from insights.parsers.subscription_manager import SubscriptionManagerFacts + + +@combiner( + CloudProvider, + [ + AWSInstanceIdDoc, + AzureInstanceID, + AzureInstanceType, + GCPInstanceType, + SubscriptionManagerFacts, + ] +) +class CloudInstance(object): + """ + Class to provide the basic information of a cloud instance. + + Attributes: + provider (str): The cloud provider, e.g. "aws", "azure", "ibm", + "google", or "alibaba". It's from the value of + :class:`insights.combiners.cloud_provider.CloudProvider.cloud_provider` + id (str): The ID of the cloud instance + type (str): The type of the cloud instance. + Different cloud providers have different illustration of the + `type` and `size`, here for this Combiner, we treat the `type` and + `size` as the same. E.g.:: + + - "Standard_L64s_v2" for Azure + - "x1.16xlarge" for AWS + - "m1-megamem-96" for GCP + + size (str): Alias of the `type` + + Examples: + >>> ci.provider + 'aws' + >>> ci.id == 'i-1234567890abcdef0' + True + >>> ci.type == 't2.micro' + True + >>> ci.size == 't2.micro' + True + """ + def __init__(self, cp, aws=None, azure_id=None, azure_type=None, + gcp=None, facts=None): + self.provider = cp.cloud_provider + self.id = None + # 1. Get from the Cloud REST API at first + if aws: + self.id = aws.get('instanceId') + self.type = aws.get('instanceType') + elif azure_id and azure_type: + self.id = azure_id.id + self.type = azure_type.raw + elif gcp: + self.type = gcp.raw + # 2. Check the "subscription-manager facts" + if self.id is None and facts: + key = "{0}_instance_id".format(self.provider) + if key not in facts: + raise ContentException("Unmatched/unsupported types!") + self.id = facts[key] + # The instance id is the key attribute of this Combiner + if self.id is None: + raise SkipComponent + # 'size' is the alias of 'type' + self.size = self.type diff --git a/insights/combiners/cloud_provider.py b/insights/combiners/cloud_provider.py index fdccf278ad..67a12a3650 100644 --- a/insights/combiners/cloud_provider.py +++ b/insights/combiners/cloud_provider.py @@ -4,9 +4,10 @@ Combiner for Cloud information. It uses the results of the multiple parsers: -* :class:`InstalledRpms`, -* :class:`YumRepoList` and -* :class:`DMIDecode` parsers +* :py:class:`insights.parsers.installed_rpms.InstalledRpms` +* :py:class:`insights.parsers.yum.YumRepoList` +* :py:class:`insights.parsers.dmidecode.DMIDecode` +* :py:class:`insights.parsers.rhsm_conf.RHSMConf` The combiner uses these parsers determine the Cloud Provider based on a set of criteria that is unique to each cloud provider. 
@@ -14,21 +15,25 @@ Examples: >>> cp_aws.cloud_provider 'aws' - >>> cp_aws.cp_bios_version == {'aws': '4.2.amazon', 'google': '', 'azure': '', 'alibaba': ''} + >>> cp_aws.cp_bios_version['aws'] == '4.2.amazon' True - >>> cp_aws.cp_rpms == {'aws': ['rh-amazon-rhui-client-2.2.124-1.el7'], 'google': [], 'azure': [], 'alibaba': []} + >>> cp_aws.cp_rpms['aws'] == ['rh-amazon-rhui-client-2.2.124-1.el7'] True >>> cp_aws.cp_uuid['aws'] 'EC2F58AF-2DAD-C57E-88C0-A81CB6084290' + >>> cp_aws.long_name + 'Amazon Web Services' >>> cp_azure.cloud_provider 'azure' - >>> cp_azure.cp_yum == {'aws': [], 'google': [], 'azure': ['rhui-microsoft-azure-rhel7-2.2-74'], 'alibaba': []} + >>> cp_azure.cp_yum['azure'] == ['rhui-microsoft-azure-rhel7-2.2-74'] True >>> cp_azure.cp_asset_tag['azure'] '7783-7084-3265-9085-8269-3286-77' >>> cp_alibaba.cloud_provider 'alibaba' - >>> cp_alibaba.cp_manufacturer == {'aws': '', 'google': '', 'azure': '', 'alibaba': 'Alibaba Cloud'} + >>> cp_alibaba.cp_manufacturer['alibaba'] == 'Alibaba Cloud' + True + >>> cp_ibm.cp_rhsm_server_hostname['ibm'] == 'host.networklayer.com' True """ @@ -37,10 +42,244 @@ from insights.parsers.installed_rpms import InstalledRpms from insights.parsers.dmidecode import DMIDecode from insights.parsers.yum import YumRepoList -from collections import namedtuple +from insights.parsers.rhsm_conf import RHSMConf + + +class CloudProviderInstance(object): + """ + Class to represent a base cloud provider instance + + Use this base class to derive new cloud provider classes. In each new cloud + provider class set the particular values that will be used to detect that + particular cloud provider. + + Attributes: + + rpm (str): RPM string in lowercase to use when searching for this cloud provider. + yum (str): Yum repo name string in lowercase to use when searching for this cloud provider. + bios_vendor_version (str): BIOS vendor version string in lowercase to use when searching + for this cloud provider. + manuf (str): Manufacturer string in lowercase to use when searching for this cloud provider. + asset_tag (str): Asset tag string in lowercase to use when searching for this + cloud provider. + uuid (str): UUID string in lowercase to use when searchinf for this cloud provider. + rhsm_hostname (str): Hostname string in lowercase to use when searching for this + cloud provider in ``rhsm.conf``. + cp_bios_vendor (str): BIOS vendor string value found in search for this cloud provider. + cp_bios_version (str): BIOS version string value found in search for this cloud provider. + cp_rpms (list): List of RPM string values found in search for this cloud provider. + cp_yum (list): List of Yum repo name string values found in search for this cloud provider. + cp_asset_tag (str): Asset tag string value found in search for this cloud provider. + cp_uuid (str): UUID string value found in search for this cloud provider. + cp_manufacturer (str): Manufacturer string value found in search for this cloud provider. + cp_rhsm_server_hostname (str): RHSM server hostname string value found in search for + this cloud provider. 
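+
+    Example:
+        A hypothetical subclass; the ``example`` name and the RPM/asset-tag
+        values below are made up for illustration. A real provider would also
+        need to be registered in ``CloudProvider._CLOUD_PROVIDER_CLASSES`` and
+        handled in ``CloudProvider._select_provider``::
+
+            class ExampleCloudProvider(CloudProviderInstance):
+                _NAME = 'example'
+                _LONG_NAME = 'Example Cloud'
+
+                def __init__(self, *args, **kwargs):
+                    super(ExampleCloudProvider, self).__init__(*args, **kwargs)
+                    self.rpm = 'example-rhui-client'
+                    self.asset_tag = 'Example-Cloud-Asset-Tag'
+                    self.cp_rpms = self._get_rpm_cp_info(self.rpm)
+                    self.cp_asset_tag = self._get_cp_from_asset_tag(self.asset_tag)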
+ + """ + def __init__(self, rpms=None, dmidcd=None, yum_repos=None, rhsm_cfg=None): + self._rpms = rpms + self._dmidcd = dmidcd + self._yum_repos = yum_repos + self._rhsm_cfg = rhsm_cfg + self.rpm = '' + self.yum = '' + self.bios_vendor_version = '' + self.manuf = '' + self.asset_tag = '' + self.uuid = '' + self.rhsm_hostname = '' + self.cp_bios_vendor = '' + self.cp_bios_version = '' + self.cp_rpms = [] + self.cp_yum = [] + self.cp_asset_tag = '' + self.cp_uuid = '' + self.cp_manufacturer = '' + self.cp_rhsm_server_hostname = '' + + def _get_cp_bios_vendor(self, vendor_version): + """ str: Returns BIOS vendor string if it matches ``vendor_version`` """ + vendor = '' + if self._dmidcd and self._dmidcd.bios: + vendor = ( + self._dmidcd.bios.get('vendor') + if vendor_version and vendor_version in self._dmidcd.bios.get('vendor', '').lower() else '' + ) + return vendor + + def _get_cp_bios_version(self, vendor_version): + """ str: Returns BIOS version string if it matches ``vendor_version`` """ + version = '' + if self._dmidcd and self._dmidcd.bios: + version = ( + self._dmidcd.bios.get('version') + if vendor_version and vendor_version in self._dmidcd.bios.get('version', '').lower() else '' + ) + return version + + def _get_rpm_cp_info(self, rpm): + """ list: Returns list of RPMs matching ``rpm`` """ + found_rpms = [] + if self._rpms: + for key, val in self._rpms.packages.items(): + for v in val: + if rpm and rpm in v.package.lower(): + found_rpms.append(v.package) + return found_rpms + + def _get_cp_from_manuf(self, manuf): + """ str: Returns manufacturer string if it matches ``manuf`` """ + manufacturer = '' + if self._dmidcd and self._dmidcd.system_info: + manufacturer = ( + self._dmidcd.system_info.get('manufacturer') + if manuf == self._dmidcd.system_info.get('manufacturer', '').lower() else '' + ) + return manufacturer + + def _get_cp_from_yum(self, repo_name): + """ list: Returns list of Yum repos matching ``repo_name`` """ + found_repos = [] + if self._yum_repos and hasattr(self._yum_repos, 'data'): + found_repos = [ + repo.get('id').lower() + for repo in self._yum_repos.data + if repo_name and repo_name in repo.get('id', '').lower() + ] + return found_repos + + def _get_cp_from_rhsm_conf(self, rhsm_server_hostname): + """ str: Returns rhsm server hostname string if it matches ``rhsm_server_hostname`` """ + server_hostname = '' + if self._rhsm_cfg and 'server' in self._rhsm_cfg and 'hostname' in self._rhsm_cfg['server']: + hostname = self._rhsm_cfg.get('server', 'hostname') + if hostname and hostname.lower().strip().endswith(rhsm_server_hostname): + server_hostname = hostname + return server_hostname + + def _get_cp_from_asset_tag(self, asset_tag): + """ str: Returns asset tag string if it matches ``asset_tag`` """ + tag = '' + if self._dmidcd and hasattr(self._dmidcd, 'data'): + ch_info = self._dmidcd.data.get('chassis_information', []) + if ch_info: + tag = ch_info[0].get('asset_tag') if asset_tag and asset_tag == ch_info[0].get('asset_tag', '') else '' + return tag + + def _get_cp_from_uuid(self, uuid): + """ str: Returns UUID string if it matches ``uuid`` """ + found_uuid = '' + if self._dmidcd and self._dmidcd.system_info: + found_uuid = ( + self._dmidcd.system_info.get('uuid') + if uuid and self._dmidcd.system_info.get('uuid', '').lower().strip().startswith(uuid) else '' + ) + return found_uuid + + @property + def name(self): + """ str: Short cloud provider class name or ID """ + return self._NAME + + @property + def long_name(self): + """ str: Long cloud provider name """ + 
return self._LONG_NAME + + +class GoogleCloudProvider(CloudProviderInstance): + """ + Class to identify Google Cloud provider + + Google CP can be identified by RPM and BIOS vendor/version + """ + _NAME = 'gcp' + _LONG_NAME = 'Google Cloud Platform' + + def __init__(self, *args, **kwargs): + super(GoogleCloudProvider, self).__init__(*args, **kwargs) + self.rpm = 'google-rhui-client' + self.bios_vendor_version = 'google' + self.cp_bios_vendor = self._get_cp_bios_vendor(self.bios_vendor_version) + self.cp_bios_version = self._get_cp_bios_version(self.bios_vendor_version) + self.cp_rpms = self._get_rpm_cp_info(self.rpm) -@combiner([InstalledRpms, DMIDecode, YumRepoList]) +class AlibabaCloudProvider(CloudProviderInstance): + """ + Class to identify Alibaba Cloud provider + + Alibaba CP can be identified by manufacturer + """ + _NAME = 'alibaba' + _LONG_NAME = 'Alibaba Cloud' + + def __init__(self, *args, **kwargs): + super(AlibabaCloudProvider, self).__init__(*args, **kwargs) + self.manuf = 'alibaba cloud' + self.cp_manufacturer = self._get_cp_from_manuf(self.manuf) + + +class AmazonCloudProvider(CloudProviderInstance): + """ + Class to identify Amazon Cloud provider + + Amazon CP can be identified by RPM, BIOS verndor/version, + and system UUID + """ + _NAME = 'aws' + _LONG_NAME = 'Amazon Web Services' + + def __init__(self, *args, **kwargs): + super(AmazonCloudProvider, self).__init__(*args, **kwargs) + self.rpm = 'rh-amazon-rhui-client' + self.bios_vendor_version = 'amazon' + self.uuid = 'ec2' + self.asset_tag = 'Amazon EC2' + self.cp_bios_vendor = self._get_cp_bios_vendor(self.bios_vendor_version) + self.cp_bios_version = self._get_cp_bios_version(self.bios_vendor_version) + self.cp_rpms = self._get_rpm_cp_info(self.rpm) + self.cp_uuid = self._get_cp_from_uuid(self.uuid) + self.cp_asset_tag = self._get_cp_from_asset_tag(self.asset_tag) + + +class AzureCloudProvider(CloudProviderInstance): + """ + Class to identify Azure Cloud provider + + Azure CP can be identified by RPM, Yum repo, and system asset tag + """ + _NAME = 'azure' + _LONG_NAME = 'Microsoft Azure' + + def __init__(self, *args, **kwargs): + super(AzureCloudProvider, self).__init__(*args, **kwargs) + self.rpm = 'walinuxagent' + self.yum = 'rhui-microsoft-azure' + self.asset_tag = '7783-7084-3265-9085-8269-3286-77' + self.cp_asset_tag = self._get_cp_from_asset_tag(self.asset_tag) + self.cp_rpms = self._get_rpm_cp_info(self.rpm) + self.cp_yum = self._get_cp_from_yum(self.yum) + + +class IBMCloudProvider(CloudProviderInstance): + """ + Class to identify IBM Cloud provider + + IBM CP can be identified by rhsm.conf server hostname setting + """ + _NAME = 'ibm' + _LONG_NAME = 'IBM Cloud' + + def __init__(self, *args, **kwargs): + super(IBMCloudProvider, self).__init__(*args, **kwargs) + self.rhsm_server_hostname = 'networklayer.com' + self.cp_rpms = self._get_rpm_cp_info(self.rpm) + self.cp_yum = self._get_cp_from_yum(self.yum) + self.cp_rhsm_server_hostname = self._get_cp_from_rhsm_conf(self.rhsm_server_hostname) + + +@combiner([InstalledRpms, DMIDecode, YumRepoList, RHSMConf]) class CloudProvider(object): """ Combiner class to provide cloud vendor facts @@ -67,170 +306,101 @@ class CloudProvider(object): cp_manufacturer (dict): Dictionary containing a value, for each provider, of system information used to determine cloud provider. Provider value will be empty if no matches are found. + cp_rhsm_server_hostname (dict): Dictionary containing a value, for each provider, + of rhsm.conf server hostnames. 
Value will be empty if no matches are found. cloud_provider (str): String representing the cloud provider that was detected. If none are detected then it will have the default value `None`. """ - - __CP = namedtuple('CP', 'name rpm yum vv manuf') - - __GOOGLE = __CP(name='google', rpm='google-rhui-client', yum='', vv='google', manuf='') - __ALIBABA = __CP(name='alibaba', rpm='', yum='', vv='', manuf='alibaba cloud') - __AWS = __CP(name='aws', rpm='rh-amazon-rhui-client', yum='', vv='amazon', manuf='') - __AZURE = __CP(name='azure', rpm='walinuxagent', yum='rhui-microsoft-azure', vv='', manuf='') - __PROVIDERS = [__GOOGLE, __ALIBABA, __AWS, __AZURE] - - ALIBABA = __ALIBABA.name - """Alibaba Cloud Provider Constant""" - - AWS = __AWS.name - """AWS Cloud Provider Constant""" - - AZURE = __AZURE.name - """AZURE Cloud Provider Constant""" - - GOOGLE = __GOOGLE.name - """GOOGLE Cloud Provider Constant""" - - def __init__(self, rpms, dmidcd, yrl): - - self.cp_bios_vendor = self._get_cp_bios_vendor(dmidcd) - self.cp_bios_version = self._get_cp_bios_version(dmidcd) - self.cp_rpms = self._get_rpm_cp_info(rpms) - self.cp_yum = self._get_cp_from_yum(yrl) - self.cp_asset_tag = self._get_cp_from_asset_tag(dmidcd) - self.cp_uuid = self._get_cp_from_uuid(dmidcd) - self.cp_manufacturer = self._get_cp_from_manuf(dmidcd) + ALIBABA = AlibabaCloudProvider._NAME + """Alibaba Cloud Provider short name""" + + AWS = AmazonCloudProvider._NAME + """AWS Cloud Provider short name""" + + AZURE = AzureCloudProvider._NAME + """AZURE Cloud Provider short name""" + + GOOGLE = GoogleCloudProvider._NAME + """GOOGLE Cloud Provider short name""" + + IBM = IBMCloudProvider._NAME + """IBM Cloud Provider short name""" + + # Add any new cloud provider classes to this list + _CLOUD_PROVIDER_CLASSES = [ + GoogleCloudProvider, + AlibabaCloudProvider, + AmazonCloudProvider, + AzureCloudProvider, + IBMCloudProvider, + ] + + def __init__(self, rpms, dmidcd, yrl, rhsm_cfg): + self._cp_objects = dict([ + (cls._NAME, cls(rpms=rpms, dmidcd=dmidcd, yum_repos=yrl, rhsm_cfg=rhsm_cfg)) + for cls in self._CLOUD_PROVIDER_CLASSES + ]) + self.cp_bios_vendor = dict([(name, cp.cp_bios_vendor) for name, cp in self._cp_objects.items()]) + self.cp_bios_version = dict([(name, cp.cp_bios_version) for name, cp in self._cp_objects.items()]) + self.cp_rpms = dict([(name, cp.cp_rpms) for name, cp in self._cp_objects.items()]) + self.cp_yum = dict([(name, cp.cp_yum) for name, cp in self._cp_objects.items()]) + self.cp_asset_tag = dict([(name, cp.cp_asset_tag) for name, cp in self._cp_objects.items()]) + self.cp_uuid = dict([(name, cp.cp_uuid) for name, cp in self._cp_objects.items()]) + self.cp_manufacturer = dict([(name, cp.cp_manufacturer) for name, cp in self._cp_objects.items()]) + self.cp_rhsm_server_hostname = dict([(name, cp.cp_rhsm_server_hostname) for name, cp in self._cp_objects.items()]) self.cloud_provider = self._select_provider() - def _provider_init_list(self): - prov = {} - for p in CloudProvider.__PROVIDERS: - prov[p.name] = [] - return prov - - def _provider_init_str(self): - prov = {} - for p in CloudProvider.__PROVIDERS: - prov[p.name] = '' - return prov - def _select_provider(self): - - if any(value for value in self.cp_bios_vendor.values()): - return ( - self.__AWS.name if (self.cp_bios_vendor['aws'] and - self.__AWS.vv in self.cp_bios_vendor['aws'].lower()) - else self.__GOOGLE.name if (self.cp_bios_vendor['google'] and - self.__GOOGLE.vv in self.cp_bios_vendor['google'].lower()) - else self.__AZURE.name if (self.cp_bios_vendor['azure'] 
and self.__AZURE.vv in - self.cp_bios_vendor['azure'].lower()) - else None - ) - - if any(value for value in self.cp_bios_version.values()): - return ( - self.__AWS.name if (self.cp_bios_version['aws'] and - self.__AWS.vv in self.cp_bios_version['aws'].lower()) - else self.__GOOGLE.name if (self.cp_bios_version['google'] and - self.__GOOGLE.vv in self.cp_bios_version['google'].lower()) - else self.__AZURE.name if (self.cp_bios_version['azure'] and - self.__AZURE.vv in self.cp_bios_version['azure'].lower()) - else None - ) - - if any(value for value in self.cp_rpms.values()): - return ( - self.__AWS.name if self.cp_rpms[CloudProvider.AWS] - else self.__GOOGLE.name if self.cp_rpms[CloudProvider.GOOGLE] - else self.__AZURE.name if self.cp_rpms[CloudProvider.AZURE] - else None - ) - - if self.cp_yum[CloudProvider.AZURE]: - return CloudProvider.AZURE - - if self.cp_asset_tag[CloudProvider.AZURE]: - return CloudProvider.AZURE - - if self.cp_uuid[CloudProvider.AWS]: - return CloudProvider.AWS - - if self.cp_manufacturer[CloudProvider.ALIBABA]: - return CloudProvider.ALIBABA - - def _get_rpm_cp_info(self, rpms): - - prov = self._provider_init_list() - - if rpms: - for p in self.__PROVIDERS: - for key, val in rpms.packages.items(): - for v in val: - prov[p.name].append(v.package) if p.rpm and p.rpm in v.package.lower() else prov - - return prov - - def _get_cp_from_yum(self, yrl): - - prov = self._provider_init_list() - - if yrl and hasattr(yrl, 'data'): - for p in self.__PROVIDERS: - for yval in yrl.data: - prov[p.name].append(yval.get('id').lower()) \ - if p.yum and p.yum in yval.get('id').lower() \ - else prov - - return prov - - def _get_cp_from_asset_tag(self, dmidcd): - - prov = self._provider_init_str() - - if dmidcd and hasattr(dmidcd, 'data'): - ch_info = dmidcd.data.get('chassis_information') - if ch_info: - asset_tag = ch_info[0].get('asset_tag') - prov['azure'] = asset_tag if asset_tag == '7783-7084-3265-9085-8269-3286-77' else '' - return prov - - def _get_cp_bios_vendor(self, dmidcd): - - prov = self._provider_init_str() - - if dmidcd and dmidcd.bios: - for p in self.__PROVIDERS: - prov[p.name] = dmidcd.bios.get('vendor') if p.vv and p.vv in dmidcd.bios.get('vendor').lower() \ - else '' - return prov - - def _get_cp_bios_version(self, dmidcd): - - prov = self._provider_init_str() - - if dmidcd and dmidcd.bios: - for p in self.__PROVIDERS: - prov[p.name] = dmidcd.bios.get('version') if p.vv and p.vv in dmidcd.bios.get('version').lower() \ - else '' - return prov - - def _get_cp_from_uuid(self, dmidcd): - - prov = self._provider_init_str() - - if dmidcd and dmidcd.bios: - prov['aws'] = dmidcd.system_info.get('uuid') if dmidcd.system_info.get('uuid').lower().startswith('ec2') \ - else '' - return prov - - def _get_cp_from_manuf(self, dmidcd): - - prov = self._provider_init_str() - - if dmidcd and dmidcd.system_info: - prov[CloudProvider.__ALIBABA.name] = ( - dmidcd.system_info.get('manufacturer') - if dmidcd.system_info.get('manufacturer').lower() == CloudProvider.__ALIBABA.manuf - else '' - ) - return prov + """ + This method provides the logic to identify which cloud provider is present. + + If new data sources and/or cloud providers are added you must add logic here to + identify the new cloud provider. + + Returns: + str: Returns the name of the cloud provider, corresponds to ``name`` property + in cloud provider classes. 
If no cloud provider is identified, ``None`` is returned + """ + # Check bios vendor first + if self._cp_objects[self.AWS].cp_bios_vendor: + return self.AWS + elif self._cp_objects[self.GOOGLE].cp_bios_vendor: + return self.GOOGLE + elif self._cp_objects[self.AZURE].cp_bios_vendor: + return self.AZURE + + # Specific vendor not detected, so check bios version + if self._cp_objects[self.AWS].cp_bios_version: + return self.AWS + elif self._cp_objects[self.GOOGLE].cp_bios_version: + return self.GOOGLE + elif self._cp_objects[self.AZURE].cp_bios_version: + return self.AZURE + + # BIOS vendor and version not detected check for RPMs + if self._cp_objects[self.AWS].cp_rpms: + return self.AWS + elif self._cp_objects[self.GOOGLE].cp_rpms: + return self.GOOGLE + elif self._cp_objects[self.AZURE].cp_rpms: + return self.AZURE + + # No luck, check for other attributes + if self._cp_objects[self.AZURE].cp_yum or self._cp_objects[self.AZURE].cp_asset_tag: + return self.AZURE + + if self._cp_objects[self.AWS].cp_uuid and self._cp_objects[self.AWS].cp_asset_tag: + return self.AWS + + if self._cp_objects[self.ALIBABA].cp_manufacturer: + return self.ALIBABA + + if self._cp_objects[self.IBM].cp_rhsm_server_hostname: + return self.IBM + + return None + + @property + def long_name(self): + """ str: Return long name for the specific cloud provider, or ``None`` if no cloud provider """ + return self._cp_objects[self.cloud_provider].long_name if self.cloud_provider is not None else None diff --git a/insights/combiners/cpu_vulns_all.py b/insights/combiners/cpu_vulns_all.py index e7f7525608..e8e33aec03 100644 --- a/insights/combiners/cpu_vulns_all.py +++ b/insights/combiners/cpu_vulns_all.py @@ -4,10 +4,9 @@ This combiner provides an interface to CPU vulnerabilities parsers for cpu vulnerabilities """ - +from insights.core.exceptions import SkipComponent from insights.core.plugins import combiner from insights.parsers.cpu_vulns import CpuVulns -from insights.parsers import SkipComponent @combiner(CpuVulns) diff --git a/insights/combiners/crio_conf.py b/insights/combiners/crio_conf.py new file mode 100644 index 0000000000..252b395dcc --- /dev/null +++ b/insights/combiners/crio_conf.py @@ -0,0 +1,156 @@ +""" +crio configuration +================== +The crio files are normally available to rules as a list of CrioConf objects. +""" + +from insights.contrib.ConfigParser import NoOptionError, NoSectionError +from insights.core.plugins import combiner +from insights.parsers.crio_conf import CrioConf + + +@combiner(CrioConf) +class AllCrioConf(object): + """ + Combiner for accessing all the crio configuration files. There may be multi + files for crio configuration, and the main config file is crio.conf. In the + situation that the same section is both in crio.conf and other configuration + files, the item in crio.conf has the lowest precedence. Files in the + directory,'/etc/crio/crio.conf.d/', are sorted by name in lexical order and + applied in that order. If multiple configuration files specify the same + configuration option the setting specified in the file sorted last takes + precedence over any other value. This combiner will parse all the CrioConf + objects and return a dictionary containing all valid data. 
+ + Sample files:: + + /etc/crio/crio.conf: + + [crio] + storage_driver = "overlay" + storage_option = [ + "overlay.override_kernel_check=1", + ] + + [crio.runtime] + selinux = true + + [crio.network] + plugin_dirs = [ + "/usr/libexec/cni", + ] + [crio.metrics] + + /etc/crio/crio.conf.d/00-conmon.conf + + [crio] + internal_wipe = true + storage_driver = "device mapper" + + /etc/crio/crio.conf.d/99-conmon.conf + + [crio] + storage_driver = "overlay2" + + [crio.api] + stream_address = "" + stream_port = "10010" + + [crio.runtime] + selinux = true + conmon = "" + conmon_cgroup = "pod" + default_env = [ + "NSS_SDB_USE_CACHE=no", + ] + log_level = "info" + cgroup_manager = "systemd" + + Examples: + >>> all_crio_conf.sections() + ['crio', 'crio.runtime', 'crio.api', 'crio.network', 'crio.metrics'] + >>> all_crio_conf.options('crio.api') + ['stream_address', 'stream_port'] + >>> all_crio_conf.files + ['/etc/crio/crio.conf', '/etc/crio/crio.conf.d/00-conmon.conf', + '/etc/crio/crio.conf.d/99-conmon.conf'] + >>> all_crio_conf.get('crio', 'storage_driver') + '"overlay2"' + + Attributes: + files (list): The list of configuration file names. + """ + def __init__(self, crio_confs): + self.data = {} + self.files = [] + conf_d_data = [] + + def dict_merge(dest, src): + if not src: + return + + for section in src.sections(): + if section not in dest: + dest[section] = {} + + for option in src.items(section): + dest[section][option] = src.get(section, option) + + for crio_conf in crio_confs: + self.files.append(crio_conf.file_path) + if crio_conf.file_path == "/etc/crio/crio.conf": + dict_merge(self.data, crio_conf) + else: + conf_d_data.append(crio_conf) + + conf_d_data.sort(key=lambda e: e.file_name) + for crio_conf in conf_d_data: + dict_merge(self.data, crio_conf) + + super(AllCrioConf, self).__init__() + + def get(self, section, option): + """ + Args: + section (str): The section str to search for. + option (str): The option str to search for. + + Returns: + str: Returns the value of the option in the specified section. + """ + if section not in self.data: + raise NoSectionError(section) + + header = self.data.get(section) + if option not in header: + raise NoOptionError(section, option) + + return header.get(option) + + def sections(self): + """ + Return a list of section names. + """ + return list(self.data.keys()) + + def has_section(self, section): + """ + Indicate whether the named section is present in the configuration. + Return True if the given section is present, and False if not present. + """ + return section in self.data + + def options(self, section): + """ + Return a list of option names for the given section name. + """ + return list(self.data[section].keys()) if self.has_section(section) else [] + + def has_option(self, section, option): + """ + Check for the existence of a given option in a given section. + Return True if the given option is present, and False if not present. + """ + if section not in self.data: + return False + return option in self.data[section] diff --git a/insights/combiners/cryptsetup.py b/insights/combiners/cryptsetup.py new file mode 100644 index 0000000000..717750738a --- /dev/null +++ b/insights/combiners/cryptsetup.py @@ -0,0 +1,56 @@ +""" +Cryptsetup - combine metadata about LUKS devices +================================================ + +Combine outputs of LuksDump and LuksMeta parsers (with the same UUID) into a +single dictionary. 
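+
+A short sketch of how a consumer might walk the combined data (the layout
+follows the ``Examples`` in the class docstring below; the LUKS1 check is
+only illustrative)::
+
+    for device in luks_devices:
+        # each entry is a deep copy of a LuksDump result, optionally
+        # carrying the matching LuksMeta data under the "luksmeta" key
+        if device["header"]["Version"] == "1" and "luksmeta" not in device:
+            pass  # e.g. flag LUKS1 devices without LUKSmeta metadata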
+""" + +import copy + +from insights import SkipComponent +from insights.core.plugins import combiner +from insights.parsers.cryptsetup_luksDump import LuksDump +from insights.parsers.luksmeta import LuksMeta + + +@combiner(LuksDump, optional=[LuksMeta]) +class LuksDevices(list): + """ + Combiner for LUKS encrypted devices information. It uses the results of + the ``LuksDump`` and ``LuksMeta`` parser (they are matched based UUID of + the device they were collected from). + + + Examples: + >>> luks_devices[0]["header"]["Version"] + '1' + >>> "luksmeta" in luks_devices[0] + True + >>> "luksmeta" in luks_devices[1] + False + >>> luks_devices[0]["luksmeta"][0] + Keyslot on index 0 is 'active' with no embedded metadata + """ + + def __init__(self, luks_dumps, luks_metas): + luksmeta_by_uuid = {} + + if luks_metas: + for luks_meta in luks_metas: + if "device_uuid" not in luks_meta: + continue + + luksmeta_by_uuid[luks_meta["device_uuid"].lower()] = luks_meta + + for luks_dump in luks_dumps: + uuid = luks_dump.dump["header"]["UUID"].lower() + luks_dump_copy = copy.deepcopy(luks_dump.dump) + + if luks_metas and uuid in luksmeta_by_uuid: + luks_dump_copy["luksmeta"] = luksmeta_by_uuid[uuid] + + self.append(luks_dump_copy) + + if not self: + raise SkipComponent diff --git a/insights/combiners/du.py b/insights/combiners/du.py new file mode 100644 index 0000000000..f6c8d74371 --- /dev/null +++ b/insights/combiners/du.py @@ -0,0 +1,43 @@ +""" +Disk Usage +========== + +Combiners for gathering information from du parsers. +""" + +from insights import combiner +from insights.parsers.du import DiskUsageDir + + +@combiner(DiskUsageDir) +class DiskUsageDirs(dict): + """ + Combiner for the :class:`insights.parsers.du.DiskUsageDir` parser. + + The parser is multioutput, one parser instance for each directory disk + usage. This combiner puts all of them back together and presents them as a + dict where the keys are the directory names and the space usage are the + values. + + Sample input data for du commands as parsed by the parsers:: + + # Output of the command: + # /bin/du -s -k /var/log + 553500 /var/log + + # Output of the command: + # /bin/du -s -k /var/lib/pgsql + 519228 /var/lib/pgsql + + Examples: + >>> type(disk_usage_dirs) + + >>> sorted(disk_usage_dirs.keys()) + ['/var/lib/pgsql', '/var/log'] + >>> disk_usage_dirs['/var/lib/pgsql'] + 519228 + """ + def __init__(self, du_dirs): + super(DiskUsageDirs, self).__init__() + for du in du_dirs: + self.update(du) diff --git a/insights/combiners/grub_conf.py b/insights/combiners/grub_conf.py index ed6a06c384..0d2b83a04c 100644 --- a/insights/combiners/grub_conf.py +++ b/insights/combiners/grub_conf.py @@ -1,7 +1,7 @@ """ GrubConf - The valid GRUB configuration ======================================= -Combiner for Red Hat Grub v1 and Grub v2 information. +Combiner for Red Hat Grub v1 Grub v2, and BLS information. This combiner uses the parsers: :class:`insights.parsers.grub_conf.Grub1Config`, @@ -9,38 +9,32 @@ :class:`insights.parsers.grub_conf.Grub2Config`, :class:`insights.parsers.grub_conf.Grub2EFIConfig`, and :class:`insights.parsers.grub_conf.BootLoaderEntries`. +:class:`insights.parsers.grub_env.GrubEnv`. 
-It determines which parser was used by checking one of the follwing +It determines which parser was used by checking one of the following parsers/combiners: :class:`insights.parsers.installed_rpms.InstalledRpms`, :class:`insights.parsers.cmdline.CmdLine`, :class:`insights.parsers.ls_sys_firmware.LsSysFirmware`, and -:class:`insights.combiners.redhat_release.RedHatRelease`. - - """ +import re +from insights import SkipComponent from insights.core.plugins import combiner -from insights.combiners.redhat_release import RedHatRelease -from insights.parsers.grub_conf import BootEntry, get_kernel_initrds -from insights.parsers.grub_conf import Grub1Config, Grub1EFIConfig -from insights.parsers.grub_conf import Grub2Config, Grub2EFIConfig -from insights.parsers.grub_conf import BootLoaderEntries as BLE -from insights.parsers.ls_sys_firmware import LsSysFirmware -from insights.parsers.installed_rpms import InstalledRpms from insights.parsers.cmdline import CmdLine -from insights import SkipComponent +from insights.parsers.grub_conf import (get_kernel_initrds, BootEntry, Grub1Config, Grub1EFIConfig, Grub2Config, + Grub2EFIConfig, BootLoaderEntries as BLE) +from insights.parsers.grubenv import GrubEnv +from insights.parsers.installed_rpms import InstalledRpms +from insights.parsers.ls_sys_firmware import LsSysFirmware -@combiner(BLE, optional=[LsSysFirmware]) +@combiner(BLE, optional=[GrubEnv, LsSysFirmware]) class BootLoaderEntries(object): """ Combine all :class:`insights.parsers.grub_conf.BootLoaderEntries` parsers into one Combiner - Raises: - SkipComponent: when no any BootLoaderEntries Parsers. - Attributes: version (int): The version of the GRUB configuration, 1 or 2 is_efi (bool): If the host is boot with EFI @@ -50,30 +44,68 @@ class BootLoaderEntries(object): kernel_initrds (dict): Dict of the `kernel` and `initrd` files referenced in GRUB configuration files is_kdump_iommu_enabled (bool): If any kernel entry contains "intel_iommu=on" + + Raises: + SkipComponent: when no any BootLoaderEntries Parsers. """ - def __init__(self, grub_bles, sys_firmware): + def __init__(self, grub_bles, grubenv, sys_firmware): self.version = self._version = 2 self.is_efi = self._efi = '/sys/firmware/efi' in sys_firmware if sys_firmware else False self.entries = [] self.boot_entries = [] self.is_kdump_iommu_enabled = False + for ble in grub_bles: - self.entries.append(ble.entry) - self.boot_entries.append(BootEntry({'name': ble.title, 'cmdline': ble.cmdline})) + # Make a copy of the ble entry, so that no write + # backs occur below when expanding variables. + self.entries.append(ble.entry.copy()) + self.boot_entries.append(BootEntry({'name': ble.title, 'cmdline': ble.cmdline, + 'version': ble.entry.get('version')})) self.is_kdump_iommu_enabled = self.is_kdump_iommu_enabled or ble.is_kdump_iommu_enabled + + # If grub_bles and grubenv expand the $kernelopts, + # $tuned_params, and $tuned_initrd variables. 
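+        # For example (values here are hypothetical), an entry with
+        #     options = "root=/dev/mapper/rhel-root ro $kernelopts $tuned_params"
+        # combined with a grubenv of
+        #     kernelopts   = "crashkernel=auto rhgb quiet"
+        #     tuned_params = "skew_tick=1"
+        # is rewritten below as
+        #     "root=/dev/mapper/rhel-root ro crashkernel=auto rhgb quiet skew_tick=1"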
+ if grub_bles and grubenv: + for entry in self.entries: + entry_options = entry.get('options', "") + if "$kernelopts" in entry_options or "$tuned_params" in entry_options: + entry['options'] = re.sub("\\$kernelopts", grubenv.kernelopts, + entry['options']).strip() + entry['options'] = re.sub("\\$tuned_params", grubenv.tuned_params, + entry['options']).strip() + + if "$tuned_initrd" in entry.get('initrd', "") and grubenv.get('tuned_initrd'): + entry['initrd'] = re.sub("\\$tuned_initrd", grubenv.get('tuned_initrd', ""), + entry['initrd']).strip() + + for entry in self.boot_entries: + entry_options = entry.get('cmdline', "") + if "$kernelopts" in entry_options or "$tuned_params" in entry_options: + entry['cmdline'] = re.sub("\\$kernelopts", grubenv.kernelopts, entry['cmdline']).strip() + entry['cmdline'] = re.sub("\\$tuned_params", grubenv.tuned_params, entry['cmdline']).strip() + self.kernel_initrds = get_kernel_initrds(self.entries) if not self.entries: raise SkipComponent() -@combiner([Grub1Config, Grub2Config, - Grub1EFIConfig, Grub2EFIConfig, - BootLoaderEntries], - optional=[InstalledRpms, CmdLine, LsSysFirmware, RedHatRelease]) +@combiner([Grub1Config, Grub2Config, Grub1EFIConfig, Grub2EFIConfig, BootLoaderEntries], + optional=[InstalledRpms, CmdLine, LsSysFirmware]) class GrubConf(object): """ - Process Grub configuration v1 or v2 based on which type is passed in. + Process Grub configuration v1, v2, and BLS based on which type is passed in. + + Attributes: + version (int): returns 1 or 2, version of the GRUB configuration + is_efi (bool): returns True if the host is boot with EFI + kernel_initrds (dict): returns a dict of the `kernel` and `initrd` + files referenced in GRUB configuration files + is_kdump_iommu_enabled (bool): returns True if any kernel entry + contains "intel_iommu=on" + + Raises: + Exception: when cannot find any valid grub configuration. Examples: >>> type(grub_conf) @@ -89,51 +121,37 @@ class GrubConf(object): False >>> grub_conf.get_grub_cmdlines('') [] - - Raises: - Exception: when cannot find any valid grub configuration. - - Attributes: - version (int): returns 1 or 2, version of the GRUB configuration - is_efi (bool): returns True if the host is boot with EFI - kernel_initrds (dict): returns a dict of the `kernel` and `initrd` - files referenced in GRUB configuration files - is_kdump_iommu_enabled (bool): returns True if any kernel entry - contains "intel_iommu=on" """ - def __init__(self, grub1, grub2, grub1_efi, grub2_efi, grub_bles, - rpms, cmdline, sys_firmware, rh_rel): - + rpms, cmdline, sys_firmware): self.version = self.is_kdump_iommu_enabled = None self.grub = self.kernel_initrds = None - _grubs = list(filter(None, [grub1, grub2, grub1_efi, grub2_efi, grub_bles])) - # Check if `/sys/firmware/efi` exist? 
self.is_efi = '/sys/firmware/efi' in sys_firmware if sys_firmware else False + _grubs = list(filter(None, [grub1, grub2, grub1_efi, grub2_efi, grub_bles])) if len(_grubs) == 1: self.grub = _grubs[0] self.is_efi = self.is_efi if sys_firmware else self.grub._efi else: _grub1, _grub2 = (grub1_efi, grub2_efi) if self.is_efi else (grub1, grub2) - if rh_rel and rh_rel.rhel8: + if grub_bles and _grub2 and 'blscfg' in _grub2.get('configs', ''): self.grub = grub_bles # Check grub version via installed-rpms else: if rpms: # grub1 - if 'grub2' not in rpms and 'grub' in rpms and _grub1 is not None: + if 'grub2' not in rpms and 'grub' in rpms and _grub1: self.grub = _grub1 # grub2 - if 'grub' not in rpms and 'grub2' in rpms and _grub2 is not None: + if 'grub' not in rpms and 'grub2' in rpms and _grub2: self.grub = _grub2 # Check grub version via the booted CmdLine if self.grub is None and cmdline: # grub1 - if "BOOT_IMAGE" not in cmdline or 'rd_LVM_LV' in cmdline: + if "BOOT_IMAGE" not in cmdline: self.grub = _grub1 # grub2 - if "BOOT_IMAGE" in cmdline or 'rd.lvm.lv' in cmdline: + if "BOOT_IMAGE" in cmdline: self.grub = _grub2 if self.grub: diff --git a/insights/combiners/hostname.py b/insights/combiners/hostname.py index 186410b38f..1c95bd4582 100644 --- a/insights/combiners/hostname.py +++ b/insights/combiners/hostname.py @@ -3,26 +3,23 @@ ======== Combiner for ``hostname`` information. It uses the results of all the -``Hostname`` parsers, ``Facter`` and the ``SystemID`` parser to get the fqdn, +``Hostname`` parsers and the ``SystemID`` parser to get the fqdn, hostname and domain information. """ - from insights.core.plugins import combiner from insights.core.serde import deserializer, serializer from insights.parsers.hostname import Hostname as HnF, HostnameShort as HnS, HostnameDefault as HnD -from insights.parsers.facter import Facter from insights.parsers.systemid import SystemID -from insights.util import deprecated -@combiner([HnF, HnD, HnS, Facter, SystemID]) +@combiner([HnF, HnD, HnS, SystemID]) class Hostname(object): """ - Check hostname, facter and systemid to get the fqdn, hostname and domain. + Check hostname and systemid to get the fqdn, hostname and domain. - Prefer hostname to facter and systemid. + Prefer hostname to systemid. Examples: >>> type(hostname) @@ -37,19 +34,16 @@ class Hostname(object): Raises: Exception: If no hostname can be found in any of the source parsers. """ - def __init__(self, hf, hd, hs, ft, sid): + def __init__(self, hf, hd, hs, sid): self.fqdn = self.hostname = self.domain = None - if hf or hs or hd or ft: - hn = hf or hs or hd or ft + if hf or hs or hd: + hn = hf or hs or hd self.hostname = self.fqdn = hn.hostname self.domain = '' if hf and hf.fqdn: self.fqdn = hf.fqdn self.domain = hf.domain - elif ft and ft.fqdn: - self.fqdn = ft.fqdn - self.domain = ft.domain if ft.domain else ".".join(self.fqdn.split(".")[1:]) else: self.fqdn = sid.get("profile_name") if self.fqdn: @@ -60,36 +54,6 @@ def __init__(self, hf, hd, hs, ft, sid): raise Exception("Unable to get hostname.") -@combiner([HnF, HnD, HnS, Facter, SystemID]) -def hostname(hf, hd, hs, ft, sid): - """ - .. warning:: - This combiner methode is deprecated, please use - :py:class:`insights.combiners.hostname.Hostname` instead. - - Check hostname, facter and systemid to get the fqdn, hostname and domain. - - Prefer hostname to facter and systemid. 
- - Examples: - >>> hn.fqdn - 'rhel7.example.com' - >>> hn.hostname - 'rhel7' - >>> hn.domain - 'example.com' - - Returns: - insights.combiners.hostname.Hostname: A class with `fqdn`, - `hostname` and `domain` attributes. - - Raises: - Exception: If no hostname can be found in any of the source parsers. - """ - deprecated(hostname, "Use the `Hostname` class instead.") - return Hostname(hf, hd, hs, ft, sid) - - @serializer(Hostname) def serialize(obj, root=None): return {"fqdn": obj.fqdn, "hostname": obj.hostname, "domain": obj.domain} diff --git a/insights/combiners/httpd_conf.py b/insights/combiners/httpd_conf.py index a72e40f491..d6ba898cd0 100644 --- a/insights/combiners/httpd_conf.py +++ b/insights/combiners/httpd_conf.py @@ -10,349 +10,13 @@ At this point in time, you should **NOT** filter the httpd configurations to avoid finding directives in incorrect sections. """ -import six -import string -from insights.contrib.ipaddress import ip_address, ip_network -from collections import namedtuple - -from insights import run -from insights.core import ConfigCombiner, ConfigParser -from insights.core.plugins import combiner, parser -from insights.parsr.query import (Directive, Entry, pred, pred2, Section, - startswith) -from insights.parsr import (Char, EOF, EOL, EndTagName, Forward, FS, GT, InSet, - Literal, LT, Letters, Lift, LineEnd, Many, Number, OneLineComment, - PosMarker, QuotedString, skip_none, StartTagName, String, WS, WSChar) -from insights.parsers.httpd_conf import HttpdConf, dict_deep_merge, ParsedData -from insights.specs import Specs -from insights.util import deprecated +from insights.core import ConfigCombiner +from insights.core.plugins import combiner +from insights.parsr.query import startswith +from insights.parsers.httpd_conf import HttpdConf, HttpdConfSclHttpd24, HttpdConfSclJbcsHttpd24 @combiner(HttpdConf) -class HttpdConfAll(object): - """ - .. warning:: - This combiner class is deprecated, please use - :py:class:`insights.combiners.httpd_conf.HttpdConfTree` instead. - - A combiner for parsing all httpd configurations. It parses all sources and makes a composition - to store actual loaded values of the settings as well as information about parsed configuration - files and raw values. - - Note: - ``ParsedData`` is a named tuple with the following properties: - - ``value`` - the value of the option. - - ``line`` - the complete line as found in the config file. - - ``section`` - the section type that the option belongs to. - - ``section_name`` - the section name that the option belongs to. - - ``file_name`` - the config file name. - - ``file_path`` - the complete config file path. - - ``ConfigData`` is a named tuple with the following properties: - - ``file_name`` - the config file name. - - ``file_path`` - the complete config file path. - - ``data_dict`` - original data dictionary from parser. - - Attributes: - data (dict): Dictionary of parsed settings in format {option: [ParsedData, ParsedData]}. - It stores a list of parsed values, usually only the last value is needed, - except situations when directives which can use selective overriding, - such as ``UserDir``, are used. - config_data (list): List of parsed config files in containing ConfigData named tuples. 
- """ - ConfigData = namedtuple('ConfigData', ['file_name', 'file_path', 'full_data_dict']) - - def __init__(self, httpd_conf): - deprecated(HttpdConfAll, "Import HttpdConfTree from 'insights.combiners.httpd_conf' instead.") - - self.data = {} - self.config_data = [] - - config_files_data = [] - main_config_data = [] - - for httpd_parser in httpd_conf: - file_name = httpd_parser.file_name - file_path = httpd_parser.file_path - - # Flag to be used for different handling of the main config file - main_config = httpd_parser.file_name == 'httpd.conf' - - if not main_config: - config_files_data.append(self.ConfigData(file_name, file_path, - httpd_parser.data)) - else: - main_config_data.append(self.ConfigData(file_name, file_path, - httpd_parser.first_half)) - main_config_data.append(self.ConfigData(file_name, file_path, - httpd_parser.second_half)) - - # Sort configuration files - config_files_data.sort() - - # Add both parts of main configuration file and store as attribute. - # These values can be used when looking for bad settings which are not actually active - # but may become active if other configurations are changed - if main_config_data: - self.config_data = [main_config_data[0]] + config_files_data + [main_config_data[1]] - else: - self.config_data = config_files_data - - # Store active settings - the last parsed value us stored - self.data = {} - for _, _, full_data in self.config_data: - copy_data = full_data.copy() - for option, parsed_data in copy_data.items(): - if isinstance(parsed_data, dict): - if option not in self.data: - self.data[option] = {} - dict_deep_merge(self.data[option], parsed_data) - else: - if option not in self.data: - self.data[option] = [] - self.data[option].extend(parsed_data) - - def get_setting_list(self, directive, section=None): - """ - Returns the parsed data of the specified directive as a list - - Parameters: - directive (str): The directive to look for - section (str or tuple): The section the directive belongs to - - - str: The section type, e.g. "IfModule" - - tuple(section, section_name): e.g. ("IfModule", "prefork") - - Note:: - `section_name` can be ignored or can be a part of the actual name. - - Returns: - (list of dict or named tuple `ParsedData`): - When `section` is not None, returns the list of dict that wraps - the section and the directive's named tuples ParsedData, in - order how they are parsed. - - When `section` is None, returns the list of named tuples - ParsedData, in order how they are parsed. - - If directive or section does not exist, returns empty list. - """ - def _deep_search(data, dr, sc): - """ - Utility function to get search the directive `dr` in the nested - dict - - Parameters: - data (dict): The target dictionary - dr (str): The directive to look for - sc (tuple): The section the directive belongs to - - Returns: - (list of dict): List of dict that wraps the section and the - directive's named tuples ParsedData in order how they are parsed. 
- """ - result = [] - for d, v in data.items(): - if isinstance(d, tuple): - if d[0] == sc[0] and sc[1] in d[1]: - val = v.get(dr) - if val: - result.append({d: val}) - result.extend(_deep_search(v, dr, sc)) - return result - - if section: - if isinstance(section, str): - section = (section, '') - elif isinstance(section, tuple) and len(section) == 1: - section = (section[0], '') - elif (not isinstance(section, tuple) or (len(section) == 0 or len(section) > 2)): - return [] - return _deep_search(self.data, directive, section) - - return self.data.get(directive, []) - - def get_active_setting(self, directive, section=None): - """ - Returns the parsed data of the specified directive as a list of named tuples. - - Parameters: - directive (str): The directive to look for - section (str or tuple): The section the directive belongs to - - - str: The section type, e.g. "IfModule" - - tuple(section, section_name): e.g. ("IfModule", "prefork") - - Note:: - `section_name` can be ignored or can be a part of the actual name. - - Returns: - (list or named tuple `ParsedData`): - When `section` is not None, returns the list of named tuples - ParsedData, in order how they are parsed. - If directive or section does not exist, returns empty list. - - When `section` is None, returns the named tuple ParsedData of - the directive directly. - If directive or section does not exist, returns None. - - """ - values_list = self.get_setting_list(directive, section) - if section is not None: - if values_list: - for i, val in enumerate(values_list): - # From each section, preserve only the last ParsedData - # {(section, ""): [ParsedData, ParsedData]} ---> ParsedData - values_list[i] = list(val.values())[0][-1] - return values_list - return [] - else: - if values_list: - return values_list[-1] - - def get_section_list(self, section): - """ - Returns the specified sections. - - Parameters: - section (str): The section to look for, e.g. "Directory" - - Returns: - (list of tuple): List of tuples, each tuple has three elements - the - first being a tuple of the section and section name, the second - being the file name of the file where that section resides, - the third being the full file path of the file. Therefore, the result - looks like this: [(('VirtualHost', '192.0.2.1'), '00-z.conf', - '/etc/httpd/conf.d/00-z.conf')] - - If section does not exist, returns empty list. - """ - def _deep_search(data, sc): - """ - Utility function to search for sections in the nested dict - - Parameters: - data (dict): The target dictionary - sc (str): The section the directive belongs to - - Returns: - (list of tuple): List of tuples, each tuple has three elements - the - first being a tuple of the section and section name, the second - being the file name of the file where that section resides, - the third being the full file path of the file. 
Therefore, the result - looks like this: [(('VirtualHost', '192.0.2.1'), '00-z.conf', - '/etc/httpd/conf.d/00-z.conf')] - """ - result = [] - for d, v in data.items(): - if isinstance(d, tuple): - if d[0] == sc: - # file of the section - sect_file_name = None - sect_file_path = None - for subkey, subvalue in v.items(): - if subvalue and isinstance(subkey, str) and isinstance(subvalue, list) and isinstance(subvalue[0], ParsedData): - # it is a directive, not a section, there's at least one ParsedData - sect_file_name = subvalue[0].file_name - sect_file_path = subvalue[0].file_path - # assuming all directives in this section come from the same file - break - result.append((d, sect_file_name, sect_file_path)) - else: - result.extend(_deep_search(v, sc)) - return result - - if section: - return _deep_search(self.data, section) - - return [] - - -class DocParser(object): - def __init__(self, ctx): - self.ctx = ctx - - Complex = Forward() - Comment = (WS >> OneLineComment("#")).map(lambda x: None) - - Name = String(string.ascii_letters + "_/") - Num = Number & (WSChar | LineEnd) - - StartName = WS >> PosMarker(StartTagName(Letters)) << WS - EndName = WS >> EndTagName(Letters, ignore_case=True) << WS - - Cont = Char("\\") + EOL - AttrStart = Many(WSChar) - AttrEnd = (Many(WSChar) + Cont) | Many(WSChar) - - OpAttr = (Literal("!=") | Literal("<=") | Literal(">=") | InSet("<>")) & WSChar - BareAttr = String(set(string.printable) - (set(string.whitespace) | set("<>'\""))) - Attr = AttrStart >> (Num | QuotedString | OpAttr | BareAttr) << AttrEnd - Attrs = Many(Attr) - - StartTag = (WS + LT) >> (StartName + Attrs) << (GT + WS) - EndTag = (WS + LT + FS) >> EndName << (GT + WS) - - Simple = WS >> (Lift(self.to_directive) * PosMarker(Name) * Attrs) << WS - Stanza = Simple | Complex | Comment | Many(WSChar | EOL, lower=1).map(lambda x: None) - Complex <= (Lift(self.to_section) * StartTag * Many(Stanza).map(skip_none)) << EndTag - Doc = Many(Stanza).map(skip_none) - - self.Top = Doc + EOF - - def typed(self, val): - try: - v = val.lower() - if v in ("on", "yes", "true"): - return True - if v in ("off", "no", "false"): - return False - except: - pass - return val - - def to_directive(self, name, attrs): - attrs = attrs if len(attrs) > 1 else [self.typed(a) for a in attrs] - return Directive(name=name.value, attrs=attrs, lineno=name.lineno, - src=self.ctx) - - def to_section(self, tag, children): - name, attrs = tag - attrs = attrs if len(attrs) > 1 else [self.typed(a) for a in attrs] - return Section(name=name.value, attrs=attrs, children=children, - lineno=name.lineno, src=self.ctx) - - def __call__(self, content): - try: - return self.Top(content) - except: - raise - - -def parse_doc(content, ctx=None): - """ Parse a configuration document into a tree that can be queried. """ - if isinstance(content, list): - content = "\n".join(content) - parse = DocParser(ctx) - result = parse(content)[0] - return Entry(children=result, src=ctx) - - -@parser(Specs.httpd_conf, continue_on_error=False) -class _HttpdConf(ConfigParser): - """ Parser for individual httpd configuration files. """ - def __init__(self, *args, **kwargs): - self.parse = DocParser(self) - super(_HttpdConf, self).__init__(*args, **kwargs) - - def parse_doc(self, content): - if isinstance(content, list): - content = "\n".join(content) - result = self.parse(content)[0] - return Entry(children=result, src=self) - - -@combiner(_HttpdConf) class HttpdConfTree(ConfigCombiner): """ Exposes httpd configuration through the parsr query interface. 
Correctly @@ -370,14 +34,7 @@ def conf_path(self): return res.value if res else "/etc/httpd" -@parser(Specs.httpd_conf_scl_httpd24, continue_on_error=False) -class _HttpdConfSclHttpd24(ConfigParser): - """ Parser for individual httpd configuration files. """ - def parse_doc(self, content): - return parse_doc(content, ctx=self) - - -@combiner(_HttpdConfSclHttpd24) +@combiner(HttpdConfSclHttpd24) class HttpdConfSclHttpd24Tree(ConfigCombiner): """ Exposes httpd configuration Software Collection httpd24 through the parsr query @@ -395,14 +52,7 @@ def conf_path(self): return res.value if res else "/opt/rh/httpd24/root/etc/httpd" -@parser(Specs.httpd_conf_scl_jbcs_httpd24, continue_on_error=False) -class _HttpdConfSclJbcsHttpd24(ConfigParser): - """ Parser for individual httpd configuration files. """ - def parse_doc(self, content): - return parse_doc(content, ctx=self) - - -@combiner(_HttpdConfSclJbcsHttpd24) +@combiner(HttpdConfSclJbcsHttpd24) class HttpdConfSclJbcsHttpd24Tree(ConfigCombiner): """ Exposes httpd configuration Software Collection jbcs-httpd24 through the parsr query @@ -418,32 +68,3 @@ def __init__(self, confs): def conf_path(self): res = self.main.find("ServerRoot") return res.value if res else "/opt/rh/jbcs-httpd24/root/etc/httpd" - - -def get_tree(root=None): - """ - This is a helper function to get an httpd configuration component for your - local machine or an archive. Use it in interactive sessions. - """ - return run(HttpdConfTree, root=root).get(HttpdConfTree) - - -is_private = pred(lambda x: ip_address(six.u(x)).is_private) -""" -Predicate to check if an ip address is private. - -Example: - conf["VirtualHost", in_network("128.39.0.0/16")] -""" - -in_network = pred2(lambda x, y: (ip_address(six.u(x)) in ip_network(six.u(y)))) -""" -Predicate to check if an ip address is in a given network. - -Example: - conf["VirtualHost", in_network("128.39.0.0/16")] -""" - - -if __name__ == "__main__": - run(HttpdConfTree, print_summary=True) diff --git a/insights/combiners/identity_domain.py b/insights/combiners/identity_domain.py new file mode 100644 index 0000000000..e0c0cb43ed --- /dev/null +++ b/insights/combiners/identity_domain.py @@ -0,0 +1,289 @@ +""" +Identity Domain - Combiner for domain enrollment +================================================ + +The combiner detects enrollment into identity domains such as IPA, +Active Directory, generic Kerberos realm, and generic LDAP. It parses +domains and realms from SSSD, KRB5, IPA, and Samba configuration. + +Supported domain types +---------------------- + +* IPA (RHEL IdM, FreeIPA) +* Active Directory (SSSD) +* Active Directory (Samba winbind) +* generic LDAP domain (SSSD) +* generic LDAP domain with Kerberos authentication (SSSD) +* generic Kerberos realm (from ``krb5.conf``) + +The combiner cannot detect generic Kerberos realms that solely rely upon +DNS realm lookup (``dns_lookup_realm``). 
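For orientation, here is a minimal, hypothetical rule sketch consuming the new combiner; the rule function and response key are invented, and only attributes added later in this file (``domains``, ``DomainTypes.AD_WINBIND``) are used:

    from insights.core.plugins import make_info, rule
    from insights.combiners.identity_domain import DomainTypes, IdentityDomain

    @rule(IdentityDomain)
    def report_winbind_domains(idm):
        # hypothetical rule: report AD domains that were joined via winbind
        winbind = [d.name for d in idm.domains
                   if d.domain_type == DomainTypes.AD_WINBIND]
        if winbind:
            return make_info("HYPOTHETICAL_AD_WINBIND_DOMAINS", domains=winbind)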
+ +Examples:: + + DomainInfo( + name="ipa.test", + domain_type="IPA", + server_software="IPA", + client_software="SSSD", + domain="ipa.test", + realm="IPA.TEST", + workgroup=None, + ipa_mode="client", + ) + + DomainInfo( + name="ad-winbind.test", + domain_type="Active Directory (winbind)", + server_software="Active Directory", + client_software="winbind", + domain="ad-winbind.test", + realm="AD-WINBIND.TEST", + workgroup="AD-WINBIND", + ipa_mode=None, + ) +""" +import collections + +from insights.core.exceptions import SkipComponent +from insights.core.plugins import combiner +from insights.combiners.ipa import IPA +from insights.parsers.samba import SambaConfigs +from insights.parsers.sssd_conf import SSSD_Config +from insights.combiners.krb5 import AllKrb5Conf + + +class DomainTypes(object): + """Human readable domain type""" + + # SSSD domain types + IPA = "IPA" + AD_SSSD = "Active Directory (SSSD)" + LDAP = "LDAP" + LDAP_KRB5 = "LDAP/Kerberos" + # krb5.conf but not in sssd.conf + KRB5 = "Kerberos" + # Samba winbind + AD_WINBIND = "Active Directory (winbind)" + + +class ServerSoftware(object): + """Server software""" + + IPA = "IPA" + AD = "Active Directory" + LDAP = "generic LDAP" + LDAP_KRB5 = "generic LDAP/Kerberos" + KRB5 = "generic Kerberos" + + +class ClientSoftware(object): + """Client software""" + + SSSD = "SSSD" + WINBIND = "winbind" + KRB5 = "Kerberos" + + +class IPAMode(object): + """IPA mode (server or client-only)""" + + IPA_CLIENT = "client" + IPA_SERVER = "server" + + +DomainInfo = collections.namedtuple( + "DomainInfo", + [ + "name", + "domain_type", + "server_software", + "client_software", + "domain", + "realm", + "workgroup", + "ipa_mode", + ], +) +"""Identity domain information + +Attributes: + name (str): user-friendly name + either SSSD's domain name, domain name, or lower-case realm name + domain_type (str): domain type, e.g. ``IPA`` or ``Active Directory (SSSD)`` + server_software (str): name of the server software, e.g. ``Active Directory`` + client_software (str): name of the client software, e.g. ``SSSD`` or ``winbind`` + domain (str, None): name of the identity domain, + not set for generic Kerberos or LDAP + realm (str, None): Kerberos realm name, + not set for generic LDAP + workgroup (str, None): workgroup name, + only set for AD with winbind + ipa_mode (str, None): IPA mode (server or client), + only set for IPA +""" + + +@combiner(optional=[SSSD_Config, AllKrb5Conf, IPA, SambaConfigs]) +class IdentityDomain(object): + """ + A combiner for identity domains. + + Raises: + SkipComponent: When no identity domains are detected. + + Attributes: + domains (list): List of the namedtuple `DomainInfo` + default_realm (str, None): default realm name (if configured) + dns_lookup_realm (bool): is Kerberos realm DNS lookup enabled? + dns_lookup_kdc (bool): is Kerberos KDC DNS lookup enabled? 
+ """ + + def __init__(self, sssd=None, krb5=None, ipa=None, smb=None): + if sssd is None and krb5 is None and smb is None: + # ipa depends on sssd + raise SkipComponent("KRB5, SSSD, and Samba are not configured") + + self.domains = [] + self._realms = set() + + if krb5 is not None: + self.default_realm = krb5.default_realm + self.dns_lookup_realm = krb5.dns_lookup_realm + self.dns_lookup_kdc = krb5.dns_lookup_kdc + else: + self.default_realm = None + # krb5.conf default is True + self.dns_lookup_realm = True + self.dns_lookup_kdc = True + + if sssd is not None: + self._parse_sssd(sssd, ipa) + if smb is not None: + self._parse_smb(smb) + if krb5 is not None: + # parse /etc/krb5.conf last to skip SSSD and Samba realms + self._parse_krb5(krb5) + + if not self.domains: + raise SkipComponent("No identity domains detected") + + def _add_domaininfo( + self, name, dtype, srv, clnt, domain, realm, workgroup=None, ipa_mode=None + ): + if realm is not None: + if realm in self._realms: + # already configured + return + self._realms.add(realm) + + di = DomainInfo(name, dtype, srv, clnt, domain, realm, workgroup, ipa_mode) + self.domains.append(di) + + def _parse_sssd(self, sssd, ipa): + """Extract domains from sssd.conf + + Supports id_providers "ad", "ipa", and "ldap". + """ + id_auth_providers = set(["ldap", "krb5", "ipa", "ad", "proxy"]) + for name in sssd.domains: + if "/" in name: + # Ignore trusted domain (subdomain) configuration. Subdomain + # settings are configured as + # `[domain/parent.example/subdomain.example]`. + continue + conf = sssd.domain_config(name) + id_provider = conf.get("id_provider") + ipa_mode = None + + auth_provider = conf.get("auth_provider") + if auth_provider is None and id_provider in id_auth_providers: + # most id providers are also an auth providers + auth_provider = id_provider + elif auth_provider == "none": + auth_provider = None + + if id_provider == "ad": + dtype = DomainTypes.AD_SSSD + srv = ServerSoftware.AD + domain = conf.get("ad_domain", name) + realm = conf.get("krb5_domain", domain.upper()) + elif id_provider == "ipa": + if ipa is None or not ipa.is_client: + # unsupported configuration + continue + dtype = DomainTypes.IPA + srv = ServerSoftware.IPA + domain = conf.get("ipa_domain", name) + realm = conf.get("krb5_domain", domain.upper()) + ipa_mode = IPAMode.IPA_SERVER if ipa.is_server else IPAMode.IPA_CLIENT + elif id_provider == "ldap": + if auth_provider == "ldap": + dtype = DomainTypes.LDAP + srv = ServerSoftware.LDAP + domain = None + realm = None + elif auth_provider == "krb5": + dtype = DomainTypes.LDAP_KRB5 + srv = ServerSoftware.LDAP_KRB5 + domain = None + # krb5_domain is required + realm = conf.get("krb5_realm", name.upper()) + else: + # unsupported configuration + continue + elif id_provider in ("proxy", "files"): + # not an identity domain + continue + else: + # unsupported configuration + continue + + self._add_domaininfo( + name, dtype, srv, ClientSoftware.SSSD, domain, realm, ipa_mode=ipa_mode + ) + + def _parse_smb(self, smb): + """Parse smb.conf to detect AD with winbind + + We ignore IPA DC here as the information is already provided by + `sssd.conf`. IPA DC has either server role `ROLE_IPA_DC` + (Samba >= 4.16.0) or `ROLE_DOMAIN_PDC`, and always + `security=user`. 
+ """ + if ( + smb.server_role != "ROLE_DOMAIN_MEMBER" or + not smb.has_option("global", "security") or + smb.get("global", "security").upper() != "ADS" or + not smb.has_option("global", "realm") + ): + return + realm = smb.get("global", "realm") + domain = realm.lower() + + if smb.has_option("global", "workgroup"): + workgroup = smb.get("global", "workgroup") + else: + workgroup = realm.split(".", 1)[0] + + self._add_domaininfo( + domain, + DomainTypes.AD_WINBIND, + ServerSoftware.AD, + ClientSoftware.WINBIND, + domain, + realm, + workgroup, + ) + + def _parse_krb5(self, krb5): + """Parse krb5.conf to detect additional generic Kerberos realms""" + for realm in krb5.realms: + self._add_domaininfo( + realm.lower(), + DomainTypes.KRB5, + ServerSoftware.KRB5, + ClientSoftware.KRB5, + None, + realm, + ) diff --git a/insights/combiners/ipa.py b/insights/combiners/ipa.py new file mode 100644 index 0000000000..21306d1b6d --- /dev/null +++ b/insights/combiners/ipa.py @@ -0,0 +1,84 @@ +""" +IPA - Combiner for RHEL IdM / FreeIPA information +================================================= +""" +from insights.core.plugins import combiner +from insights.core.exceptions import SkipComponent +from insights.parsers.installed_rpms import InstalledRpms +from insights.parsers.redhat_release import RedhatRelease +from insights.parsers.ipa_conf import IPAConfig +from insights.parsers.sssd_conf import SSSD_Config + + +@combiner(IPAConfig, SSSD_Config, InstalledRpms, RedhatRelease) +class IPA(object): + """Combiner for IPA, SSSD, and installed RPMs + + Provides additional information, e.g. whether the host is an IPA server. + """ + + def __init__(self, ipa_conf, sssd_conf, rpms, release): + self._ipa_conf = ipa_conf + self._sssd_conf = sssd_conf + # IPA package names are different on Fedora + if release.is_fedora: + self._client_rpm = rpms.get_max("freeipa-client") + self._server_rpm = rpms.get_max("freeipa-server") + else: + self._client_rpm = rpms.get_max("ipa-client") + self._server_rpm = rpms.get_max("ipa-server") + if self._client_rpm is None: + raise SkipComponent("IPA client package is not installed") + self._is_client = None + self._is_server = None + + @property + def ipa_conf(self): + """Get IPAConfig object""" + return self._ipa_conf + + @property + def sssd_conf(self): + """Get SSSD_Config object""" + return self._sssd_conf + + @property + def sssd_domain_config(self): + """Get SSSD domain configuration for host's IPA domain""" + return self._sssd_conf.domain_config(self._ipa_conf.domain) + + @property + def is_client(self): + """Is the host an IPA client?""" + # IPAConfig validates that /etc/ipa/default.conf exists and is a + # valid IPA config file with all required values present. 
+ if self._is_client is None: + id_provider = self.sssd_domain_config.get("id_provider") + if id_provider == "ipa": + self._is_client = True + else: + self._is_client = False + + return self._is_client + + @property + def is_server(self): + """Is the host an IPA server?""" + if self._is_server is None: + server_mode = self.sssd_domain_config.get( + "ipa_server_mode", "false" + ) + if ( + self._server_rpm and + # all servers are also clients + self.is_client and + # only servers use LDAPI (LDAP over Unix socket) + self._ipa_conf.ldap_uri.startswith("ldapi://") and + # SSSD domain must be in server mode + server_mode.lower() == "true" + ): + self._is_server = True + else: + self._is_server = False + + return self._is_server diff --git a/insights/combiners/ipcs_shared_memory.py b/insights/combiners/ipcs_shared_memory.py index dbaa5a6af1..1ace1ab19c 100644 --- a/insights/combiners/ipcs_shared_memory.py +++ b/insights/combiners/ipcs_shared_memory.py @@ -7,9 +7,9 @@ shared memory of special ``PID``. """ - -from insights import combiner, LegacyItemAccess -from insights.parsers import ParseException +from insights.core import LegacyItemAccess +from insights.core.exceptions import ParseException +from insights.core.plugins import combiner from insights.parsers.ipcs import IpcsM, IpcsMP diff --git a/insights/combiners/krb5.py b/insights/combiners/krb5.py index 06f326b893..cf356b436f 100644 --- a/insights/combiners/krb5.py +++ b/insights/combiners/krb5.py @@ -4,11 +4,10 @@ The krb5 files are normally available to rules as a list of Krb5Configuration objects. """ - -from .. import LegacyItemAccess +from copy import deepcopy +from insights.core import LegacyItemAccess from insights.core.plugins import combiner -from insights.parsers.krb5 import Krb5Configuration -from insights.parsers.httpd_conf import dict_deep_merge +from insights.parsers.krb5 import Krb5Configuration, _handle_krb5_bool @combiner(Krb5Configuration) @@ -58,6 +57,8 @@ class AllKrb5Conf(LegacyItemAccess): True >>> all_krb5['realms']['dns_lookup_realm'] 'false' + >>> all_krb5.files + ['krb5.conf', 'test.conf', 'test2.conf'] Attributes: includedir (list): The directory list that `krb5.conf` includes via @@ -66,6 +67,11 @@ class AllKrb5Conf(LegacyItemAccess): via `include` directive module (list): The module list that `krb5.conf` specifed via 'module' directive + files (list): The list of configuration file names. + dns_lookup_realm (bool): is Kerberos realm DNS lookup enabled? + dns_lookup_kdc (bool): is Kerberos KDC DNS lookup enabled? 
+ default_realm (str/None): default realm for clients + realms (set): realm names from [realms] block """ def __init__(self, krb5configs): @@ -74,8 +80,10 @@ def __init__(self, krb5configs): self.includedir = [] self.include = [] self.module = [] + self.files = [] for krb5_parser in krb5configs: + self.files.append(krb5_parser.file_name) if krb5_parser.file_path == "/etc/krb5.conf": main_data = krb5_parser.data self.includedir = krb5_parser.includedir @@ -91,6 +99,30 @@ def __init__(self, krb5configs): else: self.data[key] = value + def _getbool(option, default=None): + if not self.has_option("libdefaults", option): + return default + return self.getboolean("libdefaults", option) + + self.dns_lookup_realm = _getbool("dns_lookup_realm", True) + self.dns_lookup_kdc = _getbool("dns_lookup_kdc", True) + if self.has_option("libdefaults", "default_realm"): + self.default_realm = self["libdefaults"]["default_realm"] + else: + self.default_realm = None + + self.realms = set() + if self.has_section("realms"): + r = self["realms"] + for name, value in r.items(): + if ( + # realm entries must be dicts + isinstance(value, dict) and + # realm names look like "UPPER-CASE.COM" + not any(c.islower() or c == "_" for c in name) + ): + self.realms.add(name) + super(AllKrb5Conf, self).__init__() def sections(self): @@ -120,3 +152,33 @@ def has_option(self, section, option): if section not in self.data: return False return option in self.data[section] + + def getboolean(self, section, option): + """Parse option as bool + + Returns None is not a krb5.conf boolean string. + """ + value = self.data[section][option] + return _handle_krb5_bool(value) + + +def dict_deep_merge(tgt, src): + """ + Utility function to merge the source dictionary `src` to the target + dictionary recursively + + Note: + The type of the values in the dictionary can only be `dict` or `list` + + Parameters: + tgt (dict): The target dictionary + src (dict): The source dictionary + """ + for k, v in src.items(): + if k in tgt: + if isinstance(tgt[k], dict) and isinstance(v, dict): + dict_deep_merge(tgt[k], v) + else: + tgt[k].extend(deepcopy(v)) + else: + tgt[k] = deepcopy(v) diff --git a/insights/combiners/logrotate_conf.py b/insights/combiners/logrotate_conf.py index 222261be85..496ae14a29 100644 --- a/insights/combiners/logrotate_conf.py +++ b/insights/combiners/logrotate_conf.py @@ -9,20 +9,14 @@ options, and all other options (if there are) will be discarded. 
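An illustrative call of the ``dict_deep_merge`` helper added to the krb5 combiner above, under its documented assumption that values are dicts or lists; the data is made up:

    tgt = {"libdefaults": {"default_realm": ["EXAMPLE.COM"]}}
    src = {"libdefaults": {"dns_lookup_kdc": ["true"]},
           "logging": {"kdc": ["FILE:/var/log/krb5kdc.log"]}}
    dict_deep_merge(tgt, src)
    # tgt now holds both libdefaults options plus the new "logging" section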
""" - import operator import os -import string + from fnmatch import fnmatch -from insights.core import ConfigCombiner, ConfigParser -from insights.core.plugins import combiner, parser -from insights.parsers.logrotate_conf import LogrotateConf +from insights.core import ConfigCombiner +from insights.core.plugins import combiner +from insights.parsers.logrotate_conf import LogrotateConf, LogRotateConfPEG from insights.parsr.query import eq -from insights.specs import Specs -from insights.parsr import (AnyChar, Choice, EOF, EOL, Forward, LeftCurly, - LineEnd, Literal, Many, Number, OneLineComment, Opt, PosMarker, - QuotedString, RightCurly, skip_none, String, WS, WSChar) -from insights.parsr.query import Directive, Entry, Section @combiner(LogrotateConf) @@ -146,65 +140,7 @@ def configfile_of_logfile(self, log_file): return f -class DocParser(object): - def __init__(self, ctx): - self.ctx = ctx - - scripts = set("postrotate prerotate firstaction lastaction preremove".split()) - Stanza = Forward() - Spaces = Many(WSChar) - Bare = String(set(string.printable) - (set(string.whitespace) | set("#{}'\""))) - Num = Number & (WSChar | LineEnd) - Comment = OneLineComment("#").map(lambda x: None) - ScriptStart = WS >> PosMarker(Choice([Literal(s) for s in scripts])) << WS - ScriptEnd = Literal("endscript") - Line = (WS >> AnyChar.until(EOL) << WS).map(lambda x: "".join(x)) - Lines = Line.until(ScriptEnd).map(lambda x: "\n".join(x)) - Script = ScriptStart + Lines << ScriptEnd - Script = Script.map(lambda x: [x[0], [x[1]], None]) - BeginBlock = WS >> LeftCurly << WS - EndBlock = WS >> RightCurly - First = PosMarker((Bare | QuotedString)) << Spaces - Attr = Spaces >> (Num | Bare | QuotedString) << Spaces - Rest = Many(Attr) - Block = BeginBlock >> Many(Stanza).map(skip_none).map(self.to_entries) << EndBlock - Stmt = WS >> (Script | (First + Rest + Opt(Block))) << WS - Stanza <= WS >> (Stmt | Comment) << WS - Doc = Many(Stanza).map(skip_none).map(self.to_entries) - - self.Top = Doc + EOF - - def to_entries(self, x): - ret = [] - for i in x: - name, attrs, body = i - if body: - for n in [name.value] + attrs: - ret.append(Section(name=n, children=body, lineno=name.lineno)) - else: - ret.append(Directive(name=name.value, attrs=attrs, lineno=name.lineno)) - return ret - - def __call__(self, content): - return self.Top(content) - - -def parse_doc(content, ctx=None): - """ Parse a configuration document into a tree that can be queried. """ - if isinstance(content, list): - content = "\n".join(content) - parse = DocParser(ctx) - result = parse(content)[0] - return Entry(children=result, src=ctx) - - -@parser(Specs.logrotate_conf, continue_on_error=False) -class _LogRotateConf(ConfigParser): - def parse_doc(self, content): - return parse_doc("\n".join(content), ctx=self) - - -@combiner(_LogRotateConf) +@combiner(LogRotateConfPEG) class LogRotateConfTree(ConfigCombiner): """ Exposes logrotate configuration through the parsr query interface. @@ -228,12 +164,3 @@ def find_matches(self, confs, pattern): elif fnmatch(c.file_path, pattern): results.append(c) return sorted(results, key=operator.attrgetter("file_name")) - - -def get_tree(root=None): - """ - This is a helper function to get a logrotate configuration component for - your local machine or an archive. It's for use in interactive sessions. 
- """ - from insights import run - return run(LogRotateConfTree, root=root).get(LogRotateConfTree) diff --git a/insights/combiners/lspci.py b/insights/combiners/lspci.py new file mode 100644 index 0000000000..42d28acc3b --- /dev/null +++ b/insights/combiners/lspci.py @@ -0,0 +1,182 @@ +""" +LsPci - Commands ``lspci`` +========================== + +This combiner combines the following Parsers to a list. +- LsPci - the output of command ``lspci -k`` +- LsPciVmmkn - the output of command ``lspci -vmmkn`` +""" +from insights import combiner +from insights.parsers import keyword_search +from insights.parsers.lspci import LsPci, LsPciVmmkn + + +@combiner([LsPci, LsPciVmmkn]) +class LsPci(list): + """ + Combines the Parser LsPci of ``/sbin/lspci -k`` command and Parser + LsPciVmmkn of ``/sbin/lspci -vmmkn`` command. + + .. note:: + In case the ``lspci -k`` sometimes outputs the `Slot` in the full + format of ``domain:bus:device.function``, and the ``lspci -k`` is more + common than ``lspci -vmmkn``, so this combiner will take the `Slot` of + the `lspci -k` as the key. + + Typical output of the ``lspci -k`` command is:: + + 00:00.0 Host bridge: Intel Corporation Haswell-ULT DRAM Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: hsw_uncore + 00:02.0 VGA compatible controller: Intel Corporation Haswell-ULT Integrated Graphics Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: i915 + Kernel modules: i915 + 00:03.0 Audio device: Intel Corporation Haswell-ULT HD Audio Controller (rev 09) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: snd_hda_intel + Kernel modules: snd_hda_intel + 00:16.0 Communication controller: Intel Corporation 8 Series HECI #0 (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: mei_me + Kernel modules: mei_me + 00:19.0 Ethernet controller: Intel Corporation Ethernet Connection I218-LM (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: e1000e + Kernel modules: e1000e + 00:1b.0 Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04) + Subsystem: Lenovo ThinkPad X240 + Kernel driver in use: snd_hda_intel + Kernel modules: snd_hda_intel + + Typical output of the ``lspci -vmmkn`` command is:: + + Slot: 00:00.0 + Class: 0600 + Vendor: 8086 + Device: 0a04 + SVendor: 17aa + SDevice: 2214 + Rev: 09 + Driver: hsw_uncore + + Slot: 00:02.0 + Class: 0300 + Vendor: 8086 + Device: 0a16 + SVendor: 17aa + SDevice: 2214 + Rev: 09 + Driver: i915 + Module: i915 + + Slot: 00:03.0 + Class: 0403 + Vendor: 8086 + Device: 0a0c + SVendor: 17aa + SDevice: 2214 + Rev: 09 + Driver: snd_hda_intel + Module: snd_hda_intel + + Slot: 00:16.0 + Class: 0780 + Vendor: 8086 + Device: 9c3a + SVendor: 17aa + SDevice: 2214 + Rev: 04 + Driver: mei_me + Module: mei_me + + Slot: 00:19.0 + Class: 0200 + Vendor: 8086 + Device: 155a + SVendor: 17aa + SDevice: 2214 + Rev: 04 + Driver: e1000e + Module: e1000e + + Slot: 00:1b.0 + Class: 0403 + Vendor: 8086 + Device: 9c20 + SVendor: 17aa + SDevice: 2214 + Rev: 04 + Driver: snd_hda_intel + Module: snd_hda_intel + + Examples: + >>> type(lspci) + + >>> sorted(lspci.pci_dev_list) + ['00:00.0', '00:02.0', '00:03.0', '00:16.0', '00:19.0', '00:1b.0'] + >>> lspci.search(Dev_Details__contains='I218')[0]['Slot'] + '00:19.0' + """ + def __init__(self, lspci_k, lspci_vmmkn): + if lspci_vmmkn: + for dev in lspci_vmmkn: + # use the local copy to prevent from writing back to the parser + dev = dev.copy() + if lspci_k and dev['Slot'] in lspci_k: + # use the local copy to prevent from 
writing back to the parser + dev_k = [v for v in lspci_k.data.values() if v['Slot'].endswith(dev['Slot'])][0].copy() + # Since the 'lspci -k' is a more common command than the + # 'lspci -vmmkn', the following line should be commented + # out to use the 'Slot' in 'lspci -k' as the 'Slot' in + # this combiner: + # dev_k.pop('Slot') if 'Slot' in dev_k else None + dev_k.pop('Kernel driver in use') if 'Kernel driver in use' in dev_k else None + dev_k.pop('Kernel modules') if 'Kernel modules' in dev_k else None + dev.update(dev_k) + self.append(dev) + self._pci_dev_list = (lspci_k if lspci_k else lspci_vmmkn).pci_dev_list + else: + for dev in lspci_k.data.values(): + # use the local copy to prevent from writing back to the parser + dev = dev.copy() + dev.update(Driver=dev.pop('Kernel driver in use')) if 'Kernel driver in use' in dev else None + dev.update(Module=[i.strip() for i in dev.pop('Kernel modules').split(',')]) if 'Kernel modules' in dev else None + self.append(dev) + self._pci_dev_list = lspci_k.pci_dev_list + + @property + def pci_dev_list(self): + """ + The list of PCI devices. + """ + return self._pci_dev_list + + def search(self, **kwargs): + """ + Get the details of PCI devices by searching the table with kwargs. + + This uses the :py:func:`insights.parsers.keyword_search` function for + searching; see its documentation for usage details. If no search + parameters are given, no rows are returned. + + It simplifies the values of the columns according to actual usage. + + Returns: + list: A list of dictionaries of PCI devices that match the given + search criteria. + + Examples: + >>> len(lspci.search(Subsystem__startswith='Lenovo')) + 6 + >>> len(lspci.search(Subsystem__startswith='Lenovo', Dev_Details__startswith='Audio device')) + 2 + >>> lspci.search(Driver='snd_hda_intel', Dev_Details__contains='8') == [ + ... {'Slot': '00:1b.0', 'Class': '0403', 'Vendor': '8086', + ... 'Device': '9c20', 'SVendor': '17aa', 'SDevice': '2214', + ... 'Rev': '04', 'Driver': 'snd_hda_intel', + ... 'Module': ['snd_hda_intel'], 'Subsystem': 'Lenovo ThinkPad X240', + ... 'Dev_Details': 'Audio device: Intel Corporation 8 Series HD Audio Controller (rev 04)'}] + True + """ + return keyword_search(self, **kwargs) diff --git a/insights/combiners/modinfo.py b/insights/combiners/modinfo.py index f2bcb873c5..ed4459c3c6 100644 --- a/insights/combiners/modinfo.py +++ b/insights/combiners/modinfo.py @@ -1,43 +1,31 @@ """ -ModInfo -======= - -The ModInfo combiner gathers all the ModInfoEach parsers into a dictionary -indexed by the module name. +Combiners - command ``modinfo <module_name>`` +============================================= +ModulesInfo +----------- +The ModulesInfo combines the collected modules info from the result of +``KernelModulesInfo``. """ - +from insights.core.exceptions import SkipComponent from insights.core.plugins import combiner -from insights.parsers.modinfo import ModInfoEach, ModInfoAll -from insights import SkipComponent +from insights.parsers.modinfo import KernelModulesInfo -@combiner([ModInfoAll, ModInfoEach]) -class ModInfo(dict): +@combiner([KernelModulesInfo]) +class ModulesInfo(dict): """ - Combiner for accessing all the modinfo outputs. + Combiner to combine the result of KernelModulesInfo, which supports filtering, + and the parsers which only support a single module. It refers to + ``KernelModulesInfo`` first.
Examples: - >>> type(modinfo_obj) - - >>> type(modinfo_obj['i40e']) - - >>> modinfo_obj['i40e'].module_name - 'i40e' - >>> modinfo_obj['i40e'].module_name - 'i40e' - >>> modinfo_obj['i40e']['retpoline'] - 'Y' - >>> modinfo_obj['i40e'].module_version - '2.3.2-k' - >>> modinfo_obj['i40e'].module_path - '/lib/modules/3.10.0-993.el7.x86_64/kernel/drivers/net/ethernet/intel/i40e/i40e.ko.xz' - >>> "i40e" in modinfo_obj.retpoline_y + >>> type(modules_obj) + + >>> 'i40e' in modules_obj True - >>> "bnx2x" in modinfo_obj.retpoline_y + >>> 'bnx2x' in modules_obj.retpoline_y False - >>> "bnx2x" in modinfo_obj.retpoline_n - True Raises: SkipComponent: When content is empty. @@ -46,26 +34,12 @@ class ModInfo(dict): retpoline_y (set): A set of names of the modules with the attribute "retpoline: Y". retpoline_n (set): A set of names of the modules with the attribute "retpoline: N". """ - def __init__(self, mi_all, mi_each): + def __init__(self, filtered_modules_info): self.retpoline_y = set() self.retpoline_n = set() - if mi_all: - self.update(mi_all) - self.retpoline_y = mi_all.retpoline_y - self.retpoline_n = mi_all.retpoline_n - else: - for m in mi_each: - name = m.module_name - self[name] = m - self.retpoline_y.add(name) if m.get('retpoline') == 'Y' else None - self.retpoline_n.add(name) if m.get('retpoline') == 'N' else None - - if len(self) == 0: - raise SkipComponent("No Parsed Contents") - - @property - def data(self): - """ - (dict): Dict with the module name as the key and the module details as the value. - """ - return self + if filtered_modules_info: + self.update(filtered_modules_info) + self.retpoline_n = filtered_modules_info.retpoline_n + self.retpoline_y = filtered_modules_info.retpoline_y + if not self: + raise SkipComponent diff --git a/insights/combiners/modprobe.py b/insights/combiners/modprobe.py index a3fb596e4f..97c6c6e53d 100644 --- a/insights/combiners/modprobe.py +++ b/insights/combiners/modprobe.py @@ -82,7 +82,7 @@ def __init__(self, modprobe): if section not in self.data: self.data[section] = {} for name, value in sectdict.items(): - if name in self.data[section]: + if name in self.data[section] and type(self.data[section][name][0]) == list: # append to this module's value - should only # happen for aliases. 
self.data[section][name][0].append(value) diff --git a/insights/combiners/multinode.py b/insights/combiners/multinode.py index cff7fd989b..a94ace88f1 100644 --- a/insights/combiners/multinode.py +++ b/insights/combiners/multinode.py @@ -1,11 +1,11 @@ from insights import combiner -from insights.combiners.hostname import hostname +from insights.combiners.hostname import Hostname from insights.core.context import create_product from insights.parsers.metadata import MetadataJson from insights.specs import Specs -@combiner(MetadataJson, [hostname, Specs.machine_id]) +@combiner(MetadataJson, [Hostname, Specs.machine_id]) def multinode_product(md, hn, machine_id): hn = hn.fqdn if hn else machine_id.content[0].rstrip() return create_product(md.data, hn) diff --git a/insights/combiners/nfs_exports.py b/insights/combiners/nfs_exports.py index 887d76c3c5..7dd9c26505 100644 --- a/insights/combiners/nfs_exports.py +++ b/insights/combiners/nfs_exports.py @@ -29,11 +29,13 @@ True """ - from insights.core.plugins import combiner from insights.parsers.nfs_exports import NFSExports, NFSExportsD -import collections +try: + from six.moves import collections_abc +except ImportError: + import collections as collections_abc @combiner(NFSExports, optional=[NFSExportsD]) @@ -70,7 +72,7 @@ def __init__(self, nfsexports, nfsexportsd): sources = [nfsexports] # Make sure exports are stored in the order they're parsed - # alphabetically by file name. Ignore it if nfsexportsd isn't valid. - if isinstance(nfsexportsd, collections.Iterable): + if isinstance(nfsexportsd, collections_abc.Iterable): sources.extend(sorted(nfsexportsd, key=lambda f: f.file_path)) def add_paths_to_dict(src_path, src_dict, dest_dict): diff --git a/insights/combiners/nginx_conf.py b/insights/combiners/nginx_conf.py index 49de08b3aa..d2eae628f3 100644 --- a/insights/combiners/nginx_conf.py +++ b/insights/combiners/nginx_conf.py @@ -1,71 +1,24 @@ -#!/usr/bin/env python """ NginxConfTree - Combiner for nginx configuration -======================================================= -This module models nginx configuration as a tree. It correctly handles include -directives by splicing individual document trees into their parents until one -document tree is left. - -A DSL is provided to query the tree through a select function or brackets []. -The brackets allow a more conventional lookup feel but aren't quite as powerful -as using select directly. 
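A rough sketch of the query styles described in the nginx docstring above, assuming a populated ``NginxConfTree`` instance named ``conf``; the directive names are only illustrative:

    res = conf.find("worker_processes")                     # search the whole spliced tree
    workers = res.value if res else None                    # same access pattern conf_path uses below
    server_names = conf["http"]["server"]["server_name"]    # bracket-style lookup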
+================================================ """ -import os -import string -from insights import combiner, parser, run -from insights.core import ConfigCombiner, ConfigParser +from insights.core import ConfigCombiner, ContainerConfigCombiner +from insights.core.plugins import combiner +from insights.parsers.nginx_conf import NginxConfPEG, ContainerNginxConfPEG from insights.parsr.query import eq -from insights.parsr import (Char, EOF, Forward, LeftCurly, Lift, LineEnd, - RightCurly, Many, Number, OneLineComment, Parser, PosMarker, SemiColon, - QuotedString, skip_none, String, WS, WSChar) -from insights.parsr.query import Directive, Entry, Section -from insights.specs import Specs - - -class EmptyQuotedString(Parser): - def __init__(self, chars): - super(EmptyQuotedString, self).__init__() - single = Char("'") >> String(set(chars) - set("'"), "'", 0) << Char("'") - double = Char('"') >> String(set(chars) - set('"'), '"', 0) << Char('"') - self.add_child(single | double) - - def process(self, pos, data, ctx): - return self.children[0].process(pos, data, ctx) - - -@parser(Specs.nginx_conf, continue_on_error=False) -class _NginxConf(ConfigParser): - def __init__(self, *args, **kwargs): - def to_entry(name, attrs, body): - if body == ";": - return Directive(name=name.value, attrs=attrs, lineno=name.lineno, src=self) - return Section(name=name.value, attrs=attrs, children=body, lineno=name.lineno, src=self) - - name_chars = string.ascii_letters + "_/" - Stmt = Forward() - Num = Number & (WSChar | LineEnd | SemiColon) - Comment = OneLineComment("#").map(lambda x: None) - BeginBlock = WS >> LeftCurly << WS - EndBlock = WS >> RightCurly << WS - Bare = String(set(string.printable) - (set(string.whitespace) | set("#;{}'\""))) - Name = WS >> PosMarker(String(name_chars) | EmptyQuotedString(name_chars)) << WS - Attr = WS >> (Num | Bare | QuotedString) << WS - Attrs = Many(Attr) - Block = BeginBlock >> Many(Stmt).map(skip_none) << EndBlock - Stanza = (Lift(to_entry) * Name * Attrs * (Block | SemiColon)) | Comment - Stmt <= WS >> Stanza << WS - Doc = Many(Stmt).map(skip_none) - self.Top = Doc + EOF - super(_NginxConf, self).__init__(*args, **kwargs) +from os.path import dirname - def parse_doc(self, content): - return Entry(children=self.Top("\n".join(content))[0], src=self) - -@combiner(_NginxConf) +@combiner(NginxConfPEG) class NginxConfTree(ConfigCombiner): """ - Exposes nginx configuration through the parsr query interface. + This module models nginx configuration as a tree. It correctly handles include + directives by splicing individual document trees into their parents until one + document tree is left. + + A DSL is provided to query the tree through a select function or brackets []. + The brackets allow a more conventional lookup feel but aren't quite as powerful + as using select directly. See the :py:class:`insights.core.ConfigComponent` class for example usage. """ @@ -74,16 +27,29 @@ def __init__(self, confs): @property def conf_path(self): - return os.path.dirname(self.main.file_path) + return dirname(self.main.file_path) -def get_tree(root=None): - """ - This is a helper function to get an nginx configuration component for your - local machine or an archive. It's for use in interactive sessions. +@combiner(ContainerNginxConfPEG) +class ContainerNginxConfTree(list): """ - return run(NginxConfTree, root=root).get(NginxConfTree) + This module models the nginx configuration of the same running containers + as a tree and wrap the `tree` of containers into a list. 
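A minimal sketch of consuming the per-container trees, assuming ``ctn_trees`` is a ``ContainerNginxConfTree`` and that each element keeps the engine/image/container id it is constructed with in the code below:

    for tree in ctn_trees:                           # one combined nginx tree per running container
        cid, image = tree.container_id, tree.image   # metadata passed to ContainerConfigCombiner below
        res = tree.find("worker_processes")          # parsr-style query on that container's config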
+ Within the tree, it correctly handles include directives by splicing + individual document trees into their parents until one document tree is + left. - -if __name__ == "__main__": - run(NginxConfTree, print_summary=True) + See the :py:class:`insights.core.ConfigComponent` class for example usage. + """ + def __init__(self, ctn_confs): + containers = {} + for ctn in ctn_confs: + if ctn.container_id not in containers: + containers[ctn.container_id] = [ctn] + else: + containers[ctn.container_id].append(ctn) + + for ctn_id, confs in containers.items(): + self.append(ContainerConfigCombiner( + confs, "nginx.conf", eq("include"), + confs[0].engine, confs[0].image, ctn_id)) diff --git a/insights/combiners/nmcli_dev_show.py b/insights/combiners/nmcli_dev_show.py index 63dfbb30e6..7925413d34 100644 --- a/insights/combiners/nmcli_dev_show.py +++ b/insights/combiners/nmcli_dev_show.py @@ -4,9 +4,9 @@ As there are three different file paths in different sos packages, create this combiner to fix this issue. """ - +from insights.core.exceptions import SkipComponent from insights.core.plugins import combiner -from insights.parsers.nmcli import NmcliDevShow, NmcliDevShowSos, SkipException +from insights.parsers.nmcli import NmcliDevShow, NmcliDevShowSos @combiner([NmcliDevShow, NmcliDevShowSos]) @@ -34,7 +34,7 @@ def __init__(self, nmclidevshow, nmclidevshowsos): self._con_dev.extend(item.connected_devices) if not data: - raise SkipException() + raise SkipComponent() super(AllNmcliDevShow, self).__init__() self.update(data) diff --git a/insights/combiners/os_release.py b/insights/combiners/os_release.py new file mode 100644 index 0000000000..f4db3e95b0 --- /dev/null +++ b/insights/combiners/os_release.py @@ -0,0 +1,323 @@ +""" +OSRelease +========= +The ``OSRelease`` combiner uses the following parsers to try to identify whether the +current host is installed with a "Red Hat Enterprise Linux" system. + + - :py:class:`insights.parsers.uname.Uname` + - :py:class:`insights.parsers.dmesg.DmesgLineList` + - :py:class:`insights.parsers.installed_rpms.InstalledRpms` + +It provides an attribute `is_rhel` that indicates if the host is "RHEL" or not. +It also provides an attribute `release` which returns the estimated OS release +name of the system; "Unknown" will be returned by default when the OS cannot be +identified. + +* TODO: + The lists of keywords that each sub-combiner uses to identify a NON-RHEL system + are based on our current knowledge and may not be sufficient. They need to be + updated over time as new Linux distributions are encountered.
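As a quick orientation, a hypothetical rule consuming this combiner might look like the sketch below; the rule function and response key are invented, and only the attributes documented in this module (``is_rhel``, ``release``, ``reasons``) are used:

    from insights.core.plugins import make_fail, rule
    from insights.combiners.os_release import OSRelease

    @rule(OSRelease)
    def flag_non_rhel(osr):
        # hypothetical check: flag hosts the combiner does not classify as RHEL
        if not osr.is_rhel:
            return make_fail("HYPOTHETICAL_NON_RHEL", release=osr.release, reasons=osr.reasons)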
+""" +from insights.core.filters import add_filter +from insights.core.plugins import combiner +# TODO: replace DmesgLineList with '/proc/version' (not collected yet) +from insights.parsers.dmesg import DmesgLineList +from insights.parsers.installed_rpms import InstalledRpm, InstalledRpms +from insights.parsers.os_release import OsRelease +from insights.parsers.redhat_release import RedhatRelease +from insights.parsers.uname import Uname + +RHEL_KEYS = ['rhel', 'red hat enterprise linux'] +OTHER_LINUX_KEYS = { + # other_linux: (dmesg-keywords, release-packages) + 'Fedora': ( + ['fedora'], + ['fedora-release']), + 'CentOS': ( + ['centos'], + ['centos-stream-release', 'centos-release']), + 'Oracle': ( + ['oracle'], + ['enterprise-release', 'oraclelinux-release']), + 'CloudLinux': ( + ['cloudlinux'], + ['cloudlinux-release']), + 'ClearOS': ( + ['clearos'], + ['clearos-release']), + 'AlmaLinux': ( + ['almalinux'], + ['almalinux-release']), + 'Rocky': ( + ['rockylinux', 'rocky'], + ['rocky-release']), + 'Scientific': ( + [], # Empty + ['sl-release']), + 'SUSE': ( + ['suse', 'sles', 'novell'], + ['sles-release', 'sles_es-release-server']), +} +# Get "Linux version" from `dmesg` +DMESG_LINUX_BUILD_INFO = 'Linux version' +add_filter(DmesgLineList, DMESG_LINUX_BUILD_INFO) +DmesgLineList.keep_scan("linux_version", DMESG_LINUX_BUILD_INFO, num=1) +# Must-install packages for minimum RHEL system +MINIMUM_RHEL_PKGS = [ + # 'kernel' is must-install too, it's checked individually (booting kernel) + 'audit-libs', + 'basesystem', + 'bash', + 'coreutils', + 'dbus', + 'dmidecode', + 'dnf', # RHEL 8+ + 'dracut', + 'filesystem', + 'firewalld', + 'glibc', + 'gmp', + 'krb5-libs', + 'libacl', + 'libgcc', + 'libselinux', + 'NetworkManager', + 'openssl-libs', + 'passwd', + 'redhat-release', # RHEL 8+ + 'redhat-release-server', # RHEL 6/7 + 'systemd', # RHEL 7+ + 'util-linux', + 'yum', # RHEL 6/7 +] +"""Must-install packages for minimum installed RHEL system.""" +THRESHOLD = 0.75 +"""Threshold of the must-install packages to identify NON-RHEL""" + + +def _from_os_release(osr): + """ + Internal function to check the `/etc/os-release`. + """ + def _filter(name): + """Remove falsy or items contain RHEL info""" + if not name or any(k in name.lower() for k in RHEL_KEYS): + return False + return name + + names = list(filter(_filter, [osr.get('ID'), osr.get('NAME'), + osr.get('PRETTY_NAME')])) + if names: + # NON-RHEL: /etc/os-release + return dict(other_linux=names[-1]) + # RHEL + return dict(other_linux='RHEL') + + +def _from_redhat_release(rhr): + """ + Internal function to check the `/etc/redhat-release`. + """ + if not rhr.is_rhel: + return dict(other_linux=rhr.product) + # RHEL + return dict(other_linux='RHEL') + + +def _from_uname(uname): + """ + Internal function to check the `uname -a` output. + + 1. Oracle kernels may contain 'uek' or 'ol' in the kernel NVR. + 2. Fedora kernels contains 'fc' in the NVR. + 3. RHEL kernels have '.el' in the NVR + RHEL based Linux kernels may also have the '.el' in the NVR, + but they are also checked in other sub-combiners. + 4. 
Otherwise, flag it as an "Unknown" + """ + LINUX_UNAME_KEYS = [ + ('Oracle', ['uek', 'ol']), + ('Fedora', ['fc']), + ('RHEL', ['.el']), # the last item + ] + kernel = uname.kernel + release = 'Unknown' + for rel, keys in LINUX_UNAME_KEYS: + if any(key in kernel for key in keys): + release = rel + break + if 'RHEL' != release: + return dict(other_linux=release, kernel=kernel) + # Not Sure + return dict() + + +def _from_dmesg(dmesg): + """ + Internal function to check the `dmesg` output. + + The `dmesg` includes a line containing the kernel build information, + e.g. the build host and GCC version. + If this line doesn't contain 'redhat.com' then we can assume the kernel + wasn't built on a Red Hat machine and this should be flagged. + """ + line = dmesg.linux_version[0]['raw_message'] + low_line = line.lower() + if 'redhat.com' not in low_line: + release = 'Unknown' + for rel, keys in OTHER_LINUX_KEYS.items(): + if any(kw in low_line for kw in keys[0]): + release = rel + break + return dict(other_linux=release, build_info=line) + # Not Sure + return dict() + + +def _from_installed_rpms(rpms, uname): + """ + Internal function to check the `rpm -qa --qf ...` output. + + Two parts are included, see below: + """ + # Part-1: the known non-rhel-release packages exists + for rel, pkgs in OTHER_LINUX_KEYS.items(): + for pkg_name in pkgs[1]: + pkg = rpms.newest(pkg_name) + if pkg: + return dict(other_linux=rel, release=pkg.nvr) + # Part-2: too many must-install packages are NOT from Red Hat + # - more than THRESHOLD packages are not signed and not provided by Red Hat + # faulty_packages >= THRESHOLD * must-install packages + installed_packages = 0 + vendor, ng_pkgs = '', set() + if uname: + # check the booting 'kernel' first + installed_packages += 1 + boot_kn = InstalledRpm.from_package('kernel-{0}'.format(uname.kernel)) + for pkg in rpms.packages.get('kernel', []): + if pkg == boot_kn: + vendor = pkg.vendor + if pkg.redhat_signed is False and vendor != 'Red Hat, Inc.': + ng_pkgs.add(pkg.nvr) + # check the booting kernel only + break + # check other packages + for pkg_name in MINIMUM_RHEL_PKGS: + pkg = rpms.newest(pkg_name) + if pkg: + # count the package only when it's installed + installed_packages += 1 + if pkg.redhat_signed is False and pkg.vendor != 'Red Hat, Inc.': + ng_pkgs.add(pkg.nvr) + # check the result + if len(ng_pkgs) >= round(THRESHOLD * installed_packages) > 0: + # NON-RHEL: more than THRESHOLD packages are NOT from Red Hat + ret = dict(other_linux='Unknown', faulty_packages=sorted(ng_pkgs)) + if vendor: + ret.update(kernel_vendor=vendor) + # try to get the release from kernel vendor + if 'red hat' not in vendor.lower(): + sep = ',' if ',' in vendor else ' ' + release = vendor.split(sep)[0].strip() + ret.update(other_linux=release) + return ret + # RHEL + return dict(other_linux='RHEL') + + +@combiner(optional=[Uname, DmesgLineList, InstalledRpms, + OsRelease, RedhatRelease]) +class OSRelease(object): + """ + A Combiner identifies whether the current Linux a Red Hat Enterprise Linux + or not. 
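A quick numeric illustration of the threshold rule applied in ``_from_installed_rpms`` above; the counts are invented:

    THRESHOLD = 0.75
    installed_packages = 20      # booting kernel plus must-install packages found installed
    faulty = 16                  # not Red Hat signed and vendor != 'Red Hat, Inc.'
    faulty >= round(THRESHOLD * installed_packages) > 0   # 16 >= 15 > 0 -> flagged as NON-RHEL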
+ + Examples: + >>> type(osr) + + >>> osr.is_rhel + False + >>> osr.release == "Oracle" + True + >>> sorted(osr.reasons.keys()) + ['build_info', 'faulty_packages', 'kernel', 'kernel_vendor'] + >>> 'version kernel-4.18.0-372.19.1.el8_6uek' in osr.reasons['build_info'] + True + >>> osr.reasons['kernel'] + '4.18.0-372.19.1.el8_6uek.x86_64' + >>> osr.reasons['kernel_vendor'] == 'Oracle America' + True + >>> 'glibc-2.28-211.el8' in osr.reasons['faulty_packages'] + True + """ + def __init__(self, uname, dmesg, rpms, osr, rhr): + def _update_other_linux(ret, data): + if data.get('other_linux') == 'Unknown' and 'other_linux' in ret: + # Don't update 'other_linux' to 'Unknown' if identified already + data.pop('other_linux') + ret.update(data) + return ret + + self._release = 'RHEL' + self._reasons = {} + _dmesg = dmesg.linux_version if dmesg else dmesg + if not list(filter(None, [uname, _dmesg, rpms])): + # When uname, dmesg, and rpms are all unavailable + if osr or rhr: + # Use 'os-release' and 'redhat-release + ret = _from_os_release(osr) if osr else dict() + ret.update(_from_redhat_release(rhr)) if rhr else None + if ret.get('other_linux', 'RHEL') != 'RHEL': + self._release = ret['other_linux'] + self._reasons = {'reason': 'NON-RHEL: os-release/redhat-release'} + else: + # Nothing means NON-RHEL + self._release = 'Unknown' + self._reasons = {'reason': 'Nothing available to check'} + else: + # Uname -> Dmesg -> RPMs + result = _from_uname(uname) if uname else dict() + if dmesg and dmesg.linux_version: + result.update(_update_other_linux(result, _from_dmesg(dmesg))) + if rpms: + result.update(_update_other_linux( + result, _from_installed_rpms(rpms, uname))) + # 'other_linux' means NON-RHEL + if 'other_linux' in result and result['other_linux'] != 'RHEL': + self._release = result.pop('other_linux') + self._reasons = result + + @property + def is_rhel(self): + """ + Returns True if it's RHEL, False for NON-RHEL. + """ + return self._release == 'RHEL' + + @property + def release(self): + """ + Returns the estimated release name of the running Linux. + """ + return self._release + + @property + def reasons(self): + """ + Returns a dict indicating why the host is a NON-RHEL. Empty when + it's an RHEL. The keys include:: + + kernel (str): the kernel package + build_info (str): the kernel build information + release (str): the release package + faulty_packages (list): the packages that are not signed and not + provided by Red Hat + reason (str): a string when nothing is available to check + """ + return self._reasons + + @property + def product(self): + """ Alias of `release`. Keep backward compatible """ + return self._release diff --git a/insights/combiners/package_provides_httpd.py b/insights/combiners/package_provides_httpd.py deleted file mode 100644 index 927841ea58..0000000000 --- a/insights/combiners/package_provides_httpd.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -PackageProvidesHttpdAll - Combiner for packages which provide httpd -=================================================================== - -Combiner for collecting all the running httpd command and the corresponding RPM package name -which is parsed by the PackageProvidesHttpd parser. - -""" - -from insights.core.plugins import combiner -from insights.parsers.package_provides_httpd import PackageProvidesHttpd -from .. 
import LegacyItemAccess - - -@combiner(PackageProvidesHttpd) -class PackageProvidesHttpdAll(LegacyItemAccess): - """ - This combiner will receive a list of parsers named PackageProvidesHttpd, one for each running instance of httpd - and each parser instance will contain the command information and the RPM package information. - It works as a ``dict`` with the httpd command information as the key and the - corresponding RPM package information as the value. - - Examples: - >>> sorted(packages.running_httpds) - ['/opt/rh/httpd24/root/usr/sbin/httpd', '/usr/sbin/httpd'] - >>> packages.get_package("/usr/sbin/httpd") - 'httpd-2.4.6-88.el7.x86_64' - >>> packages.get("/opt/rh/httpd24/root/usr/sbin/httpd") - 'httpd24-httpd-2.4.34-7.el7.x86_64' - >>> packages["/usr/sbin/httpd"] - 'httpd-2.4.6-88.el7.x86_64' - - """ - - def __init__(self, package_provides_httpd): - self.data = {} - for pkg in package_provides_httpd: - self.data[pkg.command] = pkg.package - super(PackageProvidesHttpdAll, self).__init__() - - @property - def running_httpds(self): - """ - Returns the list of httpd commands which are running on the system. - """ - return list(self.data.keys()) - - def get_package(self, httpd_command): - """ - Returns the installed httpd package that provides the specified `httpd_command`. - - Parameters: - httpd_command (str): The specified httpd command, e.g. found in ``ps`` command. - - Returns: - (str): The package that provides the httpd command. - """ - - return self.data.get(httpd_command) - - @property - def packages(self): - """ - Returns the list of corresponding httpd RPM packages which are running on the system. - """ - return list(self.data.values()) diff --git a/insights/combiners/package_provides_java.py b/insights/combiners/package_provides_java.py deleted file mode 100644 index 8d59fc4761..0000000000 --- a/insights/combiners/package_provides_java.py +++ /dev/null @@ -1,66 +0,0 @@ -""" -PackageProvidesJavaAll - Combiner for packages which provide java -================================================================= - -Combiner for collecting all the java command and the corresponding package name -which is parsed by the PackageProvidesJava parser. - -""" - -from .. import LegacyItemAccess -from insights.core.plugins import combiner -from insights.parsers.package_provides_java import PackageProvidesJava - - -@combiner(PackageProvidesJava) -class PackageProvidesJavaAll(LegacyItemAccess): - """ - Combiner for collecting all the java command and the corresponding package - name which is parsed by the PackageProvidesJava parser. - It works as a ``dict`` with the java command as the key and the - corresponding package name as the value. 
- - Examples: - >>> PACKAGE_COMMAND_MATCH_1 = '''/usr/lib/jvm/jre/bin/java java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64''' - >>> PACKAGE_COMMAND_MATCH_2 = '''/usr/lib/jvm/java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64/bin/java java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64''' - >>> pack1 = PackageProvidesJava(context_wrap(PACKAGE_COMMAND_MATCH_1)) - >>> pack2 = PackageProvidesJava(context_wrap(PACKAGE_COMMAND_MATCH_2)) - >>> shared = [{PackageProvidesJavaAll: [pack1, pack2]}] - >>> packages = shared[PackageProvidesJavaAll] - >>> packages.running_javas - ['/usr/lib/jvm/jre/bin/java', - '/usr/lib/jvm/java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64/bin/java'] - >>> packages.get_package("/usr/lib/jvm/jre/bin/java") - 'java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64' - >>> packages.get("/usr/lib/jvm/jre/bin/java") - 'java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64' - >>> packages["/usr/lib/jvm/jre/bin/java"] - 'java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64' - - """ - - def __init__(self, package_provides_java): - self.data = {} - for pkg in package_provides_java: - self.data[pkg.command] = pkg.package - super(PackageProvidesJavaAll, self).__init__() - - @property - def running_javas(self): - """ - Returns the list of java commands which are running on the system. - """ - return self.data.keys() - - def get_package(self, java_command): - """ - Returns the installed java package that provides the specified `java_command`. - - Parameters: - java_command (str): The specified java command, e.g. found in ``ps`` command. - - Returns: - (str): The package that provides the java command. - """ - - return self.data.get(java_command) diff --git a/insights/combiners/ps.py b/insights/combiners/ps.py index fcbae73e21..1105af6e31 100644 --- a/insights/combiners/ps.py +++ b/insights/combiners/ps.py @@ -6,6 +6,7 @@ More specifically this consolidates data from :py:class:`insights.parsers.ps.PsEo`, :py:class:`insights.parsers.ps.PsAuxcww`, +:py:class:`insights.parsers.ps.PsEoCmd`, :py:class:`insights.parsers.ps.PsEf`, :py:class:`insights.parsers.ps.PsAux`, :py:class:`insights.parsers.ps.PsAuxww` and @@ -21,8 +22,8 @@ Examples: - >>> ps_combiner.pids - [1, 2, 3, 8, 9, 10, 11, 12] + >>> sorted(ps_combiner.pids) + [1, 2, 3, 8, 9, 10, 11, 12, 13] >>> '[kthreadd]' in ps_combiner.commands True >>> '[kthreadd]' in ps_combiner @@ -53,10 +54,10 @@ from insights.core.plugins import combiner from insights.parsers import keyword_search -from insights.parsers.ps import PsAlxwww, PsAuxww, PsAux, PsAuxcww, PsEo, PsEf +from insights.parsers.ps import PsAlxwww, PsAuxww, PsAux, PsAuxcww, PsEo, PsEf, PsEoCmd -@combiner([PsAlxwww, PsAuxww, PsAux, PsEf, PsAuxcww, PsEo]) +@combiner([PsAlxwww, PsAuxww, PsAux, PsEf, PsAuxcww, PsEo, PsEoCmd]) class Ps(object): """ ``Ps`` combiner consolidates data from the parsers in ``insights.parsers.ps`` module. 
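For orientation, a small sketch of querying the consolidated process data, assuming the combiner exposes the keyword-search helper imported in this module and that ``ps`` is a populated ``Ps`` instance; the process name is made up:

    java_procs = ps.search(COMMAND__contains='java')   # keyword_search-style matching
    java_pids = [p['PID'] for p in java_procs]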
@@ -97,7 +98,7 @@ class Ps(object): 'WCHAN': None } - def __init__(self, ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo): + def __init__(self, ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo, ps_eo_cmd): self._pid_data = {} # order of parsers is important here @@ -105,6 +106,8 @@ def __init__(self, ps_alxwww, ps_auxww, ps_aux, ps_ef, ps_auxcww, ps_eo): self.__update_data(ps_eo) if ps_auxcww: self.__update_data(ps_auxcww) + if ps_eo_cmd: + self.__update_data(ps_eo_cmd) if ps_ef: # mapping configurations to combine PsEf data mapping = { @@ -222,9 +225,9 @@ def __iter__(self): def __update_data(self, ps_parser, mapping=None): """ Updates internal dictionary with the processes data from the parser. - New PIDs will be add added to the dictionary and existing ones - will be updated. ``mapping`` needs to specify attribute mapping - metadata for proper consolidation of data. + New PIDs will be added to the dictionary and existing ones will be + updated. ``mapping`` needs to specify attribute mapping metadata + for proper consolidation of data. Args: ps_parser (insights.parsers.ps.Ps): Ps parser implementation instance. @@ -240,8 +243,7 @@ def update_row(input_row, mapping): temp_row = self.__map_row(pid, input_row, mapping) pid_row.update(temp_row) - [update_row(row, mapping) - for row in ps_parser.data] + [update_row(row, mapping) for row in ps_parser.data if row['PID'].isdigit()] def __convert_data_types(self): """ @@ -257,9 +259,9 @@ def convert_attr(attr_name, row): row[attr_name] = type_ctor(row[attr_name]) [convert_attr(attr_name, row) - for attr_name in self.__CONVERSION_MAP - for row in self._pid_data.values() - if attr_name in row] + for attr_name in self.__CONVERSION_MAP + for row in self._pid_data.values() + if attr_name in row] def __map_row(self, pid, row, mapping): """ diff --git a/insights/combiners/redhat_release.py b/insights/combiners/redhat_release.py index 5185abd0f7..80987f2811 100644 --- a/insights/combiners/redhat_release.py +++ b/insights/combiners/redhat_release.py @@ -12,12 +12,12 @@ """ from collections import namedtuple + +from insights.core.exceptions import SkipComponent from insights.core.plugins import combiner +from insights.core.serde import deserializer, serializer from insights.parsers.redhat_release import RedhatRelease as rht_release from insights.parsers.uname import Uname -from insights.core.serde import serializer, deserializer -from insights.parsers import SkipComponent -from insights.util import deprecated Release = namedtuple("Release", field_names=["major", "minor"]) @@ -34,48 +34,6 @@ def deserialize(_type, obj, root=None): return Release(**obj) -@combiner([rht_release, Uname]) -def redhat_release(rh_release, un): - """ - .. warning:: - This combiner methode is deprecated, please use - :py:class:`insights.combiners.redhat_release.RedHatRelease` instead. - - Combiner method to check uname and redhat-release for rhel major/minor - version. - - Prefer uname to redhat-release. - - Returns: - Release: A named tuple with the following items: - - major: integer - - minor: integer - - Raises: - SkipComponent: If the version can't be determined even though a Uname - or RedhatRelease was provided. 
- - Examples: - >>> rh_release.major - 7 - >>> rh_release.minor - 2 - >>> rh_release - Release(major=7, minor=2) - - """ - - deprecated(redhat_release, "Use the `RedHatRelease` class instead.") - - if un and un.release_tuple[0] != -1: - return Release(*un.release_tuple) - - if rh_release: - return Release(rh_release.major, rh_release.minor) - - raise SkipComponent("Unabled to determine release.") - - @combiner([Uname, rht_release]) class RedHatRelease(object): """ @@ -90,6 +48,7 @@ class RedHatRelease(object): rhel6 (str): The RHEL version when it's RHEL6, otherwise None rhel7 (str): The RHEL version when it's RHEL7, otherwise None rhel8 (str): The RHEL version when it's RHEL8, otherwise None + rhel9 (str): The RHEL version when it's RHEL9, otherwise None Raises: SkipComponent: If the version can't be determined even though a Uname @@ -115,7 +74,7 @@ def __init__(self, uname, rh_rel): self.major = uname.redhat_release.major self.minor = uname.redhat_release.minor self.rhel = '{0}.{1}'.format(self.major, self.minor) - elif rh_rel and rh_rel.is_rhel: + elif rh_rel: self.major = rh_rel.major self.minor = rh_rel.minor self.rhel = rh_rel.version @@ -126,6 +85,7 @@ def __init__(self, uname, rh_rel): self.rhel6 = self.rhel if self.major == 6 else None self.rhel7 = self.rhel if self.major == 7 else None self.rhel8 = self.rhel if self.major == 8 else None + self.rhel9 = self.rhel if self.major == 9 else None @serializer(RedHatRelease) @@ -137,6 +97,7 @@ def serialize_RedHatRelease(obj, root=None): "rhel6": obj.rhel6, "rhel7": obj.rhel7, "rhel8": obj.rhel8, + "rhel9": obj.rhel9, } @@ -149,4 +110,5 @@ def deserialize_RedHatRelease(_type, obj, root=None): foo.rhel6 = obj.get("rhel6") foo.rhel7 = obj.get("rhel7") foo.rhel8 = obj.get("rhel8") + foo.rhel9 = obj.get("rhel9") return foo diff --git a/insights/combiners/rhel_for_edge.py b/insights/combiners/rhel_for_edge.py new file mode 100644 index 0000000000..066e5ef42e --- /dev/null +++ b/insights/combiners/rhel_for_edge.py @@ -0,0 +1,81 @@ +""" +Combiner for edge computing systems +=================================== +This combiner uses the following parsers to determine if the system is an edge computing systems. + +* :py:class:`insights.parsers.installed_rpms.InstalledRpms` +* :py:class:`insights.parsers.cmdline.CmdLine` +* :py:class:`insights.parsers.systemd.unitfiles.ListUnits` +* :py:class:`insights.parsers.redhat_release.RedhatRelease` +""" +from insights.core.exceptions import SkipComponent +from insights.core.plugins import combiner +from insights.parsers.cmdline import CmdLine +from insights.parsers.installed_rpms import InstalledRpms +from insights.parsers.systemd.unitfiles import ListUnits +from insights.parsers.redhat_release import RedhatRelease +from insights.parsers.rpm_ostree_status import RpmOstreeStatus + + +@combiner(ListUnits, optional=[RpmOstreeStatus, InstalledRpms, CmdLine, RedhatRelease]) +class RhelForEdge(object): + """Combiner for checking if the system is an edge computing system. Edge + computing as well as the Red Hat CoreOS packages are managed via rpm-ostree. + Use the string "Red Hat Enterprise Linux release" from + ``/etc/redhat-release`` to determine an edge computing system. The Red Hat + CoreOS system will have "Red Hat Enterprise Linux CoreOS release" as the + string. + + .. note:: + RHEL for EDGE is available and supported since RHEL 8.3. 
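A small illustration of the rpm-ostree origin test used in the constructor below; the origin string is hypothetical:

    origins = ["edge:rhel/8/x86_64/edge"]   # deployment origins reported by rpm-ostree status
    is_edge_image = bool(origins) and all(o.endswith("edge") for o in origins)   # True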
+
+ When an edge computing system (created from online console edge image) is
+ configured to use the automated management, the output of ``rhc status`` is
+ as below::
+
+ Connection status for :
+ - Connected to Red Hat Subscription Management
+ - The Red Hat connector daemon is active
+
+ The ``rhcd.service`` running on an edge computing system signifies that it
+ is configured to use the automated management.
+
+ Attributes:
+ is_edge (bool): True when it is an edge computing system
+ is_automated (bool): True when the edge computing system is configured to use automated management
+
+ .. note::
+ It is possible to run ``rhcd.service`` on the edge systems created
+ from the cockpit edge image. The **is_automated** attribute is only for
+ front-end resolution surface. It is used when the edge image is from
+ the online console.
+
+ Examples:
+ >>> type(rhel_for_edge_obj)
+
+ >>> rhel_for_edge_obj.is_edge
+ True
+ >>> rhel_for_edge_obj.is_automated
+ True
+
+ """
+
+ def __init__(self, units, rpmostreestatus, rpms, cmdline, redhatrelease):
+ self.is_edge = False
+ self.is_automated = False
+ if rpmostreestatus:
+ origin = rpmostreestatus.query.deployments.origin
+ origin_check = [item.value.endswith("edge") for item in origin]
+ if origin_check and all(origin_check):
+ self.is_edge = True
+ if units.is_running("rhcd.service"):
+ self.is_automated = True
+ elif rpms and cmdline and redhatrelease:
+ if ('rpm-ostree' in rpms and 'yum' not in rpms) and \
+ ('ostree' in cmdline) and \
+ ("red hat enterprise linux release" in redhatrelease.raw.lower()):
+ self.is_edge = True
+ if units.is_running("rhcd.service"):
+ self.is_automated = True
+ else:
+ raise SkipComponent("Unable to determine if this system is created from an edge image.")
diff --git a/insights/combiners/rsyslog_confs.py b/insights/combiners/rsyslog_confs.py
new file mode 100644
index 0000000000..ae6b32709f
--- /dev/null
+++ b/insights/combiners/rsyslog_confs.py
@@ -0,0 +1,43 @@
+"""
+RsyslogConfAll - files ``/etc/rsyslog.conf`` and ``/etc/rsyslog.d/*.conf``
+==========================================================================
+
+Combiner for accessing all the rsyslog configuration files. There may be
+multiple rsyslog configuration files, and the main configuration file is
+``/etc/rsyslog.conf``. This combiner does not check for the same option across
+multiple files; users need to handle that situation in a plugin if necessary.
+
+"""
+from insights.core.plugins import combiner
+from insights.parsers.rsyslog_conf import RsyslogConf
+
+
+@combiner(RsyslogConf)
+class RsyslogAllConf(dict):
+ """
+ Combiner for accessing all the rsyslog configuration files.
+
+ Examples:
+ >>> type(confs)
+
+ >>> len(confs)
+ 2
+ >>> confs['/etc/rsyslog.conf'][0]
+ '$ModLoad imuxsock'
+ """
+ def __init__(self, confs):
+ super(RsyslogAllConf, self).__init__()
+ data = {}
+
+ # Combine rsyslog configuration files into one dict. Key is file name, value is content of configuration file.
+ for conf in confs:
+ if conf.file_path == "/etc/rsyslog.conf":
+ # Check if there is include option, if not, only parse /etc/rsyslog.conf even if
+ # /etc/rsyslog.d/*.conf exist.
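+ # When no include directive is found, the /etc/rsyslog.d entries collected
+ # so far are discarded and iteration stops, so only the content of
+ # /etc/rsyslog.conf is exposed by the combiner.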
+ if not any(["include(" in item or "$IncludeConfig" in item for item in conf]): + data.clear() + data[conf.file_path] = conf + break + data[conf.file_path] = conf + + self.update(data) diff --git a/insights/combiners/sap.py b/insights/combiners/sap.py index 3c51a3e7a2..bf482bf66b 100644 --- a/insights/combiners/sap.py +++ b/insights/combiners/sap.py @@ -4,24 +4,41 @@ This combiner combines the result of insights.parsers.saphostctrl.SAPHostCtrlInstances` and `:class:`insights.parsers.lssap.Lssap` to get the available SAP instances. -Prefer the ``SAPHostCtrlInstances`` to ``Lssap``. +Prefer the ``SAPHostCtrlInstances`` to ``Lssap``. """ from collections import namedtuple from insights import SkipComponent from insights.core.plugins import combiner -from insights.combiners.hostname import hostname +from insights.combiners.hostname import Hostname from insights.parsers.lssap import Lssap from insights.parsers.saphostctrl import SAPHostCtrlInstances SAPInstances = namedtuple("SAPInstances", - field_names=["name", "hostname", "sid", "type", "number", "fqdn", "version"]) + field_names=["name", "hostname", "sid", "type", "full_type", "number", "fqdn", "version"]) """namedtuple: Type for storing the SAP instance.""" +FUNC_FULL_TYPES = [ + 'Solution Manager Diagnostic Agent', + 'Diagnostic Agent' +] +NETW_TYPES = ('D', 'ASCS', 'DVEBMGS', 'J', 'SCS', 'ERS', 'W', 'G', 'JC') +""" +D : NetWeaver (ABAP Dialog Instance) +ASCS : NetWeaver (ABAP Central Services) +DVEBMGS: NetWeaver (Primary Application server) +J : NetWeaver (Java App Server Instance) +SCS : NetWeaver (Java Central Services) +ERS : NetWeaver (Enqueue Replication Server) +W : NetWeaver (WebDispatcher) +G : NetWeaver (Gateway) +JC : NetWeaver (Java App Server Instance) +""" -@combiner(hostname, [SAPHostCtrlInstances, Lssap]) + +@combiner(Hostname, [SAPHostCtrlInstances, Lssap]) class Sap(dict): """ Combiner for combining the result of :class:`insights.parsers.lssap.Lssap` @@ -59,11 +76,11 @@ class Sap(dict): E.g. 
HANA, NetWeaver, ASCS, or others local_instances (list): List of all SAP instances running on this host """ - FUNC_INSTS = ('SMDA',) """ tuple: Tuple of the prefix string of the functional SAP instances""" def __init__(self, hostname, insts, lssap): hn = hostname.hostname + fqdn = hostname.fqdn data = {} self.local_instances = [] self.business_instances = [] @@ -71,14 +88,18 @@ def __init__(self, hostname, insts, lssap): self.all_instances = [] self._types = set() if insts: - for inst in insts.data: + self._types = insts.types + self.all_instances = insts.instances + for inst in insts: k = inst['InstanceName'] - self.all_instances.append(k) - self._types.add(inst['InstanceType']) - self.local_instances.append(k) if hn == inst['Hostname'] else None + if (hn == inst['Hostname'].split('.')[0] or + fqdn == inst['FullQualifiedHostname'] or + fqdn == inst['Hostname']): + self.local_instances.append(k) data[k] = SAPInstances(k, inst['Hostname'], inst['SID'], + inst['InstanceName'].strip('1234567890'), inst['InstanceType'], inst['SystemNumber'], inst['FullQualifiedHostname'], @@ -89,11 +110,12 @@ def __init__(self, hostname, insts, lssap): t = k.rstrip('1234567890') self.all_instances.append(k) self._types.add(t) - self.local_instances.append(k) if hn == inst['SAPLOCALHOST'] else None + self.local_instances.append(k) if hn == inst['SAPLOCALHOST'].split('.')[0] else None data[k] = SAPInstances(k, inst['SAPLOCALHOST'], inst['SID'], t, + t, # Use short inst['Nr'], None, inst['Version']) @@ -102,8 +124,10 @@ def __init__(self, hostname, insts, lssap): self.update(data) - for i in self.all_instances: - (self.function_instances if i.startswith(self.FUNC_INSTS) else self.business_instances).append(i) + for i in self.values(): + (self.function_instances + if i.full_type in FUNC_FULL_TYPES else + self.business_instances).append(i.name) def version(self, instance): """str: Returns the version of the ``instance``.""" @@ -114,9 +138,13 @@ def sid(self, instance): return self[instance].sid if instance in self else None def type(self, instance): - """str: Returns the type code of the ``instance``.""" + """str: Returns the short type code of the ``instance``.""" return self[instance].type if instance in self else None + def full_type(self, instance): + """str: Returns the full type code of the ``instance``.""" + return self[instance].full_type if instance in self else None + def hostname(self, instance): """str: Returns the hostname of the ``instance``.""" return self[instance].hostname if instance in self else None @@ -128,7 +156,7 @@ def number(self, instance): @property def is_netweaver(self): """bool: Is any SAP NetWeaver instance detected?""" - return 'D' in self._types + return any(_t in self._types for _t in NETW_TYPES) @property def is_hana(self): @@ -137,7 +165,7 @@ def is_hana(self): @property def is_ascs(self): - """bool: Is any SAP System Central Services instance detected?""" + """bool: Is any ABAP Central Services instance detected?""" return 'ASCS' in self._types @property diff --git a/insights/combiners/satellite_version.py b/insights/combiners/satellite_version.py index ae282cad03..8949da9ab0 100644 --- a/insights/combiners/satellite_version.py +++ b/insights/combiners/satellite_version.py @@ -168,7 +168,7 @@ def __init__(self, rpms, sat6_ver): raise SkipComponent("Not a Satellite machine or unable to determine Satellite version") -@combiner(InstalledRpms) +@combiner(InstalledRpms, optional=[SatelliteVersion]) class CapsuleVersion(object): """ Check the parser @@ -210,17 +210,18 @@ class 
CapsuleVersion(object): >>> cap_ver.release '1.el7sat' """ - def __init__(self, rpms): + def __init__(self, rpms, sat_server): self.full = None self.version = None self.release = None self.major = None self.minor = None + if sat_server: + raise SkipComponent('Not a Satellite Capsule machine') # For Capsule, ONLY 6.2 and newer are supported sat62_pkg = rpms.get_max('satellite-capsule') - # foreman package should not be there on Capsule Server - if sat62_pkg and 'foreman' not in rpms: + if sat62_pkg: self.full = sat62_pkg.package self.version = sat62_pkg.version self.release = sat62_pkg.release diff --git a/insights/combiners/smt.py b/insights/combiners/smt.py index 467b9aff25..25e5ef7909 100644 --- a/insights/combiners/smt.py +++ b/insights/combiners/smt.py @@ -43,7 +43,13 @@ def __init__(self, cpu_online, cpu_siblings): max_cpu_core_id = max([core.core_id for core in cpu_online]) for n in range(max_cpu_core_id + 1): online = [core for core in cpu_online if core.core_id == n] - online = online[0].on + # On some boxes cpu0 doesn't have the online file, since technically cpu0 will always + # be online. So check if online returns anything before trying to access online[0]. + # If it returns nothing and n is 0 set online to True. + if online: + online = online[0].on + elif not online and n == 0: + online = True siblings = [sibling for sibling in cpu_siblings if sibling.core_id == n] if len(siblings) != 0: siblings = siblings[0].siblings diff --git a/insights/combiners/ssl_certificate.py b/insights/combiners/ssl_certificate.py new file mode 100644 index 0000000000..8ccb0014f5 --- /dev/null +++ b/insights/combiners/ssl_certificate.py @@ -0,0 +1,99 @@ +""" +Combiners for getting the earliest expiry date from a lot of SSL certificates +============================================================================= + +This module contains the following combiners: + +EarliestNginxSSLCertExpireDate - The earliest expire date in a lot of nginx ssl certificates +-------------------------------------------------------------------------------------------- +Combiner to get the earliest expire date in a lot of nginx ssl certificates. + +EarliestHttpdSSLCertExpireDate - The earliest expire date in a lot of httpd ssl certificates +-------------------------------------------------------------------------------------------- +Combiner to get the earliest expire date in a lot of httpd ssl certificates. + +EarliestHttpdCertInNSSExpireDate - The earliest expire date in a lot of httpd certificates stored in nss database +----------------------------------------------------------------------------------------------------------------- +Combiner to get the earliest expire date in a lot of httpd certificates stored in nss database. +""" +from insights.core.exceptions import SkipComponent +from insights.core.plugins import combiner +from insights.parsers.certificates_enddate import CertificatesEnddate +from insights.parsers.ssl_certificate import HttpdCertInfoInNSS, NginxSSLCertExpireDate, HttpdSSLCertExpireDate + + +class EarliestSSLCertExpireDate(object): + """ + The base class to get the earliest expiry date from a lot of :class:`insights.parsers.ssl_certificate.CertificateInfo` instances. + + Attributes: + earliest_expire_date (str): The earliest expire date in string format. + ssl_cert_path (str): The SSL certificate path which is expired first. 
+
+ Examples:
+ >>> type(ssl_certs)
+
+ >>> ssl_certs.earliest_expire_date.str
+ 'Dec 18 07:02:43 2021'
+ >>> ssl_certs.ssl_cert_path
+ '/test/b.pem'
+ """
+ def __init__(self, certificate_info_list):
+ self.earliest_expire_date = None
+ self.ssl_cert_path = None
+ for ssl_cert_expiry_date in certificate_info_list:
+ if (self.earliest_expire_date is None or
+ (isinstance(ssl_cert_expiry_date.get('notAfter', ''), CertificatesEnddate.ExpirationDate) and
+ ssl_cert_expiry_date['notAfter'].datetime < self.earliest_expire_date.datetime)):
+ self.earliest_expire_date = ssl_cert_expiry_date['notAfter']
+ self.ssl_cert_path = ssl_cert_expiry_date.cert_path
+ if self.earliest_expire_date is None:
+ raise SkipComponent
+
+
+@combiner(NginxSSLCertExpireDate)
+class EarliestNginxSSLCertExpireDate(EarliestSSLCertExpireDate):
+ """
+ Combiner to get the earliest expire date in a lot of nginx ssl certificates.
+
+ Examples:
+ >>> type(nginx_certs)
+
+ >>> nginx_certs.earliest_expire_date.str
+ 'Dec 18 07:02:43 2021'
+ >>> nginx_certs.ssl_cert_path
+ '/test/d.pem'
+ """
+ pass
+
+
+@combiner(HttpdSSLCertExpireDate)
+class EarliestHttpdSSLCertExpireDate(EarliestSSLCertExpireDate):
+ """
+ Combiner to get the earliest expire date in a lot of httpd ssl certificates.
+
+ Examples:
+ >>> type(httpd_certs)
+
+ >>> httpd_certs.earliest_expire_date.str
+ 'Dec 18 07:02:43 2021'
+ >>> httpd_certs.ssl_cert_path
+ '/test/d.pem'
+ """
+ pass
+
+
+@combiner(HttpdCertInfoInNSS)
+class EarliestHttpdCertInNSSExpireDate(EarliestSSLCertExpireDate):
+ """
+ Combiner to get the earliest expire date in a lot of httpd certificates stored in NSS database.
+
+ Examples:
+ >>> type(httpd_certs_in_nss)
+
+ >>> httpd_certs_in_nss.earliest_expire_date.str
+ 'Sun Jan 07 05:26:10 2022'
+ >>> httpd_certs_in_nss.ssl_cert_path
+ ('/etc/httpd/nss', 'testcerta')
+ """
+ pass
diff --git a/insights/combiners/sudoers.py b/insights/combiners/sudoers.py
new file mode 100644
index 0000000000..e863e8d028
--- /dev/null
+++ b/insights/combiners/sudoers.py
@@ -0,0 +1,51 @@
+"""
+Sudoers - files ``/etc/sudoers`` or ``/etc/sudoers.d/*``
+========================================================
+
+Module for combining the parsing results of ``/etc/sudoers`` and
+``/etc/sudoers.d/*`` files.
+"""
+from insights import combiner
+from insights.parsers.sudoers import SudoersBase, EtcSudoers
+
+
+@combiner(EtcSudoers)
+class Sudoers(SudoersBase):
+ """
+ Class to combine the ``/etc/sudoers`` and ``/etc/sudoers.d/*`` files.
+
+ Attributes:
+ lines(list): The list of RAW lines of all the ``/etc/sudoers`` and
+ ``/etc/sudoers.d/*`` files. The lines keep their original order
+ within each file, and the files are read in alphabetical order of
+ file name.
+
+ .. note::
+ 1. If there is no `"#includedir /etc/sudoers.d"` line in the entry
+ file ``/etc/sudoers``, the ``/etc/sudoers.d/*`` files will be
+ skipped.
+
+ 2. Two helper functions :func:`insights.parsers.sudoers.SudoersBase.get()`
+ and :func:`insights.parsers.sudoers.SudoersBase.last()` are also
+ provided to quickly get the specified line(s).
+ For details, see the super-class:
+ :class:`insights.parsers.sudoers.SudoersBase`.
+
+ Examples:
+ >>> type(sudo)
+
+ >>> sudo.get(['wheel', 'ALL=(ALL)', 'ALL'])
+ ['%wheel ALL=(ALL) ALL']
+ >>> sudo.last("#includedir")
+ '#includedir /etc/sudoers.d'
+ """
+ def __init__(self, sudoers):
+ self.lines = []
+ first = False
+ for sdr in sorted(sudoers, key=lambda x: x.file_path):
+ self.lines.extend(sdr.lines)
+ if not first:
+ first = True
+ include = sdr.last('#includedir')
+ if not include or '/etc/sudoers.d' not in include.split()[-1]:
+ break
diff --git a/insights/combiners/sys_vmbus_devices.py b/insights/combiners/sys_vmbus_devices.py
new file mode 100644
index 0000000000..e3b7d133ce
--- /dev/null
+++ b/insights/combiners/sys_vmbus_devices.py
@@ -0,0 +1,48 @@
+"""
+VMBus device info
+=================
+"""
+from insights.core.plugins import combiner
+from insights.parsers.sys_vmbus import SysVmbusDeviceID, SysVmbusClassID
+
+
+@combiner(SysVmbusDeviceID, SysVmbusClassID)
+class SysVmBusDeviceInfo(object):
+ '''
+ Combiner to access all the VMBus devices.
+
+ Attributes:
+ devices (list): A list of dicts, one per VMBus device.
+
+ Sample output::
+
+ [
+ {
+ 'device_id': '47505500-0001-0000-3130-444531444234',
+ 'class_id': '44c4f61d-4444-4400-9d52-802e27ede19f',
+ 'description': 'PCI Express pass-through'
+ }
+ ]
+
+ Examples:
+ >>> len(output.devices)
+ 2
+ >>> output.devices[0].get('device_id', '')
+ '47505500-0001-0000-3130-444531444234'
+ >>> output.devices[0].get('class_id', '')
+ '44c4f61d-4444-4400-9d52-802e27ede19f'
+ >>> output.devices[0].get('description', '')
+ 'PCI Express pass-through'
+ '''
+ def __init__(self, device_id, class_id):
+ self.devices = []
+ for d in device_id:
+ for c in class_id:
+ if d.id in c.file_path:
+ self.devices.append(
+ {
+ 'device_id': d.id,
+ 'class_id': c.id,
+ 'description': c.desc
+ }
+ )
diff --git a/insights/combiners/sysctl_conf.py b/insights/combiners/sysctl_conf.py
new file mode 100644
index 0000000000..f182064d4c
--- /dev/null
+++ b/insights/combiners/sysctl_conf.py
@@ -0,0 +1,71 @@
+"""
+Sysctl configuration files
+==========================
+"""
+import re
+
+from insights.core.plugins import combiner
+from insights.parsers.sysctl import SysctlConf, SysctlDConfEtc, SysctlDConfUsr
+
+
+@combiner([SysctlConf, SysctlDConfUsr, SysctlDConfEtc])
+class SysctlConfs(dict):
+ """
+ Combiner for accessing all the sysctl configuration files in one
+ structure.
+
+ Sample input::
+
+ # sysctl.conf sample
+ #
+ kernel.domainname = example.com
+
+ ; this one has a space which will be written to the sysctl!
+ kernel.modprobe = /sbin/mod probe
+
+ Attributes:
+ search(dict): Returns a dict of any kv pairs where the key
+ contains the search word.
+
+ Examples:
+ >>> type(sysctl_conf)
+
+ >>> sysctl_conf['kernel.domainname']
+ 'example.com'
+ >>> sysctl_conf['kernel.modprobe']
+ '/sbin/mod probe'
+ >>> sysctl_conf['kernel.sysrq']
+ '1'
+ >>> "vm.dirty_ratio" in sysctl_conf
+ True
+ >>> sysctl_conf.search("domainname")
+ {'kernel.domainname': 'example.com'}
+ """
+ def __init__(self, sysctl_conf, sysctl_d_confs_usr, sysctl_d_confs_etc):
+ super(SysctlConfs, self).__init__()
+ if sysctl_d_confs_usr:
+ # Sort based on the filename to make sure
+ # entries are overridden in the correct order.
+ sysctl_d_confs_usr = sorted(sysctl_d_confs_usr, key=lambda x: x.file_name)
+
+ for conf in sysctl_d_confs_usr:
+ self.update(conf)
+
+ if sysctl_d_confs_etc:
+ # Sort based on the filename to make sure
+ # entries are overridden in the correct order.
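+ # Files are combined in precedence order: /usr/lib/sysctl.d first, then
+ # /etc/sysctl.d, and finally /etc/sysctl.conf, so later updates override
+ # earlier values for duplicate keys.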
+ sysctl_d_confs_etc = sorted(sysctl_d_confs_etc, key=lambda x: x.file_name) + + for conf in sysctl_d_confs_etc: + self.update(conf) + + if sysctl_conf: + self.update(sysctl_conf) + + def search(self, s_word): + found = dict() + for key, val in self.items(): + if re.search(s_word, key): + found[key] = val + + return found diff --git a/insights/combiners/tests/test_httpd_conf.py b/insights/combiners/tests/test_httpd_conf.py deleted file mode 100644 index b402b8ba48..0000000000 --- a/insights/combiners/tests/test_httpd_conf.py +++ /dev/null @@ -1,494 +0,0 @@ -from insights.parsers.httpd_conf import HttpdConf, ParsedData -from insights.combiners.httpd_conf import HttpdConfAll -from insights.tests import context_wrap - -HTTPD_CONF_1 = ''' -JustFotTest_NoSec "/var/www/cgi" -# prefork MPM - -ServerLimit 256 -ThreadsPerChild 16 -JustForTest "AB" -MaxClients 256 - - -IncludeOptional conf.d/*.conf -'''.strip() - -HTTPD_CONF_2 = ''' -JustForTest_NoSec "/var/www/cgi" -# prefork MPM - -ServerLimit 1024 -JustForTest "ABC" -MaxClients 1024 - -'''.strip() - -HTTPD_CONF_3 = ''' -# prefork MPM - -ServerLimit 256 -MaxClients 512 - -'''.strip() - -HTTPD_CONF_SHADOWTEST_1 = ''' -Foo 1A -Foo 1B -Foo 1C - -Foo 1xA -Foo 1xB -Foo 1xC -Bar 1A -Bar 1B -Bar 1C - - -IncludeOptional conf.d/*.conf -'''.strip() - -HTTPD_CONF_SHADOWTEST_2 = ''' -Foo 2A -Foo 2B -Foo 2C - -Foo 2xA -Foo 2xB -Foo 2xC -Bar 2A -Bar 2B -Bar 2C - -'''.strip() - -HTTPD_CONF_SHADOWTEST_3 = ''' -Foo 3A -Foo 3B -Foo 3C - -Foo 3xA -Foo 3xB -Foo 3xC -Bar 3A -Bar 3B -Bar 3C - -'''.strip() - - -HTTPD_CONF_MAIN_1 = ''' -ServerRoot "/etc/httpd" -Listen 80 - -# Load config files in the "/etc/httpd/conf.d" directory, if any. -IncludeOptional conf.d/*.conf -'''.strip() - -HTTPD_CONF_MAIN_2 = ''' -# Load config files in the "/etc/httpd/conf.d" directory, if any. -IncludeOptional conf.d/*.conf - -ServerRoot "/etc/httpd" -Listen 80 -'''.strip() - -HTTPD_CONF_MAIN_3 = ''' -ServerRoot "/etc/httpd" - -# Load config files in the "/etc/httpd/conf.d" directory, if any. 
-IncludeOptional conf.d/*.conf - -Listen 80 -'''.strip() - -HTTPD_CONF_FILE_1 = ''' -ServerRoot "/home/skontar/httpd" -Listen 8080 -'''.strip() - -HTTPD_CONF_FILE_2 = ''' -ServerRoot "/home/skontar/www" -'''.strip() - -HTTPD_CONF_MORE = ''' -UserDir disable -UserDir enable bob -'''.strip() - -HTTPD_CONF_NEST_1 = """ - - - Options FollowSymLinks - AllowOverride None - - - php_admin_flag safe_mode Off - php_admin_value register_globals 0 - - DirectoryIndex index.php - - RewriteEngine On - RewriteRule .* /index.php - - - RewriteEngine Off - - - - - Order allow,deny - Deny from all - - - Order deny,allow - - - - DocumentRoot /var/www/example - ServerName www.example.com - ServerAlias admin.example.com - - - - - - Order allow,deny - Deny from all - - - - - - RewriteEngine Off - -LogLevel warn -DocumentRoot "/var/www/html_cgi" -IncludeOptional conf.d/*.conf -EnableSendfile on -""".strip() - -HTTPD_CONF_NEST_2 = """ -DocumentRoot "/var/www/html" - - - - - Order allow,deny - Deny from all - - - Order deny,allow - - - - DocumentRoot /var/www/example1 - ServerName www.example1.com - ServerAlias admin.example1.com - - - - - - Order deny,allow - Allow from all - - - Order deny,allow - - - - - - RewriteEngine On - -EnableSendfile off -""".strip() - -HTTPD_CONF_NEST_3 = """ - - - Testphp php5_v3_1 - - Testphp php4_v3_1 - - Testphp php5_v3_2 - - - - Testphp php5_3_a - - Testphp php4_3_a - - -""".strip() - -HTTPD_CONF_NEST_4 = """ - - - Testphp php5_v4_1 - - Testphp php4_v4_1 - - Testphp php5_v4_2 - - - - Testphp php5_4_b - - Testphp php4_4_b - - -""".strip() - - -def test_active_httpd_directory(): - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_NEST_1, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_NEST_2, path='/etc/httpd/conf.d/00-z.conf')) - result = HttpdConfAll([httpd1, httpd2]) - assert result.get_section_list("Directory") == [(('Directory', '/var/www/example'), 'httpd.conf', '/etc/httpd/conf/httpd.conf')] - assert result.get_section_list("asdf") == [] - assert result.get_section_list(123456) == [] - - -def test_active_httpd_nest_1(): - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_NEST_1, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_NEST_2, path='/etc/httpd/conf.d/00-z.conf')) - result = HttpdConfAll([httpd1, httpd2]) - assert result.get_setting_list('Order1', ('FilesMatch', 'php')) == [] - assert result.get_setting_list('Order', ('FilesMatch', 'pdf')) == [] - php_fm_order = result.get_setting_list('Order', section=('FilesMatch', 'php')) - assert { - ('FilesMatch', '".php[45]?$"'): [ - ('allow,deny', 'Order allow,deny', 'FilesMatch', '".php[45]?$"', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]?$"', '00-z.conf', '/etc/httpd/conf.d/00-z.conf')] - } in php_fm_order - assert { - ('FilesMatch', '".php[45]"'): [ - ('allow,deny', 'Order allow,deny', 'FilesMatch', '".php[45]"', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]"', '00-z.conf', '/etc/httpd/conf.d/00-z.conf')], - } in php_fm_order - assert { - ('FilesMatch', '".php[45]?$"'): [ - ('allow,deny', 'Order allow,deny', 'FilesMatch', '".php[45]?$"', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]?$"', 'httpd.conf', '/etc/httpd/conf/httpd.conf')] - } in php_fm_order - re_im = result.get_setting_list('RewriteEngine', 'IfModule') - assert { - ('IfModule', 'mod_rewrite.c'): [ - ('On', 'RewriteEngine On', 
'IfModule', 'mod_rewrite.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('Off', 'RewriteEngine Off', 'IfModule', 'mod_rewrite.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf')] - } in re_im - assert { - ('IfModule', 'mod_rewrite.c'): [ - ('Off', 'RewriteEngine Off', 'IfModule', 'mod_rewrite.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('On', 'RewriteEngine On', 'IfModule', 'mod_rewrite.c', '00-z.conf', '/etc/httpd/conf.d/00-z.conf')] - } in re_im - assert sorted(result.get_setting_list('EnableSendfile')) == sorted([ - ('off', 'EnableSendfile off', None, None, '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ('on', 'EnableSendfile on', None, None, 'httpd.conf', '/etc/httpd/conf/httpd.conf')]) - assert result.get_setting_list('LogLevel') == [ - ('warn', 'LogLevel warn', None, None, 'httpd.conf', '/etc/httpd/conf/httpd.conf')] - assert result.get_setting_list('LogLevel1') == [] - - assert result.get_active_setting('Order1', ('FilesMatch', 'php')) == [] - assert result.get_active_setting('Order', ('FilesMatch', 'pdf')) == [] - assert len(result.get_active_setting('Order', ('FilesMatch', '.php[45]?$'))) == 2 - assert len(result.get_active_setting('Order', ('FilesMatch',))) == 4 - assert len(result.get_active_setting('Order', ('FilesMatch', '.php[45]'))) == 3 - assert sorted(result.get_active_setting('Order', section=('FilesMatch', 'php'))) == sorted([ - ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]?$"', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]"', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ('deny,allow', 'Order deny,allow', 'FilesMatch', '".php[45]?$"', 'httpd.conf', '/etc/httpd/conf/httpd.conf')]) - assert sorted(result.get_active_setting('RewriteEngine', section='IfModule')) == sorted([ - ('Off', 'RewriteEngine Off', 'IfModule', 'mod_rewrite.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('On', 'RewriteEngine On', 'IfModule', 'mod_rewrite.c', '00-z.conf', '/etc/httpd/conf.d/00-z.conf')]) - assert result.get_active_setting('EnableSendfile').line == 'EnableSendfile on' - assert result.get_active_setting('Deny', ('FilesMatch', 'test')) == [] - assert result.get_active_setting('Allow', ('FilesMatch', 'test'))[0].value == 'from all' - assert result.get_active_setting('Deny', section=('IfModule',)) == [] - assert result.get_active_setting('MaxClients', section=('IfModule', 'prefork')) == [] - assert result.get_active_setting('RewriteRule', section=('IfModule', 'mod_rewrite.c'))[0].line == "RewriteRule .* /index.php" - assert result.get_active_setting("DocumentRoot").value == '/var/www/html' - assert result.get_active_setting('RewriteRule', section=('IfModule', 'mod_rewrite.c', 'invalid_test')) == [] - assert result.get_active_setting('LogLevel') == ('warn', 'LogLevel warn', None, None, 'httpd.conf', '/etc/httpd/conf/httpd.conf') - assert result.get_active_setting('LogLevel1') is None - - -def test_active_httpd_nest_2(): - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_NEST_3, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_NEST_4, path='/etc/httpd/conf.d/00-z.conf')) - result = HttpdConfAll([httpd1, httpd2]) - testphp_im = result.get_setting_list('Testphp', 'IfModule') - assert {('IfModule', '!php5_module'): [ - ('php5_v3_1', 'Testphp php5_v3_1', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('php5_v3_2', 'Testphp php5_v3_2', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf') - ]} in testphp_im - assert {('IfModule', 
'!php4_module'): [ - ('php4_v3_1', 'Testphp php4_v3_1', 'IfModule', '!php4_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf') - ]} in testphp_im - assert {('IfModule', '!php5_module'): [ - ('php5_v4_1', 'Testphp php5_v4_1', 'IfModule', '!php5_module', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ('php5_v4_2', 'Testphp php5_v4_2', 'IfModule', '!php5_module', '00-z.conf', '/etc/httpd/conf.d/00-z.conf') - ]} in testphp_im - assert {('IfModule', '!php4_module'): [ - ('php4_v4_1', 'Testphp php4_v4_1', 'IfModule', '!php4_module', '00-z.conf', '/etc/httpd/conf.d/00-z.conf') - ]} in testphp_im - assert {('IfModule', '!php5_module'): [ - ('php5_3_a', 'Testphp php5_3_a', 'IfModule', '!php5_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('php5_4_b', 'Testphp php5_4_b', 'IfModule', '!php5_module', '00-z.conf', '/etc/httpd/conf.d/00-z.conf') - ]} in testphp_im - assert {('IfModule', '!php4_module'): [ - ('php4_3_a', 'Testphp php4_3_a', 'IfModule', '!php4_module', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ('php4_4_b', 'Testphp php4_4_b', 'IfModule', '!php4_module', '00-z.conf', '/etc/httpd/conf.d/00-z.conf') - ]} in testphp_im - - -def test_active_httpd(): - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_1, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_2, path='/etc/httpd/conf.d/00-z.conf')) - httpd3 = HttpdConf(context_wrap(HTTPD_CONF_3, path='/etc/httpd/conf.d/z-z.conf')) - - result = HttpdConfAll([httpd1, httpd2, httpd3]) - assert result.get_active_setting('MaxClients', section=('IfModule', 'prefork.c'))[0].value == '512' - assert result.get_active_setting('MaxClients', section=('IfModule', 'prefork.c'))[0].file_path == '/etc/httpd/conf.d/z-z.conf' - assert result.get_active_setting('ThreadsPerChild', section=('IfModule', - 'prefork.c'))[0].value == '16' - assert result.get_active_setting("MaxClients", ("IfModule", "prefork")) == [ - ParsedData(value='512', line='MaxClients 512', - section='IfModule', section_name='prefork.c', - file_name='z-z.conf', file_path='/etc/httpd/conf.d/z-z.conf')] - assert result.get_active_setting('ServerLimit', section=('IfModule', 'prefork.c'))[0].value == '256' - assert result.get_active_setting('JustForTest', section=('IfModule', 'prefork.c'))[-1].file_name == '00-z.conf' - assert result.get_active_setting('JustForTest_NoSec').line == 'JustForTest_NoSec "/var/www/cgi"' - - -def test_shadowing(): - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_SHADOWTEST_1, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_SHADOWTEST_2, path='/etc/httpd/conf.d/00-z.conf')) - httpd3 = HttpdConf(context_wrap(HTTPD_CONF_SHADOWTEST_3, path='/etc/httpd/conf.d/z-z.conf')) - - result = HttpdConfAll([httpd1, httpd2, httpd3]) - - # get_setting_list returns ALL matching data - - assert result.get_setting_list('Foo') == [ - ParsedData('1A', 'Foo 1A', None, None, 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ParsedData('1B', 'Foo 1B', None, None, 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ParsedData('1C', 'Foo 1C', None, None, 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ParsedData('2A', 'Foo 2A', None, None, '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ParsedData('2B', 'Foo 2B', None, None, '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ParsedData('2C', 'Foo 2C', None, None, '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ParsedData('3A', 'Foo 3A', None, None, 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ParsedData('3B', 'Foo 3B', None, None, 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ParsedData('3C', 'Foo 3C', 
None, None, 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ] - assert result.get_setting_list('Bar', section=('IfModule', 'prefork.c')) == [ - {('IfModule', 'prefork.c'): [ - ParsedData('1A', 'Bar 1A', 'IfModule', 'prefork.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ParsedData('1B', 'Bar 1B', 'IfModule', 'prefork.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ParsedData('1C', 'Bar 1C', 'IfModule', 'prefork.c', 'httpd.conf', '/etc/httpd/conf/httpd.conf'), - ParsedData('3A', 'Bar 3A', 'IfModule', 'prefork.c', 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ParsedData('3B', 'Bar 3B', 'IfModule', 'prefork.c', 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ParsedData('3C', 'Bar 3C', 'IfModule', 'prefork.c', 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ], - }, - {('IfModule', 'ASDF.prefork.c.ASDF'): [ - ParsedData('2A', 'Bar 2A', 'IfModule', 'ASDF.prefork.c.ASDF', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ParsedData('2B', 'Bar 2B', 'IfModule', 'ASDF.prefork.c.ASDF', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ParsedData('2C', 'Bar 2C', 'IfModule', 'ASDF.prefork.c.ASDF', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ], - }, - ] - assert result.get_setting_list('Bar') == [] - - # get_active_setting returns the last value - - assert result.get_active_setting('Foo') == ('3C', 'Foo 3C', None, None, 'z-z.conf', '/etc/httpd/conf.d/z-z.conf') - assert result.get_active_setting('Bar', section=('IfModule', 'prefork.c')) == [ - ('3C', 'Bar 3C', 'IfModule', 'prefork.c', 'z-z.conf', '/etc/httpd/conf.d/z-z.conf'), - ('2C', 'Bar 2C', 'IfModule', 'ASDF.prefork.c.ASDF', '00-z.conf', '/etc/httpd/conf.d/00-z.conf'), - ] - assert result.get_active_setting('Bar') is None - - -def test_httpd_splits(): - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_MAIN_1, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_FILE_1, path='/etc/httpd/conf.d/00-a.conf')) - httpd3 = HttpdConf(context_wrap(HTTPD_CONF_FILE_2, path='/etc/httpd/conf.d/01-b.conf')) - result = HttpdConfAll([httpd1, httpd2, httpd3]) - assert result.get_active_setting('ServerRoot').value == '/home/skontar/www' - assert result.get_active_setting('ServerRoot').line == 'ServerRoot "/home/skontar/www"' - assert result.get_active_setting('ServerRoot').file_name == '01-b.conf' - assert result.get_active_setting('ServerRoot').file_path == '/etc/httpd/conf.d/01-b.conf' - assert result.get_active_setting('Listen').value == '8080' - assert result.get_active_setting('Listen').line == 'Listen 8080' - assert result.get_active_setting('Listen').file_name == '00-a.conf' - assert result.get_active_setting('Listen').file_path == '/etc/httpd/conf.d/00-a.conf' - - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_MAIN_2, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_FILE_1, path='/etc/httpd/conf.d/00-a.conf')) - httpd3 = HttpdConf(context_wrap(HTTPD_CONF_FILE_2, path='/etc/httpd/conf.d/01-b.conf')) - - result = HttpdConfAll([httpd1, httpd2, httpd3]) - assert result.get_active_setting('ServerRoot').value == '/etc/httpd' - assert result.get_active_setting('ServerRoot').line == 'ServerRoot "/etc/httpd"' - assert result.get_active_setting('ServerRoot').file_name == 'httpd.conf' - assert result.get_active_setting('ServerRoot').file_path == '/etc/httpd/conf/httpd.conf' - assert result.get_active_setting('Listen').value == '80' - assert result.get_active_setting('Listen').line == 'Listen 80' - assert result.get_active_setting('Listen').file_name == 'httpd.conf' - assert result.get_active_setting('Listen').file_path == 
'/etc/httpd/conf/httpd.conf' - - httpd1 = HttpdConf(context_wrap(HTTPD_CONF_MAIN_3, path='/etc/httpd/conf/httpd.conf')) - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_FILE_1, path='/etc/httpd/conf.d/00-a.conf')) - httpd3 = HttpdConf(context_wrap(HTTPD_CONF_FILE_2, path='/etc/httpd/conf.d/01-b.conf')) - - result = HttpdConfAll([httpd1, httpd2, httpd3]) - assert result.get_active_setting('ServerRoot').value == '/home/skontar/www' - assert result.get_active_setting('ServerRoot').line == 'ServerRoot "/home/skontar/www"' - assert result.get_active_setting('ServerRoot').file_name == '01-b.conf' - assert result.get_active_setting('ServerRoot').file_path == '/etc/httpd/conf.d/01-b.conf' - assert result.get_active_setting('Listen').value == '80' - assert result.get_active_setting('Listen').line == 'Listen 80' - assert result.get_active_setting('Listen').file_name == 'httpd.conf' - assert result.get_active_setting('Listen').file_path == '/etc/httpd/conf/httpd.conf' - - # Test is data from inactive configs are also stored - assert [a.file_name for a in result.config_data] == ['httpd.conf', '00-a.conf', '01-b.conf', 'httpd.conf'] - assert result.config_data[1].file_name == '00-a.conf' - assert result.config_data[1].file_path == '/etc/httpd/conf.d/00-a.conf' - assert result.config_data[1].full_data_dict['Listen'][0].value == '8080' - assert result.config_data[1].full_data_dict['Listen'][0].line == 'Listen 8080' - - -def test_httpd_no_main_config(): - httpd2 = HttpdConf(context_wrap(HTTPD_CONF_FILE_1, path='/etc/httpd/conf.d/00-a.conf')) - httpd3 = HttpdConf(context_wrap(HTTPD_CONF_FILE_2, path='/etc/httpd/conf.d/01-b.conf')) - result = HttpdConfAll([httpd2, httpd3]) - assert [a.file_name for a in result.config_data] == ['00-a.conf', '01-b.conf'] - - -def test_httpd_one_file_overwrites(): - httpd = HttpdConf(context_wrap(HTTPD_CONF_MORE, path='/etc/httpd/conf/httpd.conf')) - result = HttpdConfAll([httpd]) - - active_setting = result.get_active_setting('UserDir') - assert active_setting.value == 'enable bob' - assert active_setting.line == 'UserDir enable bob' - assert active_setting.file_path == '/etc/httpd/conf/httpd.conf' - assert active_setting.file_name == 'httpd.conf' - - setting_list = result.get_setting_list('UserDir') - assert len(setting_list) == 2 - assert setting_list[0].value == 'disable' - assert setting_list[0].line == 'UserDir disable' - assert setting_list[0].file_path == '/etc/httpd/conf/httpd.conf' - assert setting_list[0].file_name == 'httpd.conf' - assert setting_list[0].section is None - assert setting_list[1].value == 'enable bob' - assert setting_list[1].line == 'UserDir enable bob' - assert setting_list[1].file_path == '/etc/httpd/conf/httpd.conf' - assert setting_list[1].file_name == 'httpd.conf' - assert setting_list[1].section_name is None diff --git a/insights/combiners/tests/test_modinfo.py b/insights/combiners/tests/test_modinfo.py deleted file mode 100644 index 9b104db10c..0000000000 --- a/insights/combiners/tests/test_modinfo.py +++ /dev/null @@ -1,215 +0,0 @@ -from insights.parsers.modinfo import ModInfoEach, ModInfoAll -from insights.combiners.modinfo import ModInfo -from insights.parsers.tests.test_modinfo import ( - MODINFO_I40E, MODINFO_INTEL, - MODINFO_BNX2X, MODINFO_IGB, MODINFO_IXGBE, - MODINFO_VMXNET3, MODINFO_VETH) -from insights.tests import context_wrap -import doctest -from insights.combiners import modinfo -from insights.core.dr import SkipComponent -import pytest - - -def test_modinfo_each(): - - with pytest.raises(SkipComponent): - ModInfo({}, []) - - 
modinfo_i40e = ModInfoEach(context_wrap(MODINFO_I40E)) - modinfo_intel = ModInfoEach(context_wrap(MODINFO_INTEL)) - modinfo_bnx2x = ModInfoEach(context_wrap(MODINFO_BNX2X)) - modinfo_igb = ModInfoEach(context_wrap(MODINFO_IGB)) - modinfo_ixgbe = ModInfoEach(context_wrap(MODINFO_IXGBE)) - modinfo_vmxnet3 = ModInfoEach(context_wrap(MODINFO_VMXNET3)) - modinfo_veth = ModInfoEach(context_wrap(MODINFO_VETH)) - comb = ModInfo( - None, [ - modinfo_i40e, - modinfo_intel, - modinfo_bnx2x, - modinfo_igb, - modinfo_ixgbe, - modinfo_vmxnet3, - modinfo_veth, - ] - ) - assert sorted(comb.retpoline_y) == sorted(['aesni-intel', 'i40e', 'vmxnet3']) - assert sorted(comb.retpoline_n) == sorted(['bnx2x']) - assert sorted(comb.keys()) == sorted(['i40e', 'aesni-intel', 'bnx2x', 'vmxnet3', 'igb', 'ixgbe', 'veth']) - - modinfo_obj = comb['i40e'] - assert modinfo_obj.module_name == 'i40e' - assert modinfo_obj.module_version == '2.3.2-k' - assert modinfo_obj.module_deps == ['ptp'] - assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key' - assert len(modinfo_obj['alias']) == 2 - assert modinfo_obj['sig_key'] == '81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59' - assert modinfo_obj['vermagic'] == '3.10.0-993.el7.x86_64 SMP mod_unload modversions' - assert sorted(modinfo_obj['parm']) == sorted(['debug:Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX) (uint)', - 'int_mode: Force interrupt mode other than MSI-X (1 INT#x; 2 MSI) (int)']) - assert modinfo_obj['description'] == 'Intel(R) Ethernet Connection XL710 Network Driver' - assert ('signer' in modinfo_obj) is True - assert modinfo_obj.module_path == "/lib/modules/3.10.0-993.el7.x86_64/kernel/drivers/net/ethernet/intel/i40e/i40e.ko.xz" - - modinfo_obj = comb['aesni-intel'] - assert len(modinfo_obj['alias']) == 5 - assert sorted(modinfo_obj['alias']) == sorted(['aes', 'crypto-aes', 'crypto-fpu', 'fpu', 'x86cpu:vendor:*:family:*:model:*:feature:*0099*']) - assert ('parm' in modinfo_obj) is False - assert modinfo_obj.module_name == 'aesni-intel' - assert modinfo_obj['description'] == 'Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized' - assert modinfo_obj['rhelversion'] == '7.7' - assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key' - assert modinfo_obj.module_deps == ['glue_helper', 'lrw', 'cryptd', 'ablk_helper'] - assert modinfo_obj['sig_key'] == '81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59' - - modinfo_obj = comb['bnx2x'] - assert len(modinfo_obj['alias']) == 24 - assert len(modinfo_obj['parm']) == 6 - assert len(modinfo_obj['firmware']) == 3 - assert sorted(modinfo_obj['firmware']) == sorted(['bnx2x/bnx2x-e2-7.13.1.0.fw', 'bnx2x/bnx2x-e1h-7.13.1.0.fw', 'bnx2x/bnx2x-e1-7.13.1.0.fw']) - assert modinfo_obj.module_name == 'bnx2x' - assert modinfo_obj.module_path == '/lib/modules/3.10.0-514.el7.x86_64/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x.ko' - assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key' - assert sorted(modinfo_obj.module_deps) == sorted(['mdio', 'libcrc32c', 'ptp']) - - modinfo_obj = comb['igb'] - assert modinfo_igb.get('alias') == 'pci:v00008086d000010D6sv*sd*bc*sc*i*' - assert modinfo_igb.module_name == 'igb' - assert modinfo_igb.module_path == '/lib/modules/3.10.0-327.10.1.el7.jump7.x86_64/kernel/drivers/net/ethernet/intel/igb/igb.ko' - - modinfo_obj = comb['ixgbe'] - assert modinfo_ixgbe.get('alias') == 'pci:v00008086d000015CEsv*sd*bc*sc*i*' - assert modinfo_ixgbe.module_name == 'ixgbe' - assert 
modinfo_ixgbe.module_path == '/lib/modules/3.10.0-514.6.1.el7.jump3.x86_64/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.ko' - - modinfo_drv = comb['vmxnet3'] - assert modinfo_drv.get('alias') == 'pci:v000015ADd000007B0sv*sd*bc*sc*i*' - assert len(modinfo_drv.module_parm) == 0 - assert len(modinfo_drv.module_firmware) == 0 - assert modinfo_drv.module_name == 'vmxnet3' - assert modinfo_drv.module_path == '/lib/modules/3.10.0-957.10.1.el7.x86_64/kernel/drivers/net/vmxnet3/vmxnet3.ko.xz' - - modinfo_drv = comb['veth'] - assert modinfo_drv.module_name == 'veth' - assert modinfo_drv.module_path == '/lib/modules/3.10.0-327.el7.x86_64/kernel/drivers/net/veth.ko' - assert modinfo_drv.module_signer == 'Red Hat Enterprise Linux kernel signing key' - - -def test_modinfo_all(): - - with pytest.raises(SkipComponent): - ModInfo({}, []) - - context = context_wrap( - '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format( - MODINFO_I40E, - MODINFO_INTEL, - MODINFO_BNX2X, - MODINFO_IGB, - MODINFO_IXGBE, - MODINFO_VMXNET3, - # MODINFO_VETH <- Remove from ModInfAll - ) - ) - - modinfo_i40e = ModInfoEach(context_wrap(MODINFO_I40E)) - modinfo_intel = ModInfoEach(context_wrap(MODINFO_INTEL)) - modinfo_bnx2x = ModInfoEach(context_wrap(MODINFO_BNX2X)) - modinfo_igb = ModInfoEach(context_wrap(MODINFO_IGB)) - modinfo_ixgbe = ModInfoEach(context_wrap(MODINFO_IXGBE)) - modinfo_vmxnet3 = ModInfoEach(context_wrap(MODINFO_VMXNET3)) - modinfo_veth = ModInfoEach(context_wrap(MODINFO_VETH)) - comb = ModInfo( - ModInfoAll(context), - [ - modinfo_i40e, - modinfo_intel, - modinfo_bnx2x, - modinfo_igb, - modinfo_ixgbe, - modinfo_vmxnet3, - modinfo_veth, # <- But leave in ModInfoEach - ] - ) - - assert 'veth' not in comb # Check it here: not found - - assert sorted(comb.retpoline_y) == sorted(['aesni-intel', 'i40e', 'vmxnet3']) - assert sorted(comb.retpoline_n) == sorted(['bnx2x']) - assert sorted(comb.keys()) == sorted(['i40e', 'aesni-intel', 'bnx2x', 'vmxnet3', 'igb', 'ixgbe']) - - modinfo_obj = comb['i40e'] - assert modinfo_obj.module_name == 'i40e' - assert modinfo_obj.module_version == '2.3.2-k' - assert modinfo_obj.module_deps == ['ptp'] - assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key' - assert len(modinfo_obj['alias']) == 2 - assert modinfo_obj['sig_key'] == '81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59' - assert modinfo_obj['vermagic'] == '3.10.0-993.el7.x86_64 SMP mod_unload modversions' - assert sorted(modinfo_obj['parm']) == sorted(['debug:Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX) (uint)', - 'int_mode: Force interrupt mode other than MSI-X (1 INT#x; 2 MSI) (int)']) - assert modinfo_obj['description'] == 'Intel(R) Ethernet Connection XL710 Network Driver' - assert ('signer' in modinfo_obj) is True - assert modinfo_obj.module_path == "/lib/modules/3.10.0-993.el7.x86_64/kernel/drivers/net/ethernet/intel/i40e/i40e.ko.xz" - - modinfo_obj = comb['aesni-intel'] - assert len(modinfo_obj['alias']) == 5 - assert sorted(modinfo_obj['alias']) == sorted(['aes', 'crypto-aes', 'crypto-fpu', 'fpu', 'x86cpu:vendor:*:family:*:model:*:feature:*0099*']) - assert ('parm' in modinfo_obj) is False - assert modinfo_obj.module_name == 'aesni-intel' - assert modinfo_obj['description'] == 'Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized' - assert modinfo_obj['rhelversion'] == '7.7' - assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key' - assert modinfo_obj.module_deps == ['glue_helper', 'lrw', 'cryptd', 'ablk_helper'] - assert 
modinfo_obj['sig_key'] == '81:7C:CB:07:72:4E:7F:B8:15:24:10:F9:27:2D:AA:CF:80:3E:CE:59' - - modinfo_obj = comb['bnx2x'] - assert len(modinfo_obj['alias']) == 24 - assert len(modinfo_obj['parm']) == 6 - assert len(modinfo_obj['firmware']) == 3 - assert sorted(modinfo_obj['firmware']) == sorted(['bnx2x/bnx2x-e2-7.13.1.0.fw', 'bnx2x/bnx2x-e1h-7.13.1.0.fw', 'bnx2x/bnx2x-e1-7.13.1.0.fw']) - assert modinfo_obj.module_name == 'bnx2x' - assert modinfo_obj.module_path == '/lib/modules/3.10.0-514.el7.x86_64/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x.ko' - assert modinfo_obj.module_signer == 'Red Hat Enterprise Linux kernel signing key' - assert sorted(modinfo_obj.module_deps) == sorted(['mdio', 'libcrc32c', 'ptp']) - - modinfo_obj = comb['igb'] - assert modinfo_igb.get('alias') == 'pci:v00008086d000010D6sv*sd*bc*sc*i*' - assert modinfo_igb.module_name == 'igb' - assert modinfo_igb.module_path == '/lib/modules/3.10.0-327.10.1.el7.jump7.x86_64/kernel/drivers/net/ethernet/intel/igb/igb.ko' - - modinfo_obj = comb['ixgbe'] - assert modinfo_ixgbe.get('alias') == 'pci:v00008086d000015CEsv*sd*bc*sc*i*' - assert modinfo_ixgbe.module_name == 'ixgbe' - assert modinfo_ixgbe.module_path == '/lib/modules/3.10.0-514.6.1.el7.jump3.x86_64/kernel/drivers/net/ethernet/intel/ixgbe/ixgbe.ko' - - modinfo_drv = comb['vmxnet3'] - assert modinfo_drv.get('alias') == 'pci:v000015ADd000007B0sv*sd*bc*sc*i*' - assert len(modinfo_drv.module_parm) == 0 - assert len(modinfo_drv.module_firmware) == 0 - assert modinfo_drv.module_name == 'vmxnet3' - assert modinfo_drv.module_path == '/lib/modules/3.10.0-957.10.1.el7.x86_64/kernel/drivers/net/vmxnet3/vmxnet3.ko.xz' - - -def test_modinfo_doc_examples(): - modinfo_i40e = ModInfoEach(context_wrap(MODINFO_I40E)) - modinfo_intel = ModInfoEach(context_wrap(MODINFO_INTEL)) - modinfo_bnx2x = ModInfoEach(context_wrap(MODINFO_BNX2X)) - modinfo_igb = ModInfoEach(context_wrap(MODINFO_IGB)) - modinfo_ixgbe = ModInfoEach(context_wrap(MODINFO_IXGBE)) - modinfo_vmxnet3 = ModInfoEach(context_wrap(MODINFO_VMXNET3)) - modinfo_veth = ModInfoEach(context_wrap(MODINFO_VETH)) - comb = ModInfo( - None, [ - modinfo_i40e, - modinfo_intel, - modinfo_bnx2x, - modinfo_igb, - modinfo_ixgbe, - modinfo_vmxnet3, - modinfo_veth] - ) - env = {'modinfo_obj': comb} - failed, total = doctest.testmod(modinfo, globs=env) - assert failed == 0 diff --git a/insights/combiners/tests/test_package_provides_httpd.py b/insights/combiners/tests/test_package_provides_httpd.py deleted file mode 100644 index c01be81926..0000000000 --- a/insights/combiners/tests/test_package_provides_httpd.py +++ /dev/null @@ -1,44 +0,0 @@ -import doctest - -from insights.combiners import package_provides_httpd -from insights.parsers.package_provides_httpd import PackageProvidesHttpd -from insights.combiners.package_provides_httpd import PackageProvidesHttpdAll -from insights.tests import context_wrap - - -PACKAGE_COMMAND_MATCH_1 = """ -/opt/rh/httpd24/root/usr/sbin/httpd httpd24-httpd-2.4.34-7.el7.x86_64 -""" - -PACKAGE_COMMAND_MATCH_2 = """ -/usr/sbin/httpd httpd-2.4.6-88.el7.x86_64 -""" - -PACKAGE_COMMAND_MATCH_3 = """ -/opt/rh/jbcs-httpd24/root/usr/sbin/httpd jbcs-httpd24-httpd-2.4.34-7.el7.x86_64 -""" - - -def test_packages_provide_httpd(): - pack1 = PackageProvidesHttpd(context_wrap(PACKAGE_COMMAND_MATCH_1)) - pack2 = PackageProvidesHttpd(context_wrap(PACKAGE_COMMAND_MATCH_2)) - pack3 = PackageProvidesHttpd(context_wrap(PACKAGE_COMMAND_MATCH_3)) - result = PackageProvidesHttpdAll([pack1, pack2, pack3]) - assert sorted(result.running_httpds) 
== sorted( - ['/opt/rh/httpd24/root/usr/sbin/httpd', - '/usr/sbin/httpd', '/opt/rh/jbcs-httpd24/root/usr/sbin/httpd']) - assert result["/usr/sbin/httpd"] == "httpd-2.4.6-88.el7.x86_64" - assert result.get_package("/opt/rh/httpd24/root/usr/sbin/httpd") == "httpd24-httpd-2.4.34-7.el7.x86_64" - assert result.get("/opt/rh/httpd24/root/usr/sbin/httpd") == "httpd24-httpd-2.4.34-7.el7.x86_64" - assert result.get_package("/usr/lib/httpd") is None - assert result.get("/usr/lib/httpd") is None - - -def test_doc_examples(): - pack1 = PackageProvidesHttpd(context_wrap(PACKAGE_COMMAND_MATCH_1)) - pack2 = PackageProvidesHttpd(context_wrap(PACKAGE_COMMAND_MATCH_2)) - env = { - 'packages': package_provides_httpd.PackageProvidesHttpdAll([pack1, pack2]), - } - failed, _ = doctest.testmod(package_provides_httpd, globs=env) - assert failed == 0 diff --git a/insights/combiners/tests/test_package_provides_java.py b/insights/combiners/tests/test_package_provides_java.py deleted file mode 100644 index f87e9dc0b4..0000000000 --- a/insights/combiners/tests/test_package_provides_java.py +++ /dev/null @@ -1,30 +0,0 @@ -from insights.parsers.package_provides_java import PackageProvidesJava -from insights.combiners.package_provides_java import PackageProvidesJavaAll -from insights.tests import context_wrap - -PACKAGE_COMMAND_MATCH_1 = """ -/usr/lib/jvm/jre/bin/java java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64 -""" -PACKAGE_COMMAND_MATCH_2 = """ -/usr/lib/jvm/java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64/bin/java java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64 -""" - -PACKAGE_COMMAND_MATCH_3 = """ -java java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64 -""" - - -def test_packages_provide_java(): - pack1 = PackageProvidesJava(context_wrap(PACKAGE_COMMAND_MATCH_1)) - pack2 = PackageProvidesJava(context_wrap(PACKAGE_COMMAND_MATCH_2)) - pack3 = PackageProvidesJava(context_wrap(PACKAGE_COMMAND_MATCH_3)) - result = PackageProvidesJavaAll([pack1, pack2, pack3]) - assert sorted(result.running_javas) == sorted( - ['/usr/lib/jvm/jre/bin/java', - '/usr/lib/jvm/java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64/bin/java', 'java']) - assert result[ - "/usr/lib/jvm/java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64/bin/java"] == "java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64" - assert result.get_package("/usr/lib/jvm/jre/bin/java") == "java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64" - assert result.get("/usr/lib/jvm/jre/bin/java") == "java-1.8.0-openjdk-headless-1.8.0.141-3.b16.el6_9.x86_64" - assert result.get_package("/usr/lib/jre/bin/java") is None - assert result.get("/usr/lib/jre/bin/java") is None diff --git a/insights/combiners/tests/test_redhat_release.py b/insights/combiners/tests/test_redhat_release.py deleted file mode 100644 index 3b47e5252c..0000000000 --- a/insights/combiners/tests/test_redhat_release.py +++ /dev/null @@ -1,97 +0,0 @@ -from insights.parsers.uname import Uname -from insights.parsers.redhat_release import RedhatRelease -from insights.combiners.redhat_release import redhat_release, RedHatRelease -from insights.combiners import redhat_release as rr -from insights.tests import context_wrap -from insights.parsers import SkipComponent -import pytest -import doctest - -UNAME = "Linux localhost.localdomain 3.10.0-327.rt56.204.el7.x86_64 #1 SMP PREEMPT RT Thu Oct 29 21:54:23 EDT 2015 x86_64 x86_64 x86_64 GNU/Linux" -BAD_UNAME = "Linux localhost.localdomain 2.6.24.7-101.el5rt.x86_64 #1 SMP PREEMPT RT Thu Oct 29 21:54:23 EDT 2015 x86_64 x86_64 
x86_64 GNU/Linux" - -REDHAT_RELEASE = """ -Red Hat Enterprise Linux Server release 7.2 (Maipo) -""".strip() - -FEDORA = """ -Fedora release 23 (Twenty Three) -""".strip() - - -def test_uname(): - un = Uname(context_wrap(UNAME)) - expected = (7, 2) - result = redhat_release(None, un) - assert result.major == expected[0] - assert result.minor == expected[1] - - -def test_redhat_release(): - rel = RedhatRelease(context_wrap(REDHAT_RELEASE)) - expected = (7, 2) - result = redhat_release(rel, None) - assert result.major == expected[0] - assert result.minor == expected[1] - - -def test_both(): - un = Uname(context_wrap(UNAME)) - rel = RedhatRelease(context_wrap(REDHAT_RELEASE)) - expected = (7, 2) - result = redhat_release(rel, un) - assert result.major == expected[0] - assert result.minor == expected[1] - - -def test_RedHatRelease_uname(): - un = Uname(context_wrap(UNAME)) - expected = (7, 2) - result = RedHatRelease(un, None) - assert result.major == expected[0] - assert result.minor == expected[1] - assert result.rhel == result.rhel7 == '7.2' - assert result.rhel6 is None - - -def test_RedHatRelease_redhat_release(): - rel = RedhatRelease(context_wrap(REDHAT_RELEASE)) - expected = (7, 2) - result = RedHatRelease(None, rel) - assert result.major == expected[0] - assert result.minor == expected[1] - assert result.rhel == result.rhel7 == '7.2' - assert result.rhel8 is None - - -def test_RedHatRelease_both(): - un = Uname(context_wrap(UNAME)) - rel = RedhatRelease(context_wrap(REDHAT_RELEASE)) - expected = (7, 2) - result = RedHatRelease(un, rel) - assert result.major == expected[0] - assert result.minor == expected[1] - assert result.rhel == result.rhel7 == '7.2' - assert result.rhel6 is None - assert result.rhel8 is None - - -def test_raise(): - un = Uname(context_wrap(BAD_UNAME)) - with pytest.raises(SkipComponent): - redhat_release(None, un) - - with pytest.raises(SkipComponent): - RedHatRelease(un, None) - - with pytest.raises(SkipComponent): - RedHatRelease(None, None) - - -def test_doc_examples(): - env = { - 'rh_release': redhat_release(None, Uname(context_wrap(UNAME))), - 'rh_rel': RedHatRelease(Uname(context_wrap(UNAME)), None), - } - failed, total = doctest.testmod(rr, globs=env) - assert failed == 0 diff --git a/insights/combiners/tests/test_uptime.py b/insights/combiners/tests/test_uptime.py deleted file mode 100644 index e9778f3695..0000000000 --- a/insights/combiners/tests/test_uptime.py +++ /dev/null @@ -1,66 +0,0 @@ -import datetime -from insights.parsers.uptime import Uptime -from insights.parsers.facter import Facter -from insights.combiners.uptime import uptime -from insights.tests import context_wrap - -UPTIME1 = " 14:28:24 up 5:55, 4 users, load average: 0.04, 0.03, 0.05" -UPTIME2 = " 10:55:22 up 40 days, 3 min, 1 user, load average: 0.49, 0.12, 0.04" -UPTIME3 = """ -COMMAND> facts -uptime => 21 days -uptime_days => 21 -uptime_hours => 525 -uptime_seconds => 1893598 -""".strip() - - -def total_seconds(time_delta): - return (time_delta.days * 24 * 60 * 60) + time_delta.seconds - - -def test_get_uptime_uptime1(): - ut = Uptime(context_wrap(UPTIME1)) - upt = uptime(ut, None) - assert upt.currtime == '14:28:24' - assert upt.updays == "" - assert upt.uphhmm == '5:55' - assert upt.users == '4' - assert upt.loadavg == ['0.04', '0.03', '0.05'] - c = datetime.timedelta(days=0, hours=5, minutes=55) - assert total_seconds(upt.uptime) == total_seconds(c) - - -def test_get_uptime_uptime2(): - ut = Uptime(context_wrap(UPTIME2)) - upt = uptime(ut, None) - assert upt.currtime == '10:55:22' 
- assert upt.updays == '40' - assert upt.uphhmm == '00:03' - assert upt.users == '1' - assert upt.loadavg == ['0.49', '0.12', '0.04'] - c = datetime.timedelta(days=40, hours=0, minutes=3) - assert total_seconds(upt.uptime) == total_seconds(c) - - -def test_get_facter_uptime(): - ft = Facter(context_wrap(UPTIME3)) - upt = uptime(None, ft) - assert upt.updays == "21" - assert upt.uphhmm == '21:59' - assert upt.loadavg is None - c = datetime.timedelta(days=0, hours=0, minutes=0, seconds=1893598) - assert total_seconds(upt.uptime) == total_seconds(c) - - -def test_get_both_uptime(): - ut = Uptime(context_wrap(UPTIME2)) - ft = Facter(context_wrap(UPTIME3)) - upt = uptime(ut, ft) - assert upt.currtime == '10:55:22' - assert upt.updays == '40' - assert upt.uphhmm == '00:03' - assert upt.users == '1' - assert upt.loadavg == ['0.49', '0.12', '0.04'] - c = datetime.timedelta(days=40, hours=0, minutes=3) - assert total_seconds(upt.uptime) == total_seconds(c) diff --git a/insights/combiners/uptime.py b/insights/combiners/uptime.py deleted file mode 100644 index f02859660c..0000000000 --- a/insights/combiners/uptime.py +++ /dev/null @@ -1,60 +0,0 @@ -""" -Uptime -====== - -Combiner for uptime information. It uses the results of the ``Uptime`` -parser and the ``Facter`` parser to get the uptime information. ``Uptime`` is -the preferred source of data. - -Examples: - >>> ut = shared[uptime] - >>> ut.updays - 21 - >>> ht.uptime - 03:45 - -""" -from __future__ import division -from collections import namedtuple -from insights.core.plugins import combiner -from insights.parsers.uptime import Uptime as upt -from insights.parsers.facter import Facter - -Uptime = namedtuple("Uptime", - field_names=[ - "currtime", "updays", "uphhmm", - "users", "loadavg", "uptime"]) -"""namedtuple: Type for storing the uptime information.""" - - -@combiner([upt, Facter]) -def uptime(ut, facter): - """Check uptime and facts to get the uptime information. - - Prefer uptime to facts. - - Returns: - insights.combiners.uptime.Uptime: A named tuple with `currtime`, - `updays`, `uphhmm`, `users`, `loadavg` and `uptime` components. - - Raises: - Exception: If no data is available from both of the parsers. - """ - - ut = ut - if ut and ut.loadavg: - return Uptime(ut.currtime, ut.updays, ut.uphhmm, - ut.users, ut.loadavg, ut.uptime) - ft = facter - if ft and hasattr(ft, 'uptime_seconds'): - import datetime - secs = int(ft.uptime_seconds) - up_dd = secs // (3600 * 24) - up_hh = (secs % (3600 * 24)) // 3600 - up_mm = (secs % 3600) // 60 - updays = str(up_dd) if up_dd > 0 else '' - uphhmm = '%02d:%02d' % (up_hh, up_mm) - up_time = datetime.timedelta(seconds=secs) - return Uptime(None, updays, uphhmm, None, None, up_time) - - raise Exception("Unable to get uptime information.") diff --git a/insights/command_parser.py b/insights/command_parser.py index 2a36d95686..9bc2be549c 100644 --- a/insights/command_parser.py +++ b/insights/command_parser.py @@ -17,7 +17,8 @@ collect Collect all specs against the client and create an Insights archive. inspect Execute component and shell out to ipython for evaluation. info View info and docs for Insights Core components. - ocpshell Interactive evaluation of archives, directories, or individual yaml files. + ocpshell Interactive evaluation of archives or directories from OCP, or individual yaml files. + shell Interactive evaluation of archives and directories. run Run insights-core against host or an archive. version Show Insights Core version information and exit. 
""" @@ -76,6 +77,10 @@ def ocpshell(self): from .ocpshell import main as ocpshell_main ocpshell_main() + def shell(self): + from .shell import main as shell_main + shell_main() + def run(self): from insights import run if "" not in sys.path: diff --git a/insights/compliance_obfuscations.yaml b/insights/compliance_obfuscations.yaml new file mode 100644 index 0000000000..80c01c64df --- /dev/null +++ b/insights/compliance_obfuscations.yaml @@ -0,0 +1,16 @@ +# This file contains a structure of xpaths for obfuscations in compliance +obfuscate: + - './/{http://checklists.nist.gov/xccdf/1.2}target-address' + - './/{http://checklists.nist.gov/xccdf/1.2}target-facts/{http://checklists.nist.gov/xccdf/1.2}fact[@name="urn:xccdf:fact:asset:identifier:ipv4"]' + - './/{http://checklists.nist.gov/xccdf/1.2}target-facts/{http://checklists.nist.gov/xccdf/1.2}fact[@name="urn:xccdf:fact:asset:identifier:ipv6"]' + - './/{http://scap.nist.gov/schema/asset-identification/1.1}ip-address/{http://scap.nist.gov/schema/asset-identification/1.1}ip-v4' + - './/{http://scap.nist.gov/schema/asset-identification/1.1}ip-address/{http://scap.nist.gov/schema/asset-identification/1.1}ip-v6' + - './/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5}system_info/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5}interfaces/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5}interface/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5}ip_address' +obfuscate_hostname: + - './/{http://scap.nist.gov/schema/asset-identification/1.1}fqdn' + - './/{http://scap.nist.gov/schema/asset-identification/1.1}hostname' + - './/{http://checklists.nist.gov/xccdf/1.2}target' + - './/{http://checklists.nist.gov/xccdf/1.2}target-facts/{http://checklists.nist.gov/xccdf/1.2}fact[@name="urn:xccdf:fact:asset:identifier:fqdn"]' + - './/{http://checklists.nist.gov/xccdf/1.2}target-facts/{http://checklists.nist.gov/xccdf/1.2}fact[@name="urn:xccdf:fact:asset:identifier:host_name"]' + - './/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5}system_info/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5}primary_host_name' + - './/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5#unix}uname_item/{http://oval.mitre.org/XMLSchema/oval-system-characteristics-5#unix}node_name' diff --git a/insights/components/ceph.py b/insights/components/ceph.py new file mode 100644 index 0000000000..d4921343c1 --- /dev/null +++ b/insights/components/ceph.py @@ -0,0 +1,28 @@ +""" +Component identifies Ceph Monitor +================================= + +The ``Is*`` component in this module is valid if the +:py:class:`insights.combiners.ps.Ps` combiner indicates +the host is a Ceph monitor. Otherwise, it raises a +:py:class:`insights.core.exceptions.SkipComponent` to prevent dependent components from +executing. + +""" +from insights.combiners.ps import Ps +from insights.core.exceptions import SkipComponent +from insights.core.plugins import component + + +@component(Ps) +class IsCephMonitor(object): + """ + This component uses ``Ps`` combiner to determine if the host is a Ceph + monitor or not. If not Ceph monitor, it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not a Ceph monitor. 
+ """ + def __init__(self, ps): + if not ps.search(COMMAND_NAME__contains='ceph-mon'): + raise SkipComponent("Not Ceph Monitor") diff --git a/insights/components/cloud_provider.py b/insights/components/cloud_provider.py new file mode 100644 index 0000000000..397bd65c63 --- /dev/null +++ b/insights/components/cloud_provider.py @@ -0,0 +1,59 @@ +""" +Components identify Cloud Provider +=================================== + +The ``Is*`` component in this module is valid if the +:py:class:`insights.combiners.cloud_provider.CloudProvider` combiner indicates +the host is from the specific Cloud Provider. Otherwise, it raises a +:py:class:`insights.core.exceptions.SkipComponent` to prevent dependent components from +executing. + +""" +from insights.combiners.cloud_provider import CloudProvider +from insights.core.exceptions import SkipComponent +from insights.core.plugins import component + + +@component(CloudProvider) +class IsAWS(object): + """ + This component uses ``CloudProvider`` combiner to determine the cloud + provider of the instance. + It checks if AWS, if not AWS it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not an instance from AWS. + """ + def __init__(self, cp): + if not cp or cp.cloud_provider != CloudProvider.AWS: + raise SkipComponent("Not AWS instance") + + +@component(CloudProvider) +class IsAzure(object): + """ + This component uses ``CloudProvider`` combiner to determine the cloud + provider of the instance. + It checks if Azure, if not Azure it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not an instance from Azure. + """ + def __init__(self, cp): + if not cp or cp.cloud_provider != CloudProvider.AZURE: + raise SkipComponent("Not Azure instance") + + +@component(CloudProvider) +class IsGCP(object): + """ + This component uses ``CloudProvider`` combiner to determine the cloud + provider of the instance. + It checks if Google Cloud Platform (GCP), if not GCP it raises ``SkipComponent``. + + Raises: + SkipComponent: When it's not an instance from GCP. + """ + def __init__(self, cp): + if not cp or cp.cloud_provider != CloudProvider.GOOGLE: + raise SkipComponent("Not Google Cloud Platform instance") diff --git a/insights/components/cryptsetup.py b/insights/components/cryptsetup.py new file mode 100644 index 0000000000..636cc37c7d --- /dev/null +++ b/insights/components/cryptsetup.py @@ -0,0 +1,55 @@ +""" +HasCryptsetupWithTokens, HasCryptsetupWithoutTokens +=================================================== + +The ``HasCryptsetupWithTokens``/``HasCryptsetupWithoutTokens`` component uses +``InstalledRpms`` parser to determine if cryptsetup package is installed and if +it has tokens support (since version 2.4.0), if not it raises ``SkipComponent`` +so that the dependent component will not fire. Can be added as a dependency of +a parser so that the parser only fires if the ``cryptsetup`` dependency and +token support is met. 
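The cloud-provider and cryptsetup components are consumed the same way, as dependencies of rules or parsers. A minimal sketch under that assumption (the rule names and response keys below are invented; IsAzure and IsGCP gate rules identically to IsAWS):

from insights.core.plugins import make_pass, rule
from insights.components.cloud_provider import IsAWS
from insights.components.cryptsetup import HasCryptsetupWithTokens


# Fires only on AWS instances; elsewhere IsAWS raises SkipComponent
# and the rule is skipped.
@rule(IsAWS)
def aws_host(aws):
    return make_pass("AWS_HOST")


# Fires only when an installed cryptsetup is 2.4.0 or newer.
@rule(HasCryptsetupWithTokens)
def cryptsetup_token_capable(has_tokens):
    return make_pass("CRYPTSETUP_TOKENS")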
+""" +from insights.core.exceptions import SkipComponent +from insights.core.plugins import component +from insights.parsers.installed_rpms import InstalledRpms, InstalledRpm + + +@component(InstalledRpms) +class HasCryptsetupWithTokens(object): + """The ``HasCryptsetupWithTokens`` component uses ``InstalledRpms`` parser + to determine if cryptsetup package is installed and if it has tokens + support (since version 2.4.0), if not it raises ``SkipComponent`` + + Raises: + SkipComponent: When ``cryptsetup`` package is strictly less than 2.4.0, + or when cryptsetup package is not installed + """ + def __init__(self, rpms): + rpm = rpms.get_max("cryptsetup") + + if rpm is None: + raise SkipComponent("cryptsetup package is not installed") + + if rpm < InstalledRpm("cryptsetup-2.4.0-0"): + raise SkipComponent("cryptsetup package with token support is not installed") + + +@component(InstalledRpms) +class HasCryptsetupWithoutTokens(object): + """The ``HasCryptsetupWithoutTokens`` component uses ``InstalledRpms`` + parser to determine if cryptsetup package is installed and if it does not + have tokens support (below version 2.4.0), if not it raises + ``SkipComponent`` + + Raises: + SkipComponent: When ``cryptsetup`` package is at least 2.4.0, or when + cryptsetup package is not installed + """ + def __init__(self, rpms): + rpm = rpms.get_max("cryptsetup") + + if rpm is None: + raise SkipComponent("cryptsetup package is not installed") + + if rpm >= InstalledRpm("cryptsetup-2.4.0-0"): + raise SkipComponent("cryptsetup package with token support is installed") diff --git a/insights/components/openstack.py b/insights/components/openstack.py index 7c002f6b6f..76f9941e95 100644 --- a/insights/components/openstack.py +++ b/insights/components/openstack.py @@ -8,9 +8,9 @@ a dependency of a parser so that the parser only fires if the ``IsIsOpenStackCompute`` dependency is met. """ +from insights.core.exceptions import SkipComponent from insights.core.plugins import component from insights.parsers.ps import PsAuxcww -from insights.core.dr import SkipComponent @component(PsAuxcww) diff --git a/insights/components/rhel_version.py b/insights/components/rhel_version.py index 2ae3264871..2f7640ab57 100644 --- a/insights/components/rhel_version.py +++ b/insights/components/rhel_version.py @@ -1,62 +1,102 @@ """ -IsRhel6, IsRhel7 and IsRhel8 -=============================== +IsRhel6, IsRhel7, IsRhel8, and IsRhel9 +====================================== An ``IsRhel*`` component is valid if the :py:class:`insights.combiners.redhat_release.RedHatRelease` combiner indicates the major RHEL version represented by the component. Otherwise, it raises a -:py:class:`insights.core.dr.SkipComponent` to prevent dependent components from +:py:class:`insights.core.exceptions.SkipComponent` to prevent dependent components from executing. In particular, an ``IsRhel*`` component can be added as a dependency of a parser to limit it to a given version. """ - -from insights.core.plugins import component from insights.combiners.redhat_release import RedHatRelease -from insights.core.dr import SkipComponent +from insights.core.exceptions import SkipComponent +from insights.core.plugins import component + + +class IsRhel(object): + """ + This component uses ``RedHatRelease`` combiner to determine the RHEL + major version. It then checks if the major version matches the version + argument, if it doesn't it raises ``SkipComponent``. + + Attributes: + minor (int): The minor version of RHEL. 
+ + Raises: + SkipComponent: When RHEL major version does not match version. + """ + def __init__(self, rhel, version=None): + if rhel.major != version: + raise SkipComponent("Not RHEL{vers}".format(vers=version)) + self.minor = rhel.minor @component(RedHatRelease) -class IsRhel6(object): +class IsRhel6(IsRhel): """ This component uses ``RedHatRelease`` combiner to determine RHEL version. It checks if RHEL6, if not RHEL6 it raises ``SkipComponent``. + Attributes: + minor (int): The minor version of RHEL 6. + Raises: SkipComponent: When RHEL version is not RHEL6. """ def __init__(self, rhel): - if rhel.major != 6: - raise SkipComponent('Not RHEL6') + super(IsRhel6, self).__init__(rhel, 6) @component(RedHatRelease) -class IsRhel7(object): +class IsRhel7(IsRhel): """ This component uses ``RedHatRelease`` combiner - to determine RHEL version. It checks if RHEL7, if not \ + to determine RHEL version. It checks if RHEL7, if not RHEL7 it raises ``SkipComponent``. + Attributes: + minor (int): The minor version of RHEL 7. + Raises: SkipComponent: When RHEL version is not RHEL7. """ def __init__(self, rhel): - if rhel.major != 7: - raise SkipComponent('Not RHEL7') + super(IsRhel7, self).__init__(rhel, 7) @component(RedHatRelease) -class IsRhel8(object): +class IsRhel8(IsRhel): """ This component uses ``RedhatRelease`` combiner to determine RHEL version. It checks if RHEL8, if not RHEL8 it raises ``SkipComponent``. + Attributes: + minor (int): The minor version of RHEL 8. + Raises: SkipComponent: When RHEL version is not RHEL8. """ def __init__(self, rhel): - if rhel.major != 8: - raise SkipComponent('Not RHEL8') + super(IsRhel8, self).__init__(rhel, 8) + + +@component(RedHatRelease) +class IsRhel9(IsRhel): + """ + This component uses ``RedhatRelease`` combiner + to determine RHEL version. It checks if RHEL9, if not + RHEL9 it raises ``SkipComponent``. + + Attributes: + minor (int): The minor version of RHEL 9. + + Raises: + SkipComponent: When RHEL version is not RHEL9. + """ + def __init__(self, rhel): + super(IsRhel9, self).__init__(rhel, 9) diff --git a/insights/components/satellite.py b/insights/components/satellite.py new file mode 100644 index 0000000000..2436b90d94 --- /dev/null +++ b/insights/components/satellite.py @@ -0,0 +1,93 @@ +""" +Components identify Satellite or Satellite Capsule +================================================== + +An ``IsSatellite`` component is valid if the +:py:class:`insights.combiners.satellite_version.SatelliteVersion` combiner +indicates the host is a Satellite host, and also checks the Satellite major +or major and minor versions match the specified versions when they exist. +Otherwise, it raises a :py:class:`insights.core.exceptions.SkipComponent` to prevent +dependent components from executing. + +An ``IsCapsule`` component is valid if the +:py:class:`insights.combiners.satellite_version.CapsuleVersion` combiner +indicates the host is a Satellite Capsule host, and also checks the Satellite +Capsule major or major and minor versions match the specified versions when +they exist. Otherwise, it raises a :py:class:`insights.core.exceptions.SkipComponent` +to prevent dependent components from executing. 
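Two quick sketches, for illustration rather than as part of this patch: reading the minor attribute that the rhel_version.py refactor above now exposes, and pinning a Satellite check to a specific release the same way the IsSatellite611 class below does. The rule name, the response key, and the 6.13 version are invented for the example.

from insights.combiners.satellite_version import SatelliteVersion
from insights.components.rhel_version import IsRhel8
from insights.components.satellite import IsSatellite
from insights.core.plugins import component, make_pass, rule


# The refactored IsRhel* classes now carry the minor release.
@rule(IsRhel8)
def rhel8_host(rhel8):
    return make_pass("RHEL8_HOST", minor=rhel8.minor)


# A version-pinned component in the style of IsSatellite611 below
# (6.13 is an arbitrary choice for this sketch).
@component(SatelliteVersion)
class IsSatellite613(IsSatellite):
    def __init__(self, sat):
        super(IsSatellite613, self).__init__(sat, 6, 13)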
+""" +from insights.combiners.satellite_version import SatelliteVersion, CapsuleVersion +from insights.core.exceptions import ParseException, SkipComponent +from insights.core.plugins import component + + +@component(SatelliteVersion) +class IsSatellite(object): + """ + This component uses ``SatelliteVersion`` combiner to determine if the host + is a Satellite host, and also if `major_ver` is passed, it checks if the + curent satellite major version match the argument, and if both `major_ver` + and `minor_ver` are passed, it checks if the current satellite major and + minor versions match both the arguments, and raises ``SkipComponent`` when + they do not match. + + Raises: + ParseException: When only minor_ver specified. + SkipComponent: When Satellite major or minor versions do not match + the arguments. + """ + def __init__(self, sat, major_ver=None, minor_ver=None): + if major_ver is not None: + if sat.major != major_ver: + raise SkipComponent("Not a Satellite {major_ver} host.".format( + major_ver=major_ver)) + if minor_ver is not None: + if sat.minor != minor_ver: + raise SkipComponent("Not a Satellite {major_ver}.{minor_ver} host.".format( + major_ver=major_ver, minor_ver=minor_ver)) + else: + if minor_ver is not None: + raise ParseException('Can not specify the minor_ver only.') + + +@component(CapsuleVersion) +class IsCapsule(object): + """ + This component uses ``CapsuleVersion`` combiner to determine if the host + is a Satellite Capsule host, and also if `major_ver` is passed, it checks + if the curent capsule major version match the argument, and if both `major_ver` + and `minor_ver` are passed, it checks if both the current capsule major and + minor versions match both the arguments, and raises ``SkipComponent`` when + they do not match. + + Raises: + ParseException: When only minor_ver specified. + SkipComponent: When the Satellite Capsule major or minor versions do + not match the arguments. + """ + def __init__(self, cap, major_ver=None, minor_ver=None): + if major_ver is not None: + if cap.major != major_ver: + raise SkipComponent("Not a Satellite Capsule {major_ver} host.".format( + major_ver=major_ver)) + if minor_ver is not None: + if cap.minor != minor_ver: + raise SkipComponent("Not a Satellite Capsule {major_ver}.{minor_ver} host.".format( + major_ver=major_ver, minor_ver=minor_ver)) + else: + if minor_ver is not None: + raise ParseException('Can not specify the minor_ver only.') + + +@component(SatelliteVersion) +class IsSatellite611(IsSatellite): + """ + This component uses ``SatelliteVersion`` combiner + to determine the Satellite version. It checks if the Satellite version is 6.11, + and raises ``SkipComponent`` when it isn't. + + Raises: + SkipComponent: When the Satellite version is not 6.11. + """ + def __init__(self, sat): + super(IsSatellite611, self).__init__(sat, 6, 11) diff --git a/insights/components/virtualization.py b/insights/components/virtualization.py new file mode 100644 index 0000000000..17d3fd0219 --- /dev/null +++ b/insights/components/virtualization.py @@ -0,0 +1,25 @@ +""" +Components identify system type with regard to virtualization +============================================================= + +The ``IsBareMetal`` component in this module is valid if the +:py:class:`insights.combiners.virt_what.VirtWhat` combiner indicates +the host is bare metal. 
+""" +from insights.combiners.virt_what import VirtWhat +from insights.core.exceptions import SkipComponent +from insights.core.plugins import component + + +@component(VirtWhat) +class IsBareMetal(object): + """ + This component uses ``VirtWhat`` combiner to determine the virtualization type. + It checks if the system is bare metal, otherwise it raises ``SkipComponent``. + + Raises: + SkipComponent: When system is a virtual machine. + """ + def __init__(self, virt): + if virt.is_virtual: + raise SkipComponent("Not a bare metal system.") diff --git a/insights/contrib/ConfigParser.py b/insights/contrib/ConfigParser.py index d09b2015ca..485d8282c7 100644 --- a/insights/contrib/ConfigParser.py +++ b/insights/contrib/ConfigParser.py @@ -1,758 +1,14 @@ -"""Configuration file parser. +from insights.parsr.iniparser import NoOptionError as NOE, NoSectionError as NSE -A setup file consists of sections, lead by a "[section]" header, -and followed by "name: value" entries, with continuations and such in -the style of RFC 822. -The option values can contain format strings which refer to other values in -the same section, or values in a special [DEFAULT] section. - -For example: - - something: %(dir)s/whatever - -would resolve the "%(dir)s" to the value of dir. All reference -expansions are done late, on demand. - -Intrinsic defaults can be specified by passing them into the -ConfigParser constructor as a dictionary. - -class: - -ConfigParser -- responsible for parsing a list of - configuration files, and managing the parsed database. - - methods: - - __init__(defaults=None) - create the parser and specify a dictionary of intrinsic defaults. The - keys must be strings, the values must be appropriate for %()s string - interpolation. Note that `__name__' is always an intrinsic default; - its value is the section's name. - - sections() - return all the configuration section names, sans DEFAULT - - has_section(section) - return whether the given section exists - - has_option(section, option) - return whether the given option exists in the given section - - options(section) - return list of configuration options for the named section - - read(filenames) - read and parse the list of named configuration files, given by - name. A single filename is also allowed. Non-existing files - are ignored. Return list of successfully read files. - - readfp(fp, filename=None) - read and parse one configuration file, given as a file object. - The filename defaults to fp.name; it is only used in error - messages (if fp has no `name' attribute, the string `' is used). - - get(section, option, raw=False, vars=None) - return a string value for the named option. All % interpolations are - expanded in the return values, based on the defaults passed into the - constructor and the DEFAULT section. Additional substitutions may be - provided using the `vars' argument, which must be a dictionary whose - contents override any pre-existing defaults. - - getint(section, options) - like get(), but convert value to an integer - - getfloat(section, options) - like get(), but convert value to a float - - getboolean(section, options) - like get(), but convert value to a boolean (currently case - insensitively defined as 0, false, no, off for False, and 1, true, - yes, on for True). Returns False or True. - - items(section, raw=False, vars=None) - return a list of tuples with (name, value) for each option - in the section. 
- - remove_section(section) - remove the given file section and all its options - - remove_option(section, option) - remove the given option from the given section - - set(section, option, value) - set the given option - - write(fp) - write the configuration state in .ini format -""" - -try: - from collections import OrderedDict as _default_dict -except ImportError: - # fallback for setup.py which hasn't yet built _collections - _default_dict = dict - -import re - -__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError", - "InterpolationError", "InterpolationDepthError", - "InterpolationSyntaxError", "ParsingError", - "MissingSectionHeaderError", - "ConfigParser", "SafeConfigParser", "RawConfigParser", - "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] - -DEFAULTSECT = "DEFAULT" - -MAX_INTERPOLATION_DEPTH = 10 - - - -# exception classes -class Error(Exception): - """Base class for ConfigParser exceptions.""" - - def _get_message(self): - """Getter for 'message'; needed only to override deprecation in - BaseException.""" - return self.__message - - def _set_message(self, value): - """Setter for 'message'; needed only to override deprecation in - BaseException.""" - self.__message = value - - # BaseException.message has been deprecated since Python 2.6. To prevent - # DeprecationWarning from popping up over this pre-existing attribute, use - # a new property that takes lookup precedence. - message = property(_get_message, _set_message) - - def __init__(self, msg=''): - self.message = msg - Exception.__init__(self, msg) - - def __repr__(self): - return self.message - - __str__ = __repr__ - -class NoSectionError(Error): - """Raised when no section matches a requested option.""" - - def __init__(self, section): - Error.__init__(self, 'No section: %r' % (section,)) - self.section = section - self.args = (section, ) - -class DuplicateSectionError(Error): - """Raised when a section is multiply-created.""" - - def __init__(self, section): - Error.__init__(self, "Section %r already exists" % section) - self.section = section - self.args = (section, ) - -class NoOptionError(Error): - """A requested option was not found.""" - - def __init__(self, option, section): - Error.__init__(self, "No option %r in section: %r" % - (option, section)) - self.option = option - self.section = section - self.args = (option, section) - -class InterpolationError(Error): - """Base class for interpolation-related exceptions.""" - - def __init__(self, option, section, msg): - Error.__init__(self, msg) - self.option = option - self.section = section - self.args = (option, section, msg) - -class InterpolationMissingOptionError(InterpolationError): - """A string substitution required a setting which was not available.""" - - def __init__(self, option, section, rawval, reference): - msg = ("Bad value substitution:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\tkey : %s\n" - "\trawval : %s\n" - % (section, option, reference, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.reference = reference - self.args = (option, section, rawval, reference) - -class InterpolationSyntaxError(InterpolationError): - """Raised when the source text into which substitutions are made - does not conform to the required syntax.""" - -class InterpolationDepthError(InterpolationError): - """Raised when substitutions are nested too deeply.""" - - def __init__(self, option, section, rawval): - msg = ("Value interpolation too deeply recursive:\n" - "\tsection: [%s]\n" - "\toption : %s\n" - "\trawval : %s\n" - % 
(section, option, rawval)) - InterpolationError.__init__(self, option, section, msg) - self.args = (option, section, rawval) - -class ParsingError(Error): - """Raised when a configuration file does not follow legal syntax.""" - - def __init__(self, filename): - Error.__init__(self, 'File contains parsing errors: %s' % filename) - self.filename = filename - self.errors = [] - self.args = (filename, ) - - def append(self, lineno, line): - self.errors.append((lineno, line)) - self.message += '\n\t[line %2d]: %s' % (lineno, line) - -class MissingSectionHeaderError(ParsingError): - """Raised when a key-value pair is found before any section header.""" - - def __init__(self, filename, lineno, line): - Error.__init__( - self, - 'File contains no section headers.\nfile: %s, line: %d\n%r' % - (filename, lineno, line)) - self.filename = filename - self.lineno = lineno - self.line = line - self.args = (filename, lineno, line) - - -class RawConfigParser: - def __init__(self, defaults=None, dict_type=_default_dict, - allow_no_value=False): - self._dict = dict_type - self._sections = self._dict() - self._defaults = self._dict() - if allow_no_value: - self._optcre = self.OPTCRE_NV - else: - self._optcre = self.OPTCRE - if defaults: - for key, value in defaults.items(): - self._defaults[self.optionxform(key)] = value - - def defaults(self): - return self._defaults - - def sections(self): - """Return a list of section names, excluding [DEFAULT]""" - # self._sections will never have [DEFAULT] in it - return self._sections.keys() - - def add_section(self, section): - """Create a new section in the configuration. - - Raise DuplicateSectionError if a section by the specified name - already exists. Raise ValueError if name is DEFAULT or any of it's - case-insensitive variants. - """ - if section.lower() == "default": - raise ValueError('Invalid section name: %s' % section) - - if section in self._sections: - raise DuplicateSectionError(section) - self._sections[section] = self._dict() - - def has_section(self, section): - """Indicate whether the named section is present in the configuration. - - The DEFAULT section is not acknowledged. - """ - return section in self._sections - - def options(self, section): - """Return a list of option names for the given section name.""" - try: - opts = self._sections[section].copy() - except KeyError: - raise NoSectionError(section) - opts.update(self._defaults) - if '__name__' in opts: - del opts['__name__'] - return opts.keys() - - def read(self, filenames): - """Read and parse a filename or a list of filenames. - - Files that cannot be opened are silently ignored; this is - designed so that you can specify a list of potential - configuration file locations (e.g. current directory, user's - home directory, systemwide directory), and all existing - configuration files in the list will be read. A single - filename may also be given. - - Return list of successfully read files. - """ - if isinstance(filenames, basestring): - filenames = [filenames] - read_ok = [] - for filename in filenames: - try: - fp = open(filename) - except IOError: - continue - self._read(fp, filename) - fp.close() - read_ok.append(filename) - return read_ok - - def readfp(self, fp, filename=None): - """Like read() but the argument must be a file-like object. - - The `fp' argument must have a `readline' method. Optional - second argument is the `filename', which if not given, is - taken from fp.name. If fp has no `name' attribute, `' is - used. 
- - """ - if filename is None: - try: - filename = fp.name - except AttributeError: - filename = '' - self._read(fp, filename) - - def get(self, section, option): - opt = self.optionxform(option) - if section not in self._sections: - if section != DEFAULTSECT: - raise NoSectionError(section) - if opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - elif opt in self._sections[section]: - return self._sections[section][opt] - elif opt in self._defaults: - return self._defaults[opt] - else: - raise NoOptionError(option, section) - - def items(self, section): - try: - d2 = self._sections[section] - except KeyError: - if section != DEFAULTSECT: - raise NoSectionError(section) - d2 = self._dict() - d = self._defaults.copy() - d.update(d2) - if "__name__" in d: - del d["__name__"] - return d.items() - - def _get(self, section, conv, option): - return conv(self.get(section, option)) - - def getint(self, section, option): - return self._get(section, int, option) - - def getfloat(self, section, option): - return self._get(section, float, option) - - _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, - '0': False, 'no': False, 'false': False, 'off': False} - - def getboolean(self, section, option): - v = self.get(section, option) - if v.lower() not in self._boolean_states: - raise ValueError('Not a boolean: %s' % v) - return self._boolean_states[v.lower()] - - def optionxform(self, optionstr): - return optionstr.lower() - - def has_option(self, section, option): - """Check for the existence of a given option in a given section.""" - if not section or section == DEFAULTSECT: - option = self.optionxform(option) - return option in self._defaults - elif section not in self._sections: - return False - else: - option = self.optionxform(option) - return (option in self._sections[section] - or option in self._defaults) - - def set(self, section, option, value=None): - """Set an option.""" - if not section or section == DEFAULTSECT: - sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - sectdict[self.optionxform(option)] = value - - def write(self, fp): - """Write an .ini-format representation of the configuration state.""" - if self._defaults: - fp.write("[%s]\n" % DEFAULTSECT) - for (key, value) in self._defaults.items(): - fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t'))) - fp.write("\n") - for section in self._sections: - fp.write("[%s]\n" % section) - for (key, value) in self._sections[section].items(): - if key == "__name__": - continue - if (value is not None) or (self._optcre == self.OPTCRE): - key = " = ".join((key, str(value).replace('\n', '\n\t'))) - fp.write("%s\n" % (key)) - fp.write("\n") - - def remove_option(self, section, option): - """Remove an option.""" - if not section or section == DEFAULTSECT: - sectdict = self._defaults - else: - try: - sectdict = self._sections[section] - except KeyError: - raise NoSectionError(section) - option = self.optionxform(option) - existed = option in sectdict - if existed: - del sectdict[option] - return existed - - def remove_section(self, section): - """Remove a file section.""" - existed = section in self._sections - if existed: - del self._sections[section] - return existed - - # - # Regular expressions for parsing section headers and options. - # - SECTCRE = re.compile( - r'\[' # [ - r'(?P
[^]]+)' # very permissive! - r'\]' # ] - ) - OPTCRE = re.compile( - r'(?P