From b03e5cbd9a4a0b5b04d5730eb5de6ebd77442002 Mon Sep 17 00:00:00 2001
From: Sarah Hoffmann
Date: Tue, 24 Sep 2024 18:08:41 +0200
Subject: [PATCH 01/13] add pyproject.toml for package building

Replaces requirements.txt. Needs sources to move to src/ to work properly.
---
 .gitignore | 4 +-
 cli.py | 23 ++-----
 pyproject.toml | 66 +++++++++++++++++++
 requirements.txt | 3 -
 src/analyser/cli.py | 21 ++++++
 {analyser => src/analyser}/config/__init__.py | 0
 {analyser => src/analyser}/config/config.py | 0
 .../analyser}/config/default.yaml | 0
 {analyser => src/analyser}/core/__init__.py | 0
 .../analyser}/core/assembler/__init__.py | 0
 .../analyser}/core/assembler/pipe_factory.py | 0
 .../core/assembler/pipeline_assembler.py | 0
 {analyser => src/analyser}/core/core.py | 0
 .../analyser}/core/deconstructor/__init__.py | 0
 .../deconstructor/pipeline_deconstructor.py | 0
 .../analyser}/core/dynamic_value/__init__.py | 0
 .../core/dynamic_value/dynamic_value.py | 0
 .../analyser}/core/dynamic_value/resolver.py | 0
 .../analyser}/core/dynamic_value/switch.py | 0
 .../analyser}/core/dynamic_value/variable.py | 0
 .../analyser}/core/exceptions/__init__.py | 0
 .../core/exceptions/yaml_syntax_exception.py | 0
 .../analyser}/core/model/__init__.py | 0
 .../analyser}/core/model/geometry.py | 0
 {analyser => src/analyser}/core/model/node.py | 0
 {analyser => src/analyser}/core/pipe.py | 0
 .../analyser}/core/pipes/__init__.py | 0
 .../core/pipes/data_fetching/__init__.py | 0
 .../core/pipes/data_fetching/sql_processor.py | 0
 .../core/pipes/data_processing/__init__.py | 0
 .../data_processing/geometry_converter.py | 0
 .../data_processing/loop_data_processor.py | 0
 .../analyser}/core/pipes/filling_pipe.py | 0
 .../core/pipes/output_formatters/__init__.py | 0
 .../clusters_vt_formatter.py | 0
 .../geojson_feature_converter.py | 0
 .../output_formatters/geojson_formatter.py | 0
 .../osmoscope_layer_formatter.py | 0
 .../vector_tile_formatter.py | 0
 .../pipes/rules_specific_pipes/__init__.py | 0
 .../digits_filter.py | 0
 .../custom_feature_converter.py | 0
 .../custom_feature_converter.py | 0
 .../custom_feature_converter.py | 0
 .../same_wikidata/custom_feature_converter.py | 0
 .../analyser}/core/qa_rule/__init__.py | 0
 .../core/qa_rule/execution_context.py | 0
 .../analyser}/core/yaml_logic/__init__.py | 0
 .../analyser}/core/yaml_logic/yaml_loader.py | 0
 .../analyser}/database/__init__.py | 0
 .../analyser}/database/connection.py | 0
 {analyser => src/analyser}/logger/logger.py | 0
 {analyser => src/analyser}/logger/timer.py | 0
 .../BA_way_not_part_relation.yaml | 0
 .../addr_housenumber_no_digit.yaml | 0
 .../addr_place_and_street.yaml | 0
 .../addr_place_or_street_rank28.yaml | 0
 .../addr_street_wrong_name.yaml | 0
 .../bad_interpolations.yaml | 0
 .../duplicate_label_role.yaml | 0
 .../rules_specifications/no_admin_level.yaml | 0
 .../place_nodes_close.yaml | 0
 .../postcode_bad_format.yaml | 0
 .../rules_specifications/same_wikidata.yaml | 0
 tests/conftest.py | 6 ++
 65 files changed, 102 insertions(+), 21 deletions(-)
 mode change 100644 => 100755 cli.py
 create mode 100644 pyproject.toml
 delete mode 100644 requirements.txt
 create mode 100644 src/analyser/cli.py
 rename {analyser => src/analyser}/config/__init__.py (100%)
 rename {analyser => src/analyser}/config/config.py (100%)
 rename {analyser => src/analyser}/config/default.yaml (100%)
 rename {analyser => src/analyser}/core/__init__.py (100%)
 rename {analyser => src/analyser}/core/assembler/__init__.py (100%)
 rename {analyser => src/analyser}/core/assembler/pipe_factory.py (100%)
 rename {analyser
=> src/analyser}/core/assembler/pipeline_assembler.py (100%) rename {analyser => src/analyser}/core/core.py (100%) rename {analyser => src/analyser}/core/deconstructor/__init__.py (100%) rename {analyser => src/analyser}/core/deconstructor/pipeline_deconstructor.py (100%) rename {analyser => src/analyser}/core/dynamic_value/__init__.py (100%) rename {analyser => src/analyser}/core/dynamic_value/dynamic_value.py (100%) rename {analyser => src/analyser}/core/dynamic_value/resolver.py (100%) rename {analyser => src/analyser}/core/dynamic_value/switch.py (100%) rename {analyser => src/analyser}/core/dynamic_value/variable.py (100%) rename {analyser => src/analyser}/core/exceptions/__init__.py (100%) rename {analyser => src/analyser}/core/exceptions/yaml_syntax_exception.py (100%) rename {analyser => src/analyser}/core/model/__init__.py (100%) rename {analyser => src/analyser}/core/model/geometry.py (100%) rename {analyser => src/analyser}/core/model/node.py (100%) rename {analyser => src/analyser}/core/pipe.py (100%) rename {analyser => src/analyser}/core/pipes/__init__.py (100%) rename {analyser => src/analyser}/core/pipes/data_fetching/__init__.py (100%) rename {analyser => src/analyser}/core/pipes/data_fetching/sql_processor.py (100%) rename {analyser => src/analyser}/core/pipes/data_processing/__init__.py (100%) rename {analyser => src/analyser}/core/pipes/data_processing/geometry_converter.py (100%) rename {analyser => src/analyser}/core/pipes/data_processing/loop_data_processor.py (100%) rename {analyser => src/analyser}/core/pipes/filling_pipe.py (100%) rename {analyser => src/analyser}/core/pipes/output_formatters/__init__.py (100%) rename {analyser => src/analyser}/core/pipes/output_formatters/clusters_vt_formatter.py (100%) rename {analyser => src/analyser}/core/pipes/output_formatters/geojson_feature_converter.py (100%) rename {analyser => src/analyser}/core/pipes/output_formatters/geojson_formatter.py (100%) rename {analyser => src/analyser}/core/pipes/output_formatters/osmoscope_layer_formatter.py (100%) rename {analyser => src/analyser}/core/pipes/output_formatters/vector_tile_formatter.py (100%) rename {analyser => src/analyser}/core/pipes/rules_specific_pipes/__init__.py (100%) rename {analyser => src/analyser}/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py (100%) rename {analyser => src/analyser}/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py (100%) rename {analyser => src/analyser}/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py (100%) rename {analyser => src/analyser}/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py (100%) rename {analyser => src/analyser}/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py (100%) rename {analyser => src/analyser}/core/qa_rule/__init__.py (100%) rename {analyser => src/analyser}/core/qa_rule/execution_context.py (100%) rename {analyser => src/analyser}/core/yaml_logic/__init__.py (100%) rename {analyser => src/analyser}/core/yaml_logic/yaml_loader.py (100%) rename {analyser => src/analyser}/database/__init__.py (100%) rename {analyser => src/analyser}/database/connection.py (100%) rename {analyser => src/analyser}/logger/logger.py (100%) rename {analyser => src/analyser}/logger/timer.py (100%) rename {analyser => src/analyser}/rules_specifications/BA_way_not_part_relation.yaml (100%) rename {analyser => src/analyser}/rules_specifications/addr_housenumber_no_digit.yaml (100%) rename {analyser => 
src/analyser}/rules_specifications/addr_place_and_street.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/addr_place_or_street_rank28.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/addr_street_wrong_name.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/bad_interpolations.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/duplicate_label_role.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/no_admin_level.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/place_nodes_close.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/postcode_bad_format.yaml (100%)
 rename {analyser => src/analyser}/rules_specifications/same_wikidata.yaml (100%)

diff --git a/.gitignore b/.gitignore
index 5786758..e6e8dc6 100755
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,9 @@
 *.pyc
+src/nominatim_data_analyser.egg-info
+dist
 analyser/config/config.yaml
 .mason
 mason_packages
-build
\ No newline at end of file
+build
diff --git a/cli.py b/cli.py
old mode 100644
new mode 100755
index 85874fb..4344dfd
--- a/cli.py
+++ b/cli.py
@@ -1,20 +1,9 @@
-from analyser.core.core import Core
-import argparse
+#!/usr/bin/python3
+import sys
+from pathlib import Path
 
-parser = argparse.ArgumentParser(prog='nominatim-analyser')
+sys.path.insert(1, str(Path(__file__, '..', 'src').resolve()))
 
-parser.add_argument('--execute-all', action='store_true', help='Executes all the QA rules')
-parser.add_argument('--filter', metavar='', nargs='+', help='Filters some QA rules so they are not executed.')
-parser.add_argument('--execute-one', metavar='', action='store', type=str, help='Executes the given QA rule')
+from analyser.cli import cli
 
-args = parser.parse_args()
-
-#Executes all the QA rules. If a filter is given, these rules are excluded from the execution.
-if args.execute_all:
-    if args.filter:
-        Core().execute_all(args.filter)
-    else:
-        Core().execute_all()
-elif args.execute_one:
-    #Execute the given QA rule.
-    Core().execute_one(args.execute_one)
\ No newline at end of file
+sys.exit(cli())
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..9b52f40
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,66 @@
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "nominatim-data-analyser"
+authors = [
+    {name = "Antonin Jolivat", email = "antonin.jolivat@gmail.com"}
+]
+maintainers = [
+    {name = "Sarah Hoffmann", email = "lonvia@denofr.de"}
+]
+description = "QA Tool for Nominatim. Helps to improve the OpenStreetMap data quality and therefore the Nominatim search results."
+readme = "README.md"
+requires-python = ">=3.6"
+license = {text = "GPL-2.0-or-later"}
+classifiers = [
+    "Programming Language :: Python :: 3",
+]
+dependencies = [
+    "pyYAML==5.3.1",
+    "geojson==2.5.0",
+    "psycopg2==2.8.4"
+]
+version = "0.1.0"
+
+[project.urls]
+Homepage = "https://github.com/osm-search/Nominatim-Data-Analyser"
+
+[project.optional-dependencies]
+tests = [
+    'pytest',
+]
+
+[tool.setuptools]
+packages = ["analyser",
+            "analyser.database",
+            "analyser.config",
+            "analyser.logger",
+            "analyser.core",
+            "analyser.core.exceptions",
+            "analyser.core.yaml_logic",
+            "analyser.core.dynamic_value",
+            "analyser.core.deconstructor",
+            "analyser.core.model",
+            "analyser.core.pipes",
+            "analyser.core.pipes.rules_specific_pipes",
+            "analyser.core.pipes.rules_specific_pipes.place_nodes_close",
+            "analyser.core.pipes.rules_specific_pipes.addresses_pipes",
+            "analyser.core.pipes.rules_specific_pipes.addr_house_number_no_digit",
+            "analyser.core.pipes.rules_specific_pipes.duplicate_label_role",
+            "analyser.core.pipes.rules_specific_pipes.same_wikidata",
+            "analyser.core.pipes.output_formatters",
+            "analyser.core.pipes.data_processing",
+            "analyser.core.pipes.data_fetching",
+            "analyser.core.qa_rule",
+            "analyser.core.assembler",
+]
+
+package-dir = {"" = "src"}
+
+[tool.setuptools.package-data]
+analyser = ["rules_specifications/*.yaml"]
+
+[tool.pytest.ini_options]
+testpaths = ["tests"]
diff --git a/requirements.txt b/requirements.txt
deleted file mode 100644
index 5ad301f..0000000
--- a/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-geojson==2.5.0
-psycopg2==2.8.4
-PyYAML==5.3.1
\ No newline at end of file
diff --git a/src/analyser/cli.py b/src/analyser/cli.py
new file mode 100644
index 0000000..9b2f379
--- /dev/null
+++ b/src/analyser/cli.py
@@ -0,0 +1,21 @@
+from analyser.core.core import Core
+import argparse
+
+def cli():
+    parser = argparse.ArgumentParser(prog='nominatim-analyser')
+
+    parser.add_argument('--execute-all', action='store_true', help='Executes all the QA rules')
+    parser.add_argument('--filter', metavar='', nargs='+', help='Filters some QA rules so they are not executed.')
+    parser.add_argument('--execute-one', metavar='', action='store', type=str, help='Executes the given QA rule')
+
+    args = parser.parse_args()
+
+    #Executes all the QA rules. If a filter is given, these rules are excluded from the execution.
+    if args.execute_all:
+        if args.filter:
+            Core().execute_all(args.filter)
+        else:
+            Core().execute_all()
+    elif args.execute_one:
+        #Execute the given QA rule.
+ Core().execute_one(args.execute_one) diff --git a/analyser/config/__init__.py b/src/analyser/config/__init__.py similarity index 100% rename from analyser/config/__init__.py rename to src/analyser/config/__init__.py diff --git a/analyser/config/config.py b/src/analyser/config/config.py similarity index 100% rename from analyser/config/config.py rename to src/analyser/config/config.py diff --git a/analyser/config/default.yaml b/src/analyser/config/default.yaml similarity index 100% rename from analyser/config/default.yaml rename to src/analyser/config/default.yaml diff --git a/analyser/core/__init__.py b/src/analyser/core/__init__.py similarity index 100% rename from analyser/core/__init__.py rename to src/analyser/core/__init__.py diff --git a/analyser/core/assembler/__init__.py b/src/analyser/core/assembler/__init__.py similarity index 100% rename from analyser/core/assembler/__init__.py rename to src/analyser/core/assembler/__init__.py diff --git a/analyser/core/assembler/pipe_factory.py b/src/analyser/core/assembler/pipe_factory.py similarity index 100% rename from analyser/core/assembler/pipe_factory.py rename to src/analyser/core/assembler/pipe_factory.py diff --git a/analyser/core/assembler/pipeline_assembler.py b/src/analyser/core/assembler/pipeline_assembler.py similarity index 100% rename from analyser/core/assembler/pipeline_assembler.py rename to src/analyser/core/assembler/pipeline_assembler.py diff --git a/analyser/core/core.py b/src/analyser/core/core.py similarity index 100% rename from analyser/core/core.py rename to src/analyser/core/core.py diff --git a/analyser/core/deconstructor/__init__.py b/src/analyser/core/deconstructor/__init__.py similarity index 100% rename from analyser/core/deconstructor/__init__.py rename to src/analyser/core/deconstructor/__init__.py diff --git a/analyser/core/deconstructor/pipeline_deconstructor.py b/src/analyser/core/deconstructor/pipeline_deconstructor.py similarity index 100% rename from analyser/core/deconstructor/pipeline_deconstructor.py rename to src/analyser/core/deconstructor/pipeline_deconstructor.py diff --git a/analyser/core/dynamic_value/__init__.py b/src/analyser/core/dynamic_value/__init__.py similarity index 100% rename from analyser/core/dynamic_value/__init__.py rename to src/analyser/core/dynamic_value/__init__.py diff --git a/analyser/core/dynamic_value/dynamic_value.py b/src/analyser/core/dynamic_value/dynamic_value.py similarity index 100% rename from analyser/core/dynamic_value/dynamic_value.py rename to src/analyser/core/dynamic_value/dynamic_value.py diff --git a/analyser/core/dynamic_value/resolver.py b/src/analyser/core/dynamic_value/resolver.py similarity index 100% rename from analyser/core/dynamic_value/resolver.py rename to src/analyser/core/dynamic_value/resolver.py diff --git a/analyser/core/dynamic_value/switch.py b/src/analyser/core/dynamic_value/switch.py similarity index 100% rename from analyser/core/dynamic_value/switch.py rename to src/analyser/core/dynamic_value/switch.py diff --git a/analyser/core/dynamic_value/variable.py b/src/analyser/core/dynamic_value/variable.py similarity index 100% rename from analyser/core/dynamic_value/variable.py rename to src/analyser/core/dynamic_value/variable.py diff --git a/analyser/core/exceptions/__init__.py b/src/analyser/core/exceptions/__init__.py similarity index 100% rename from analyser/core/exceptions/__init__.py rename to src/analyser/core/exceptions/__init__.py diff --git a/analyser/core/exceptions/yaml_syntax_exception.py 
b/src/analyser/core/exceptions/yaml_syntax_exception.py similarity index 100% rename from analyser/core/exceptions/yaml_syntax_exception.py rename to src/analyser/core/exceptions/yaml_syntax_exception.py diff --git a/analyser/core/model/__init__.py b/src/analyser/core/model/__init__.py similarity index 100% rename from analyser/core/model/__init__.py rename to src/analyser/core/model/__init__.py diff --git a/analyser/core/model/geometry.py b/src/analyser/core/model/geometry.py similarity index 100% rename from analyser/core/model/geometry.py rename to src/analyser/core/model/geometry.py diff --git a/analyser/core/model/node.py b/src/analyser/core/model/node.py similarity index 100% rename from analyser/core/model/node.py rename to src/analyser/core/model/node.py diff --git a/analyser/core/pipe.py b/src/analyser/core/pipe.py similarity index 100% rename from analyser/core/pipe.py rename to src/analyser/core/pipe.py diff --git a/analyser/core/pipes/__init__.py b/src/analyser/core/pipes/__init__.py similarity index 100% rename from analyser/core/pipes/__init__.py rename to src/analyser/core/pipes/__init__.py diff --git a/analyser/core/pipes/data_fetching/__init__.py b/src/analyser/core/pipes/data_fetching/__init__.py similarity index 100% rename from analyser/core/pipes/data_fetching/__init__.py rename to src/analyser/core/pipes/data_fetching/__init__.py diff --git a/analyser/core/pipes/data_fetching/sql_processor.py b/src/analyser/core/pipes/data_fetching/sql_processor.py similarity index 100% rename from analyser/core/pipes/data_fetching/sql_processor.py rename to src/analyser/core/pipes/data_fetching/sql_processor.py diff --git a/analyser/core/pipes/data_processing/__init__.py b/src/analyser/core/pipes/data_processing/__init__.py similarity index 100% rename from analyser/core/pipes/data_processing/__init__.py rename to src/analyser/core/pipes/data_processing/__init__.py diff --git a/analyser/core/pipes/data_processing/geometry_converter.py b/src/analyser/core/pipes/data_processing/geometry_converter.py similarity index 100% rename from analyser/core/pipes/data_processing/geometry_converter.py rename to src/analyser/core/pipes/data_processing/geometry_converter.py diff --git a/analyser/core/pipes/data_processing/loop_data_processor.py b/src/analyser/core/pipes/data_processing/loop_data_processor.py similarity index 100% rename from analyser/core/pipes/data_processing/loop_data_processor.py rename to src/analyser/core/pipes/data_processing/loop_data_processor.py diff --git a/analyser/core/pipes/filling_pipe.py b/src/analyser/core/pipes/filling_pipe.py similarity index 100% rename from analyser/core/pipes/filling_pipe.py rename to src/analyser/core/pipes/filling_pipe.py diff --git a/analyser/core/pipes/output_formatters/__init__.py b/src/analyser/core/pipes/output_formatters/__init__.py similarity index 100% rename from analyser/core/pipes/output_formatters/__init__.py rename to src/analyser/core/pipes/output_formatters/__init__.py diff --git a/analyser/core/pipes/output_formatters/clusters_vt_formatter.py b/src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py similarity index 100% rename from analyser/core/pipes/output_formatters/clusters_vt_formatter.py rename to src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py diff --git a/analyser/core/pipes/output_formatters/geojson_feature_converter.py b/src/analyser/core/pipes/output_formatters/geojson_feature_converter.py similarity index 100% rename from 
analyser/core/pipes/output_formatters/geojson_feature_converter.py rename to src/analyser/core/pipes/output_formatters/geojson_feature_converter.py diff --git a/analyser/core/pipes/output_formatters/geojson_formatter.py b/src/analyser/core/pipes/output_formatters/geojson_formatter.py similarity index 100% rename from analyser/core/pipes/output_formatters/geojson_formatter.py rename to src/analyser/core/pipes/output_formatters/geojson_formatter.py diff --git a/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py b/src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py similarity index 100% rename from analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py rename to src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py diff --git a/analyser/core/pipes/output_formatters/vector_tile_formatter.py b/src/analyser/core/pipes/output_formatters/vector_tile_formatter.py similarity index 100% rename from analyser/core/pipes/output_formatters/vector_tile_formatter.py rename to src/analyser/core/pipes/output_formatters/vector_tile_formatter.py diff --git a/analyser/core/pipes/rules_specific_pipes/__init__.py b/src/analyser/core/pipes/rules_specific_pipes/__init__.py similarity index 100% rename from analyser/core/pipes/rules_specific_pipes/__init__.py rename to src/analyser/core/pipes/rules_specific_pipes/__init__.py diff --git a/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py b/src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py similarity index 100% rename from analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py rename to src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py diff --git a/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py similarity index 100% rename from analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py rename to src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py diff --git a/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py similarity index 100% rename from analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py rename to src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py diff --git a/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py similarity index 100% rename from analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py rename to src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py diff --git a/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py similarity index 100% rename from analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py rename to src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py diff --git a/analyser/core/qa_rule/__init__.py b/src/analyser/core/qa_rule/__init__.py similarity index 100% rename 
from analyser/core/qa_rule/__init__.py rename to src/analyser/core/qa_rule/__init__.py diff --git a/analyser/core/qa_rule/execution_context.py b/src/analyser/core/qa_rule/execution_context.py similarity index 100% rename from analyser/core/qa_rule/execution_context.py rename to src/analyser/core/qa_rule/execution_context.py diff --git a/analyser/core/yaml_logic/__init__.py b/src/analyser/core/yaml_logic/__init__.py similarity index 100% rename from analyser/core/yaml_logic/__init__.py rename to src/analyser/core/yaml_logic/__init__.py diff --git a/analyser/core/yaml_logic/yaml_loader.py b/src/analyser/core/yaml_logic/yaml_loader.py similarity index 100% rename from analyser/core/yaml_logic/yaml_loader.py rename to src/analyser/core/yaml_logic/yaml_loader.py diff --git a/analyser/database/__init__.py b/src/analyser/database/__init__.py similarity index 100% rename from analyser/database/__init__.py rename to src/analyser/database/__init__.py diff --git a/analyser/database/connection.py b/src/analyser/database/connection.py similarity index 100% rename from analyser/database/connection.py rename to src/analyser/database/connection.py diff --git a/analyser/logger/logger.py b/src/analyser/logger/logger.py similarity index 100% rename from analyser/logger/logger.py rename to src/analyser/logger/logger.py diff --git a/analyser/logger/timer.py b/src/analyser/logger/timer.py similarity index 100% rename from analyser/logger/timer.py rename to src/analyser/logger/timer.py diff --git a/analyser/rules_specifications/BA_way_not_part_relation.yaml b/src/analyser/rules_specifications/BA_way_not_part_relation.yaml similarity index 100% rename from analyser/rules_specifications/BA_way_not_part_relation.yaml rename to src/analyser/rules_specifications/BA_way_not_part_relation.yaml diff --git a/analyser/rules_specifications/addr_housenumber_no_digit.yaml b/src/analyser/rules_specifications/addr_housenumber_no_digit.yaml similarity index 100% rename from analyser/rules_specifications/addr_housenumber_no_digit.yaml rename to src/analyser/rules_specifications/addr_housenumber_no_digit.yaml diff --git a/analyser/rules_specifications/addr_place_and_street.yaml b/src/analyser/rules_specifications/addr_place_and_street.yaml similarity index 100% rename from analyser/rules_specifications/addr_place_and_street.yaml rename to src/analyser/rules_specifications/addr_place_and_street.yaml diff --git a/analyser/rules_specifications/addr_place_or_street_rank28.yaml b/src/analyser/rules_specifications/addr_place_or_street_rank28.yaml similarity index 100% rename from analyser/rules_specifications/addr_place_or_street_rank28.yaml rename to src/analyser/rules_specifications/addr_place_or_street_rank28.yaml diff --git a/analyser/rules_specifications/addr_street_wrong_name.yaml b/src/analyser/rules_specifications/addr_street_wrong_name.yaml similarity index 100% rename from analyser/rules_specifications/addr_street_wrong_name.yaml rename to src/analyser/rules_specifications/addr_street_wrong_name.yaml diff --git a/analyser/rules_specifications/bad_interpolations.yaml b/src/analyser/rules_specifications/bad_interpolations.yaml similarity index 100% rename from analyser/rules_specifications/bad_interpolations.yaml rename to src/analyser/rules_specifications/bad_interpolations.yaml diff --git a/analyser/rules_specifications/duplicate_label_role.yaml b/src/analyser/rules_specifications/duplicate_label_role.yaml similarity index 100% rename from analyser/rules_specifications/duplicate_label_role.yaml rename to 
src/analyser/rules_specifications/duplicate_label_role.yaml
diff --git a/analyser/rules_specifications/no_admin_level.yaml b/src/analyser/rules_specifications/no_admin_level.yaml
similarity index 100%
rename from analyser/rules_specifications/no_admin_level.yaml
rename to src/analyser/rules_specifications/no_admin_level.yaml
diff --git a/analyser/rules_specifications/place_nodes_close.yaml b/src/analyser/rules_specifications/place_nodes_close.yaml
similarity index 100%
rename from analyser/rules_specifications/place_nodes_close.yaml
rename to src/analyser/rules_specifications/place_nodes_close.yaml
diff --git a/analyser/rules_specifications/postcode_bad_format.yaml b/src/analyser/rules_specifications/postcode_bad_format.yaml
similarity index 100%
rename from analyser/rules_specifications/postcode_bad_format.yaml
rename to src/analyser/rules_specifications/postcode_bad_format.yaml
diff --git a/analyser/rules_specifications/same_wikidata.yaml b/src/analyser/rules_specifications/same_wikidata.yaml
similarity index 100%
rename from analyser/rules_specifications/same_wikidata.yaml
rename to src/analyser/rules_specifications/same_wikidata.yaml
diff --git a/tests/conftest.py b/tests/conftest.py
index 1feeb55..45e716a 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,5 +1,11 @@
+import sys
+from pathlib import Path
 import psycopg2
 import pytest
+
+# always test against source
+sys.path.insert(1, str(Path(__file__, '..', '..', 'src').resolve()))
+
 from analyser.config import Config
 from analyser.core.pipes import FillingPipe
 from analyser.core.pipes.data_fetching.sql_processor import SQLProcessor

From 58af81f07920a20e9dfa7f7311d6d15c3e953d10 Mon Sep 17 00:00:00 2001
From: Sarah Hoffmann
Date: Tue, 24 Sep 2024 19:26:39 +0200
Subject: [PATCH 02/13] add cli as script

---
 pyproject.toml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index 9b52f40..038aaa0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -62,5 +62,8 @@ package-dir = {"" = "src"}
 [tool.setuptools.package-data]
 analyser = ["rules_specifications/*.yaml"]
 
+[project.scripts]
+nominatim-data-analyser = "analyser.cli:cli"
+
 [tool.pytest.ini_options]
 testpaths = ["tests"]

From f5ad7338c97ad585def26e2d72c2f889ad634ba9 Mon Sep 17 00:00:00 2001
From: Sarah Hoffmann
Date: Tue, 24 Sep 2024 19:28:15 +0200
Subject: [PATCH 03/13] remove exact version requirements for dependencies

---
 pyproject.toml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index 038aaa0..43a784f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,9 +18,9 @@ classifiers = [
     "Programming Language :: Python :: 3",
 ]
 dependencies = [
-    "pyYAML==5.3.1",
-    "geojson==2.5.0",
-    "psycopg2==2.8.4"
+    "pyYAML",
+    "geojson",
+    "psycopg2"
 ]
 version = "0.1.0"

From d2d3bd06cba6a4e60a4cc7bdf5c7ac18bfe619f9 Mon Sep 17 00:00:00 2001
From: Sarah Hoffmann
Date: Tue, 24 Sep 2024 20:23:35 +0200
Subject: [PATCH 04/13] convert all imports to relative imports

---
 src/analyser/cli.py | 2 +-
 src/analyser/config/config.py | 2 +-
 src/analyser/core/assembler/pipe_factory.py | 10 ++++------
 src/analyser/core/assembler/pipeline_assembler.py | 10 +++++-----
 src/analyser/core/core.py | 10 +++++-----
 .../core/deconstructor/pipeline_deconstructor.py | 2 +-
 src/analyser/core/dynamic_value/resolver.py | 2 +-
 src/analyser/core/dynamic_value/switch.py | 4 ++--
 src/analyser/core/dynamic_value/variable.py | 4 ++--
 src/analyser/core/pipe.py | 6 +++---
 src/analyser/core/pipes/data_fetching/sql_processor.py | 8
++++---- .../core/pipes/data_processing/geometry_converter.py | 9 ++++----- .../core/pipes/data_processing/loop_data_processor.py | 4 ++-- src/analyser/core/pipes/filling_pipe.py | 4 ++-- .../pipes/output_formatters/clusters_vt_formatter.py | 8 ++++---- .../output_formatters/geojson_feature_converter.py | 6 +++--- .../core/pipes/output_formatters/geojson_formatter.py | 4 ++-- .../output_formatters/osmoscope_layer_formatter.py | 6 +++--- .../pipes/output_formatters/vector_tile_formatter.py | 8 ++++---- .../addr_house_number_no_digit/digits_filter.py | 2 +- .../addresses_pipes/custom_feature_converter.py | 4 ++-- .../duplicate_label_role/custom_feature_converter.py | 4 ++-- .../place_nodes_close/custom_feature_converter.py | 4 ++-- .../same_wikidata/custom_feature_converter.py | 4 ++-- src/analyser/core/yaml_logic/yaml_loader.py | 10 +++++----- src/analyser/database/__init__.py | 2 +- 26 files changed, 68 insertions(+), 71 deletions(-) diff --git a/src/analyser/cli.py b/src/analyser/cli.py index 9b2f379..9aeb924 100644 --- a/src/analyser/cli.py +++ b/src/analyser/cli.py @@ -1,4 +1,4 @@ -from analyser.core.core import Core +from .core.core import Core import argparse def cli(): diff --git a/src/analyser/config/config.py b/src/analyser/config/config.py index 06310ff..bd28fcc 100644 --- a/src/analyser/config/config.py +++ b/src/analyser/config/config.py @@ -1,4 +1,4 @@ -from analyser.logger.logger import LOG +from ..logger.logger import LOG from pathlib import Path import yaml diff --git a/src/analyser/core/assembler/pipe_factory.py b/src/analyser/core/assembler/pipe_factory.py index 1bc529e..0c40a10 100644 --- a/src/analyser/core/assembler/pipe_factory.py +++ b/src/analyser/core/assembler/pipe_factory.py @@ -1,10 +1,10 @@ from __future__ import annotations -from analyser.core.exceptions import YAMLSyntaxException -import importlib +from ..exceptions import YAMLSyntaxException +from .. 
import pipes as pipes_module import typing if typing.TYPE_CHECKING: # pragma: no cover - from analyser.core.qa_rule import ExecutionContext + from ..qa_rule import ExecutionContext class PipeFactory(): """ @@ -18,10 +18,8 @@ def assemble_pipe(node_data: dict, exec_context: ExecutionContext): if 'type' not in node_data: raise YAMLSyntaxException("Each node of the tree (pipe) should have a type defined.") - module = importlib.import_module('analyser.core.pipes') - try: - assembled_pipe = getattr(module, node_data['type'])(node_data, exec_context) + assembled_pipe = getattr(pipes_module, node_data['type'])(node_data, exec_context) except AttributeError: raise YAMLSyntaxException(f"The type {node_data['type']} doesn't exist.") diff --git a/src/analyser/core/assembler/pipeline_assembler.py b/src/analyser/core/assembler/pipeline_assembler.py index 13fde14..c241d21 100644 --- a/src/analyser/core/assembler/pipeline_assembler.py +++ b/src/analyser/core/assembler/pipeline_assembler.py @@ -1,15 +1,15 @@ from __future__ import annotations -from analyser.core.deconstructor import PipelineDeconstructor, BACKTRACKING_EVENT, NEW_NODE_EVENT -from analyser.core.pipes import FillingPipe -from analyser.core.qa_rule import ExecutionContext -from analyser.logger.logger import LOG +from ..deconstructor import PipelineDeconstructor, BACKTRACKING_EVENT, NEW_NODE_EVENT +from ..pipes import FillingPipe +from ..qa_rule import ExecutionContext +from ...logger.logger import LOG from collections import deque from .pipe_factory import PipeFactory from typing import Deque, Dict import typing if typing.TYPE_CHECKING: # pragma: no cover - from analyser.core import Pipe + from .. import Pipe class PipelineAssembler(): """ diff --git a/src/analyser/core/core.py b/src/analyser/core/core.py index 618e250..20ead65 100644 --- a/src/analyser/core/core.py +++ b/src/analyser/core/core.py @@ -1,8 +1,8 @@ -from analyser.core.yaml_logic.yaml_loader import load_yaml_rule -from analyser.core.assembler.pipeline_assembler import PipelineAssembler -from analyser.logger.logger import LOG -from analyser.logger.timer import Timer -from analyser.config import Config +from .yaml_logic.yaml_loader import load_yaml_rule +from .assembler.pipeline_assembler import PipelineAssembler +from ..logger.logger import LOG +from ..logger.timer import Timer +from ..config import Config from pathlib import Path from typing import Dict diff --git a/src/analyser/core/deconstructor/pipeline_deconstructor.py b/src/analyser/core/deconstructor/pipeline_deconstructor.py index 4060813..8f3dae2 100644 --- a/src/analyser/core/deconstructor/pipeline_deconstructor.py +++ b/src/analyser/core/deconstructor/pipeline_deconstructor.py @@ -1,6 +1,6 @@ from typing import Callable, Deque, Dict, List from collections import deque -from analyser.logger.logger import LOG +from ...logger.logger import LOG NEW_NODE_EVENT = 'new_node' BACKTRACKING_EVENT = 'backtracking' diff --git a/src/analyser/core/dynamic_value/resolver.py b/src/analyser/core/dynamic_value/resolver.py index b473e7f..5381ac4 100644 --- a/src/analyser/core/dynamic_value/resolver.py +++ b/src/analyser/core/dynamic_value/resolver.py @@ -1,5 +1,5 @@ -from analyser.core.dynamic_value import DynamicValue +from . 
import DynamicValue from typing import Any, Dict, Iterable def resolve_all(data_to_resolve: Any, resolver_data: Dict, ) -> Any: diff --git a/src/analyser/core/dynamic_value/switch.py b/src/analyser/core/dynamic_value/switch.py index b5da57a..9cd95ff 100644 --- a/src/analyser/core/dynamic_value/switch.py +++ b/src/analyser/core/dynamic_value/switch.py @@ -1,4 +1,4 @@ -from analyser.core.dynamic_value import DynamicValue +from . import DynamicValue from typing import Any, Dict class Switch(DynamicValue): @@ -18,4 +18,4 @@ def resolve(self, data: Dict) -> Any: if data[self.expression] not in self.cases: raise Exception(f'The case {data[self.expression]} is not in the configured switch cases: {list(self.cases.keys())}') - return self.cases[data[self.expression]] \ No newline at end of file + return self.cases[data[self.expression]] diff --git a/src/analyser/core/dynamic_value/variable.py b/src/analyser/core/dynamic_value/variable.py index ec9af36..24dbaeb 100644 --- a/src/analyser/core/dynamic_value/variable.py +++ b/src/analyser/core/dynamic_value/variable.py @@ -1,4 +1,4 @@ -from analyser.core.dynamic_value import DynamicValue +from . import DynamicValue from typing import Any, Dict class Variable(DynamicValue): @@ -13,4 +13,4 @@ def resolve(self, data: Dict) -> Any: if self.name not in data: raise Exception(f'The variable name {self.name} was not found in the input dictionnary.') - return data[self.name] \ No newline at end of file + return data[self.name] diff --git a/src/analyser/core/pipe.py b/src/analyser/core/pipe.py index c2df12d..3c231c7 100644 --- a/src/analyser/core/pipe.py +++ b/src/analyser/core/pipe.py @@ -1,13 +1,13 @@ from __future__ import annotations from abc import ABCMeta, abstractmethod -from analyser.logger.logger import LOG -from analyser.core.exceptions import YAMLSyntaxException +from ..logger.logger import LOG +from .exceptions import YAMLSyntaxException import typing import uuid import logging if typing.TYPE_CHECKING: # pragma: no cover - from analyser.core.qa_rule import ExecutionContext + from .qa_rule import ExecutionContext class Pipe(metaclass=ABCMeta): """ diff --git a/src/analyser/core/pipes/data_fetching/sql_processor.py b/src/analyser/core/pipes/data_fetching/sql_processor.py index 645fa82..7bf247e 100644 --- a/src/analyser/core/pipes/data_fetching/sql_processor.py +++ b/src/analyser/core/pipes/data_fetching/sql_processor.py @@ -3,10 +3,10 @@ from typing import Dict, List import psycopg2.extras -from analyser.config.config import Config -from analyser.core import Pipe -from analyser.database.connection import connect -from analyser.logger.timer import Timer +from ....config.config import Config +from ... import Pipe +from ....database.connection import connect +from ....logger.timer import Timer from psycopg2._psycopg import connection diff --git a/src/analyser/core/pipes/data_processing/geometry_converter.py b/src/analyser/core/pipes/data_processing/geometry_converter.py index b1f9647..e199a91 100644 --- a/src/analyser/core/pipes/data_processing/geometry_converter.py +++ b/src/analyser/core/pipes/data_processing/geometry_converter.py @@ -1,7 +1,7 @@ from __future__ import annotations -import importlib from typing import Dict -from analyser.core import Pipe +from ... import Pipe +from ... import model as core_model class GeometryConverter(Pipe): """ @@ -18,8 +18,7 @@ def process(self, data: Dict) -> Dict: #If one data doesn't contain a geometry_holder it should be ignored. 
if (data['geometry_holder'] is None): return None - module = importlib.import_module('analyser.core.model') - dclass = getattr(module, self.geometry_type) + dclass = getattr(core_model, self.geometry_type) convert = getattr(dclass, 'create_from_WKT_string') data['geometry_holder'] = convert(data['geometry_holder']) - return data \ No newline at end of file + return data diff --git a/src/analyser/core/pipes/data_processing/loop_data_processor.py b/src/analyser/core/pipes/data_processing/loop_data_processor.py index dc25495..7331d77 100644 --- a/src/analyser/core/pipes/data_processing/loop_data_processor.py +++ b/src/analyser/core/pipes/data_processing/loop_data_processor.py @@ -1,6 +1,6 @@ from __future__ import annotations -from analyser.logger.timer import Timer -from analyser.core import Pipe +from ....logger.timer import Timer +from ... import Pipe from typing import List class LoopDataProcessor(Pipe): diff --git a/src/analyser/core/pipes/filling_pipe.py b/src/analyser/core/pipes/filling_pipe.py index 6b649cb..44e9067 100644 --- a/src/analyser/core/pipes/filling_pipe.py +++ b/src/analyser/core/pipes/filling_pipe.py @@ -1,5 +1,5 @@ from __future__ import annotations -from analyser.core import Pipe +from ...core import Pipe class FillingPipe(Pipe): """ @@ -10,4 +10,4 @@ def process(self, data: any = None) -> any: """ Contains the execution logic of this pipe. """ - return data \ No newline at end of file + return data diff --git a/src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py b/src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py index b6dad67..547a7e4 100644 --- a/src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py +++ b/src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py @@ -1,9 +1,9 @@ from __future__ import annotations from geojson.feature import Feature, FeatureCollection from geojson import dumps -from analyser.logger.timer import Timer -from analyser.config import Config -from analyser.core import Pipe +from ....logger.timer import Timer +from ....config import Config +from ... import Pipe from pathlib import Path from typing import List import subprocess @@ -51,4 +51,4 @@ def call_clustering_vt(self, output_dir: Path, feature_collection: FeatureCollec except subprocess.TimeoutExpired as e: self.log(e, logging.FATAL) except subprocess.CalledProcessError as e: - self.log(e, logging.FATAL) \ No newline at end of file + self.log(e, logging.FATAL) diff --git a/src/analyser/core/pipes/output_formatters/geojson_feature_converter.py b/src/analyser/core/pipes/output_formatters/geojson_feature_converter.py index f0dd6ab..9bd5762 100644 --- a/src/analyser/core/pipes/output_formatters/geojson_feature_converter.py +++ b/src/analyser/core/pipes/output_formatters/geojson_feature_converter.py @@ -1,7 +1,7 @@ from __future__ import annotations -from analyser.core.dynamic_value import resolve_all +from ...dynamic_value import resolve_all from typing import Dict -from analyser.core import Pipe +from ... 
import Pipe from geojson import Feature class GeoJSONFeatureConverter(Pipe): @@ -26,4 +26,4 @@ def process(self, elements: Dict) -> Feature: for k, v in resolved_value.items(): properties[k] = v returned_geom = elements.pop('geometry_holder').to_geojson_feature(self.current_id, properties) - return returned_geom \ No newline at end of file + return returned_geom diff --git a/src/analyser/core/pipes/output_formatters/geojson_formatter.py b/src/analyser/core/pipes/output_formatters/geojson_formatter.py index 9274fb0..95b2f48 100644 --- a/src/analyser/core/pipes/output_formatters/geojson_formatter.py +++ b/src/analyser/core/pipes/output_formatters/geojson_formatter.py @@ -1,8 +1,8 @@ from __future__ import annotations -from analyser.config import Config +from ....config import Config from typing import List from geojson.feature import Feature -from analyser.core import Pipe +from ....core import Pipe from pathlib import Path from geojson import FeatureCollection, dump diff --git a/src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py b/src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py index 8092128..b583a57 100644 --- a/src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py +++ b/src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py @@ -1,7 +1,7 @@ from __future__ import annotations -from analyser.database.connection import connect -from analyser.config import Config -from analyser.core import Pipe +from ....database.connection import connect +from ....config import Config +from ... import Pipe from pathlib import Path import json diff --git a/src/analyser/core/pipes/output_formatters/vector_tile_formatter.py b/src/analyser/core/pipes/output_formatters/vector_tile_formatter.py index 869f8ed..69c4675 100644 --- a/src/analyser/core/pipes/output_formatters/vector_tile_formatter.py +++ b/src/analyser/core/pipes/output_formatters/vector_tile_formatter.py @@ -1,9 +1,9 @@ from __future__ import annotations from geojson.feature import Feature, FeatureCollection from geojson import dumps -from analyser.logger.timer import Timer -from analyser.config import Config -from analyser.core import Pipe +from ....logger.timer import Timer +from ....config import Config +from ... 
import Pipe from pathlib import Path from typing import List import subprocess @@ -57,4 +57,4 @@ def call_tippecanoe(self, output_dir: Path, feature_collection: FeatureCollectio except subprocess.TimeoutExpired as e: self.log(e, logging.FATAL) except subprocess.CalledProcessError as e: - self.log(e, logging.FATAL) \ No newline at end of file + self.log(e, logging.FATAL) diff --git a/src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py b/src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py index 3dc1148..d6598ce 100644 --- a/src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py +++ b/src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py @@ -1,4 +1,4 @@ -from analyser.core.pipe import Pipe +from ....pipe import Pipe from typing import Dict import re diff --git a/src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py index a265e59..1422019 100644 --- a/src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py +++ b/src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py @@ -1,4 +1,4 @@ -from analyser.core.pipe import Pipe +from ....pipe import Pipe from geojson.feature import Feature from typing import Dict @@ -25,4 +25,4 @@ def on_created(self) -> None: # for a in address: # print(a) - # return elements.pop('geometry_holder').to_geojson_feature(self.current_feature_id, properties) \ No newline at end of file + # return elements.pop('geometry_holder').to_geojson_feature(self.current_feature_id, properties) diff --git a/src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py index f4afb3a..a084994 100644 --- a/src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py +++ b/src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py @@ -1,4 +1,4 @@ -from analyser.core.pipe import Pipe +from ....pipe import Pipe from geojson.feature import Feature from typing import Dict @@ -34,4 +34,4 @@ def process(self, elements: Dict) -> Feature: type = members[i][0] properties[f'{type}/@idLabel {label_members_count}'] = members[i][1:] - return elements.pop('geometry_holder').to_geojson_feature(self.current_feature_id, properties) \ No newline at end of file + return elements.pop('geometry_holder').to_geojson_feature(self.current_feature_id, properties) diff --git a/src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py index 8fd66d8..d8fc2fb 100644 --- a/src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py +++ b/src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py @@ -1,4 +1,4 @@ -from analyser.core.pipe import Pipe +from .....core.pipe import Pipe from geojson.feature import Feature from typing import Dict @@ -20,4 +20,4 @@ def process(self, elements: Dict) -> Feature: for i in range(len(common_ids)): properties[f'n/@idClose node {i+1}'] = common_ids[i] - return elements.pop('geometry_holder').to_geojson_feature(self.current_feature_id, properties) \ No newline at end of file + 
return elements.pop('geometry_holder').to_geojson_feature(self.current_feature_id, properties) diff --git a/src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py b/src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py index 24c1110..a2337db 100644 --- a/src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py +++ b/src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py @@ -1,6 +1,6 @@ -from analyser.core.pipe import Pipe +from ....pipe import Pipe from typing import List -from analyser.core.model.node import Node +from ....model.node import Node from geojson import Feature class SameWikiDataFeatureConverter(Pipe): diff --git a/src/analyser/core/yaml_logic/yaml_loader.py b/src/analyser/core/yaml_logic/yaml_loader.py index 4100477..a2aee6c 100644 --- a/src/analyser/core/yaml_logic/yaml_loader.py +++ b/src/analyser/core/yaml_logic/yaml_loader.py @@ -1,7 +1,7 @@ -from analyser.core.dynamic_value.switch import Switch -from analyser.core.dynamic_value.variable import Variable -from analyser.core.assembler import PipelineAssembler -from analyser.logger.logger import LOG +from ..dynamic_value.switch import Switch +from ..dynamic_value.variable import Variable +from ..assembler import PipelineAssembler +from ...logger.logger import LOG from pathlib import Path import yaml @@ -48,4 +48,4 @@ def switch_constructor(loader: yaml.SafeLoader, node: yaml.MappingNode): Creates a Switch object using the node's data. """ data = loader.construct_mapping(node, deep=True) - return Switch(data['expression'], data['cases']) \ No newline at end of file + return Switch(data['expression'], data['cases']) diff --git a/src/analyser/database/__init__.py b/src/analyser/database/__init__.py index 0a7e649..e2d1579 100644 --- a/src/analyser/database/__init__.py +++ b/src/analyser/database/__init__.py @@ -2,4 +2,4 @@ Module handling the database. 
""" -from analyser.database.connection import connect +from .connection import connect From 886901c064c1238c92bdab18f1e6203627262bfa Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Tue, 24 Sep 2024 21:51:29 +0200 Subject: [PATCH 05/13] rename package to nominatim_data_analyser --- cli.py | 2 +- pyproject.toml | 46 +++++++++---------- .../cli.py | 0 .../config/__init__.py | 0 .../config/config.py | 0 .../config/default.yaml | 0 .../core/__init__.py | 0 .../core/assembler/__init__.py | 0 .../core/assembler/pipe_factory.py | 0 .../core/assembler/pipeline_assembler.py | 0 .../core/core.py | 0 .../core/deconstructor/__init__.py | 0 .../deconstructor/pipeline_deconstructor.py | 0 .../core/dynamic_value/__init__.py | 0 .../core/dynamic_value/dynamic_value.py | 0 .../core/dynamic_value/resolver.py | 0 .../core/dynamic_value/switch.py | 0 .../core/dynamic_value/variable.py | 0 .../core/exceptions/__init__.py | 0 .../core/exceptions/yaml_syntax_exception.py | 0 .../core/model/__init__.py | 0 .../core/model/geometry.py | 0 .../core/model/node.py | 0 .../core/pipe.py | 0 .../core/pipes/__init__.py | 0 .../core/pipes/data_fetching/__init__.py | 0 .../core/pipes/data_fetching/sql_processor.py | 0 .../core/pipes/data_processing/__init__.py | 0 .../data_processing/geometry_converter.py | 0 .../data_processing/loop_data_processor.py | 0 .../core/pipes/filling_pipe.py | 0 .../core/pipes/output_formatters/__init__.py | 0 .../clusters_vt_formatter.py | 0 .../geojson_feature_converter.py | 0 .../output_formatters/geojson_formatter.py | 0 .../osmoscope_layer_formatter.py | 0 .../vector_tile_formatter.py | 0 .../pipes/rules_specific_pipes/__init__.py | 0 .../digits_filter.py | 0 .../custom_feature_converter.py | 0 .../custom_feature_converter.py | 0 .../custom_feature_converter.py | 0 .../same_wikidata/custom_feature_converter.py | 0 .../core/qa_rule/__init__.py | 0 .../core/qa_rule/execution_context.py | 0 .../core/yaml_logic/__init__.py | 0 .../core/yaml_logic/yaml_loader.py | 0 .../database/__init__.py | 0 .../database/connection.py | 0 .../logger/logger.py | 0 .../logger/timer.py | 0 .../BA_way_not_part_relation.yaml | 0 .../addr_housenumber_no_digit.yaml | 0 .../addr_place_and_street.yaml | 0 .../addr_place_or_street_rank28.yaml | 0 .../addr_street_wrong_name.yaml | 0 .../bad_interpolations.yaml | 0 .../duplicate_label_role.yaml | 0 .../rules_specifications/no_admin_level.yaml | 0 .../place_nodes_close.yaml | 0 .../postcode_bad_format.yaml | 0 .../rules_specifications/same_wikidata.yaml | 0 tests/assembler/test_pipe_factory.py | 8 ++-- tests/assembler/test_pipeline_assembler.py | 14 +++--- tests/config/test_config.py | 4 +- tests/conftest.py | 14 +++--- tests/core/test_core.py | 6 +-- tests/database/test_connection.py | 2 +- .../test_pipeline_deconstructor.py | 14 +++--- tests/dynamic_value/test_resolver.py | 4 +- tests/dynamic_value/test_switch.py | 4 +- tests/dynamic_value/test_variable.py | 2 +- tests/model/test_node.py | 2 +- .../test_geojson_feature_converter.py | 6 +-- .../test_geojson_formatter.py | 6 +-- .../test_osmoscope_layer_formatter.py | 4 +- .../test_vector_tile_formatter.py | 8 ++-- .../test_digits_filter.py | 4 +- .../test_duplicate_label_role_CFC.py | 6 +-- .../test_place_nodes_close_CFC.py | 6 +-- .../same_wikidata/test_same_wikidata_CFC.py | 4 +- tests/pipes/test_geometry_converter.py | 4 +- tests/pipes/test_loop_data_processor.py | 12 ++--- tests/pipes/test_pipe.py | 12 ++--- tests/pipes/test_sql_processor.py | 2 +- tests/yaml_logic/test_yaml_loader.py | 12 ++--- 86 files changed, 
104 insertions(+), 104 deletions(-) rename src/{analyser => nominatim_data_analyser}/cli.py (100%) rename src/{analyser => nominatim_data_analyser}/config/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/config/config.py (100%) rename src/{analyser => nominatim_data_analyser}/config/default.yaml (100%) rename src/{analyser => nominatim_data_analyser}/core/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/assembler/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/assembler/pipe_factory.py (100%) rename src/{analyser => nominatim_data_analyser}/core/assembler/pipeline_assembler.py (100%) rename src/{analyser => nominatim_data_analyser}/core/core.py (100%) rename src/{analyser => nominatim_data_analyser}/core/deconstructor/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/deconstructor/pipeline_deconstructor.py (100%) rename src/{analyser => nominatim_data_analyser}/core/dynamic_value/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/dynamic_value/dynamic_value.py (100%) rename src/{analyser => nominatim_data_analyser}/core/dynamic_value/resolver.py (100%) rename src/{analyser => nominatim_data_analyser}/core/dynamic_value/switch.py (100%) rename src/{analyser => nominatim_data_analyser}/core/dynamic_value/variable.py (100%) rename src/{analyser => nominatim_data_analyser}/core/exceptions/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/exceptions/yaml_syntax_exception.py (100%) rename src/{analyser => nominatim_data_analyser}/core/model/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/model/geometry.py (100%) rename src/{analyser => nominatim_data_analyser}/core/model/node.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipe.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/data_fetching/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/data_fetching/sql_processor.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/data_processing/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/data_processing/geometry_converter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/data_processing/loop_data_processor.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/filling_pipe.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/output_formatters/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/output_formatters/clusters_vt_formatter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/output_formatters/geojson_feature_converter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/output_formatters/geojson_formatter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/output_formatters/osmoscope_layer_formatter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/output_formatters/vector_tile_formatter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/rules_specific_pipes/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py (100%) rename src/{analyser => 
nominatim_data_analyser}/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py (100%) rename src/{analyser => nominatim_data_analyser}/core/qa_rule/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/qa_rule/execution_context.py (100%) rename src/{analyser => nominatim_data_analyser}/core/yaml_logic/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/core/yaml_logic/yaml_loader.py (100%) rename src/{analyser => nominatim_data_analyser}/database/__init__.py (100%) rename src/{analyser => nominatim_data_analyser}/database/connection.py (100%) rename src/{analyser => nominatim_data_analyser}/logger/logger.py (100%) rename src/{analyser => nominatim_data_analyser}/logger/timer.py (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/BA_way_not_part_relation.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/addr_housenumber_no_digit.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/addr_place_and_street.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/addr_place_or_street_rank28.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/addr_street_wrong_name.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/bad_interpolations.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/duplicate_label_role.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/no_admin_level.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/place_nodes_close.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/postcode_bad_format.yaml (100%) rename src/{analyser => nominatim_data_analyser}/rules_specifications/same_wikidata.yaml (100%) diff --git a/cli.py b/cli.py index 4344dfd..c0507e2 100755 --- a/cli.py +++ b/cli.py @@ -4,6 +4,6 @@ sys.path.insert(1, str(Path(__file__, '..', 'src').resolve())) -from analyser.cli import cli +from nominatim_data_analyser.cli import cli sys.exit(cli()) diff --git a/pyproject.toml b/pyproject.toml index 43a784f..cee571b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,28 +33,28 @@ tests = [ ] [tool.setuptools] -packages = ["analyser", - "analyser.database", - "analyser.config", - "analyser.logger", - "analyser.core", - "analyser.core.exceptions", - "analyser.core.yaml_logic", - "analyser.core.dynamic_value", - "analyser.core.deconstructor", - "analyser.core.model", - "analyser.core.pipes", - "analyser.core.pipes.rules_specific_pipes", - "analyser.core.pipes.rules_specific_pipes.place_nodes_close", - "analyser.core.pipes.rules_specific_pipes.addresses_pipes", - "analyser.core.pipes.rules_specific_pipes.addr_house_number_no_digit", - "analyser.core.pipes.rules_specific_pipes.duplicate_label_role", - "analyser.core.pipes.rules_specific_pipes.same_wikidata", - "analyser.core.pipes.output_formatters", - "analyser.core.pipes.data_processing", - "analyser.core.pipes.data_fetching", - "analyser.core.qa_rule", - "analyser.core.assembler", +packages = ["nominatim_data_analyser", + "nominatim_data_analyser.database", + "nominatim_data_analyser.config", + 
"nominatim_data_analyser.logger", + "nominatim_data_analyser.core", + "nominatim_data_analyser.core.exceptions", + "nominatim_data_analyser.core.yaml_logic", + "nominatim_data_analyser.core.dynamic_value", + "nominatim_data_analyser.core.deconstructor", + "nominatim_data_analyser.core.model", + "nominatim_data_analyser.core.pipes", + "nominatim_data_analyser.core.pipes.rules_specific_pipes", + "nominatim_data_analyser.core.pipes.rules_specific_pipes.place_nodes_close", + "nominatim_data_analyser.core.pipes.rules_specific_pipes.addresses_pipes", + "nominatim_data_analyser.core.pipes.rules_specific_pipes.addr_house_number_no_digit", + "nominatim_data_analyser.core.pipes.rules_specific_pipes.duplicate_label_role", + "nominatim_data_analyser.core.pipes.rules_specific_pipes.same_wikidata", + "nominatim_data_analyser.core.pipes.output_formatters", + "nominatim_data_analyser.core.pipes.data_processing", + "nominatim_data_analyser.core.pipes.data_fetching", + "nominatim_data_analyser.core.qa_rule", + "nominatim_data_analyser.core.assembler", ] package-dir = {"" = "src"} @@ -63,7 +63,7 @@ package-dir = {"" = "src"} analyser = ["rules_specifications/*.yaml"] [project.scripts] -nominatim-data-analyser = "analyser.cli:cli" +nominatim-data-analyser = "nominatim_data_analyser.cli:cli" [tool.pytest.ini_options] testpaths = ["tests"] diff --git a/src/analyser/cli.py b/src/nominatim_data_analyser/cli.py similarity index 100% rename from src/analyser/cli.py rename to src/nominatim_data_analyser/cli.py diff --git a/src/analyser/config/__init__.py b/src/nominatim_data_analyser/config/__init__.py similarity index 100% rename from src/analyser/config/__init__.py rename to src/nominatim_data_analyser/config/__init__.py diff --git a/src/analyser/config/config.py b/src/nominatim_data_analyser/config/config.py similarity index 100% rename from src/analyser/config/config.py rename to src/nominatim_data_analyser/config/config.py diff --git a/src/analyser/config/default.yaml b/src/nominatim_data_analyser/config/default.yaml similarity index 100% rename from src/analyser/config/default.yaml rename to src/nominatim_data_analyser/config/default.yaml diff --git a/src/analyser/core/__init__.py b/src/nominatim_data_analyser/core/__init__.py similarity index 100% rename from src/analyser/core/__init__.py rename to src/nominatim_data_analyser/core/__init__.py diff --git a/src/analyser/core/assembler/__init__.py b/src/nominatim_data_analyser/core/assembler/__init__.py similarity index 100% rename from src/analyser/core/assembler/__init__.py rename to src/nominatim_data_analyser/core/assembler/__init__.py diff --git a/src/analyser/core/assembler/pipe_factory.py b/src/nominatim_data_analyser/core/assembler/pipe_factory.py similarity index 100% rename from src/analyser/core/assembler/pipe_factory.py rename to src/nominatim_data_analyser/core/assembler/pipe_factory.py diff --git a/src/analyser/core/assembler/pipeline_assembler.py b/src/nominatim_data_analyser/core/assembler/pipeline_assembler.py similarity index 100% rename from src/analyser/core/assembler/pipeline_assembler.py rename to src/nominatim_data_analyser/core/assembler/pipeline_assembler.py diff --git a/src/analyser/core/core.py b/src/nominatim_data_analyser/core/core.py similarity index 100% rename from src/analyser/core/core.py rename to src/nominatim_data_analyser/core/core.py diff --git a/src/analyser/core/deconstructor/__init__.py b/src/nominatim_data_analyser/core/deconstructor/__init__.py similarity index 100% rename from 
src/analyser/core/deconstructor/__init__.py rename to src/nominatim_data_analyser/core/deconstructor/__init__.py diff --git a/src/analyser/core/deconstructor/pipeline_deconstructor.py b/src/nominatim_data_analyser/core/deconstructor/pipeline_deconstructor.py similarity index 100% rename from src/analyser/core/deconstructor/pipeline_deconstructor.py rename to src/nominatim_data_analyser/core/deconstructor/pipeline_deconstructor.py diff --git a/src/analyser/core/dynamic_value/__init__.py b/src/nominatim_data_analyser/core/dynamic_value/__init__.py similarity index 100% rename from src/analyser/core/dynamic_value/__init__.py rename to src/nominatim_data_analyser/core/dynamic_value/__init__.py diff --git a/src/analyser/core/dynamic_value/dynamic_value.py b/src/nominatim_data_analyser/core/dynamic_value/dynamic_value.py similarity index 100% rename from src/analyser/core/dynamic_value/dynamic_value.py rename to src/nominatim_data_analyser/core/dynamic_value/dynamic_value.py diff --git a/src/analyser/core/dynamic_value/resolver.py b/src/nominatim_data_analyser/core/dynamic_value/resolver.py similarity index 100% rename from src/analyser/core/dynamic_value/resolver.py rename to src/nominatim_data_analyser/core/dynamic_value/resolver.py diff --git a/src/analyser/core/dynamic_value/switch.py b/src/nominatim_data_analyser/core/dynamic_value/switch.py similarity index 100% rename from src/analyser/core/dynamic_value/switch.py rename to src/nominatim_data_analyser/core/dynamic_value/switch.py diff --git a/src/analyser/core/dynamic_value/variable.py b/src/nominatim_data_analyser/core/dynamic_value/variable.py similarity index 100% rename from src/analyser/core/dynamic_value/variable.py rename to src/nominatim_data_analyser/core/dynamic_value/variable.py diff --git a/src/analyser/core/exceptions/__init__.py b/src/nominatim_data_analyser/core/exceptions/__init__.py similarity index 100% rename from src/analyser/core/exceptions/__init__.py rename to src/nominatim_data_analyser/core/exceptions/__init__.py diff --git a/src/analyser/core/exceptions/yaml_syntax_exception.py b/src/nominatim_data_analyser/core/exceptions/yaml_syntax_exception.py similarity index 100% rename from src/analyser/core/exceptions/yaml_syntax_exception.py rename to src/nominatim_data_analyser/core/exceptions/yaml_syntax_exception.py diff --git a/src/analyser/core/model/__init__.py b/src/nominatim_data_analyser/core/model/__init__.py similarity index 100% rename from src/analyser/core/model/__init__.py rename to src/nominatim_data_analyser/core/model/__init__.py diff --git a/src/analyser/core/model/geometry.py b/src/nominatim_data_analyser/core/model/geometry.py similarity index 100% rename from src/analyser/core/model/geometry.py rename to src/nominatim_data_analyser/core/model/geometry.py diff --git a/src/analyser/core/model/node.py b/src/nominatim_data_analyser/core/model/node.py similarity index 100% rename from src/analyser/core/model/node.py rename to src/nominatim_data_analyser/core/model/node.py diff --git a/src/analyser/core/pipe.py b/src/nominatim_data_analyser/core/pipe.py similarity index 100% rename from src/analyser/core/pipe.py rename to src/nominatim_data_analyser/core/pipe.py diff --git a/src/analyser/core/pipes/__init__.py b/src/nominatim_data_analyser/core/pipes/__init__.py similarity index 100% rename from src/analyser/core/pipes/__init__.py rename to src/nominatim_data_analyser/core/pipes/__init__.py diff --git a/src/analyser/core/pipes/data_fetching/__init__.py 
b/src/nominatim_data_analyser/core/pipes/data_fetching/__init__.py similarity index 100% rename from src/analyser/core/pipes/data_fetching/__init__.py rename to src/nominatim_data_analyser/core/pipes/data_fetching/__init__.py diff --git a/src/analyser/core/pipes/data_fetching/sql_processor.py b/src/nominatim_data_analyser/core/pipes/data_fetching/sql_processor.py similarity index 100% rename from src/analyser/core/pipes/data_fetching/sql_processor.py rename to src/nominatim_data_analyser/core/pipes/data_fetching/sql_processor.py diff --git a/src/analyser/core/pipes/data_processing/__init__.py b/src/nominatim_data_analyser/core/pipes/data_processing/__init__.py similarity index 100% rename from src/analyser/core/pipes/data_processing/__init__.py rename to src/nominatim_data_analyser/core/pipes/data_processing/__init__.py diff --git a/src/analyser/core/pipes/data_processing/geometry_converter.py b/src/nominatim_data_analyser/core/pipes/data_processing/geometry_converter.py similarity index 100% rename from src/analyser/core/pipes/data_processing/geometry_converter.py rename to src/nominatim_data_analyser/core/pipes/data_processing/geometry_converter.py diff --git a/src/analyser/core/pipes/data_processing/loop_data_processor.py b/src/nominatim_data_analyser/core/pipes/data_processing/loop_data_processor.py similarity index 100% rename from src/analyser/core/pipes/data_processing/loop_data_processor.py rename to src/nominatim_data_analyser/core/pipes/data_processing/loop_data_processor.py diff --git a/src/analyser/core/pipes/filling_pipe.py b/src/nominatim_data_analyser/core/pipes/filling_pipe.py similarity index 100% rename from src/analyser/core/pipes/filling_pipe.py rename to src/nominatim_data_analyser/core/pipes/filling_pipe.py diff --git a/src/analyser/core/pipes/output_formatters/__init__.py b/src/nominatim_data_analyser/core/pipes/output_formatters/__init__.py similarity index 100% rename from src/analyser/core/pipes/output_formatters/__init__.py rename to src/nominatim_data_analyser/core/pipes/output_formatters/__init__.py diff --git a/src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py b/src/nominatim_data_analyser/core/pipes/output_formatters/clusters_vt_formatter.py similarity index 100% rename from src/analyser/core/pipes/output_formatters/clusters_vt_formatter.py rename to src/nominatim_data_analyser/core/pipes/output_formatters/clusters_vt_formatter.py diff --git a/src/analyser/core/pipes/output_formatters/geojson_feature_converter.py b/src/nominatim_data_analyser/core/pipes/output_formatters/geojson_feature_converter.py similarity index 100% rename from src/analyser/core/pipes/output_formatters/geojson_feature_converter.py rename to src/nominatim_data_analyser/core/pipes/output_formatters/geojson_feature_converter.py diff --git a/src/analyser/core/pipes/output_formatters/geojson_formatter.py b/src/nominatim_data_analyser/core/pipes/output_formatters/geojson_formatter.py similarity index 100% rename from src/analyser/core/pipes/output_formatters/geojson_formatter.py rename to src/nominatim_data_analyser/core/pipes/output_formatters/geojson_formatter.py diff --git a/src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py b/src/nominatim_data_analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py similarity index 100% rename from src/analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py rename to src/nominatim_data_analyser/core/pipes/output_formatters/osmoscope_layer_formatter.py diff --git 
a/src/analyser/core/pipes/output_formatters/vector_tile_formatter.py b/src/nominatim_data_analyser/core/pipes/output_formatters/vector_tile_formatter.py similarity index 100% rename from src/analyser/core/pipes/output_formatters/vector_tile_formatter.py rename to src/nominatim_data_analyser/core/pipes/output_formatters/vector_tile_formatter.py diff --git a/src/analyser/core/pipes/rules_specific_pipes/__init__.py b/src/nominatim_data_analyser/core/pipes/rules_specific_pipes/__init__.py similarity index 100% rename from src/analyser/core/pipes/rules_specific_pipes/__init__.py rename to src/nominatim_data_analyser/core/pipes/rules_specific_pipes/__init__.py diff --git a/src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py b/src/nominatim_data_analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py similarity index 100% rename from src/analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py rename to src/nominatim_data_analyser/core/pipes/rules_specific_pipes/addr_house_number_no_digit/digits_filter.py diff --git a/src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py b/src/nominatim_data_analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py similarity index 100% rename from src/analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py rename to src/nominatim_data_analyser/core/pipes/rules_specific_pipes/addresses_pipes/custom_feature_converter.py diff --git a/src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py b/src/nominatim_data_analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py similarity index 100% rename from src/analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py rename to src/nominatim_data_analyser/core/pipes/rules_specific_pipes/duplicate_label_role/custom_feature_converter.py diff --git a/src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py b/src/nominatim_data_analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py similarity index 100% rename from src/analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py rename to src/nominatim_data_analyser/core/pipes/rules_specific_pipes/place_nodes_close/custom_feature_converter.py diff --git a/src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py b/src/nominatim_data_analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py similarity index 100% rename from src/analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py rename to src/nominatim_data_analyser/core/pipes/rules_specific_pipes/same_wikidata/custom_feature_converter.py diff --git a/src/analyser/core/qa_rule/__init__.py b/src/nominatim_data_analyser/core/qa_rule/__init__.py similarity index 100% rename from src/analyser/core/qa_rule/__init__.py rename to src/nominatim_data_analyser/core/qa_rule/__init__.py diff --git a/src/analyser/core/qa_rule/execution_context.py b/src/nominatim_data_analyser/core/qa_rule/execution_context.py similarity index 100% rename from src/analyser/core/qa_rule/execution_context.py rename to src/nominatim_data_analyser/core/qa_rule/execution_context.py diff --git a/src/analyser/core/yaml_logic/__init__.py b/src/nominatim_data_analyser/core/yaml_logic/__init__.py similarity index 
100% rename from src/analyser/core/yaml_logic/__init__.py rename to src/nominatim_data_analyser/core/yaml_logic/__init__.py diff --git a/src/analyser/core/yaml_logic/yaml_loader.py b/src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py similarity index 100% rename from src/analyser/core/yaml_logic/yaml_loader.py rename to src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py diff --git a/src/analyser/database/__init__.py b/src/nominatim_data_analyser/database/__init__.py similarity index 100% rename from src/analyser/database/__init__.py rename to src/nominatim_data_analyser/database/__init__.py diff --git a/src/analyser/database/connection.py b/src/nominatim_data_analyser/database/connection.py similarity index 100% rename from src/analyser/database/connection.py rename to src/nominatim_data_analyser/database/connection.py diff --git a/src/analyser/logger/logger.py b/src/nominatim_data_analyser/logger/logger.py similarity index 100% rename from src/analyser/logger/logger.py rename to src/nominatim_data_analyser/logger/logger.py diff --git a/src/analyser/logger/timer.py b/src/nominatim_data_analyser/logger/timer.py similarity index 100% rename from src/analyser/logger/timer.py rename to src/nominatim_data_analyser/logger/timer.py diff --git a/src/analyser/rules_specifications/BA_way_not_part_relation.yaml b/src/nominatim_data_analyser/rules_specifications/BA_way_not_part_relation.yaml similarity index 100% rename from src/analyser/rules_specifications/BA_way_not_part_relation.yaml rename to src/nominatim_data_analyser/rules_specifications/BA_way_not_part_relation.yaml diff --git a/src/analyser/rules_specifications/addr_housenumber_no_digit.yaml b/src/nominatim_data_analyser/rules_specifications/addr_housenumber_no_digit.yaml similarity index 100% rename from src/analyser/rules_specifications/addr_housenumber_no_digit.yaml rename to src/nominatim_data_analyser/rules_specifications/addr_housenumber_no_digit.yaml diff --git a/src/analyser/rules_specifications/addr_place_and_street.yaml b/src/nominatim_data_analyser/rules_specifications/addr_place_and_street.yaml similarity index 100% rename from src/analyser/rules_specifications/addr_place_and_street.yaml rename to src/nominatim_data_analyser/rules_specifications/addr_place_and_street.yaml diff --git a/src/analyser/rules_specifications/addr_place_or_street_rank28.yaml b/src/nominatim_data_analyser/rules_specifications/addr_place_or_street_rank28.yaml similarity index 100% rename from src/analyser/rules_specifications/addr_place_or_street_rank28.yaml rename to src/nominatim_data_analyser/rules_specifications/addr_place_or_street_rank28.yaml diff --git a/src/analyser/rules_specifications/addr_street_wrong_name.yaml b/src/nominatim_data_analyser/rules_specifications/addr_street_wrong_name.yaml similarity index 100% rename from src/analyser/rules_specifications/addr_street_wrong_name.yaml rename to src/nominatim_data_analyser/rules_specifications/addr_street_wrong_name.yaml diff --git a/src/analyser/rules_specifications/bad_interpolations.yaml b/src/nominatim_data_analyser/rules_specifications/bad_interpolations.yaml similarity index 100% rename from src/analyser/rules_specifications/bad_interpolations.yaml rename to src/nominatim_data_analyser/rules_specifications/bad_interpolations.yaml diff --git a/src/analyser/rules_specifications/duplicate_label_role.yaml b/src/nominatim_data_analyser/rules_specifications/duplicate_label_role.yaml similarity index 100% rename from src/analyser/rules_specifications/duplicate_label_role.yaml rename 
to src/nominatim_data_analyser/rules_specifications/duplicate_label_role.yaml diff --git a/src/analyser/rules_specifications/no_admin_level.yaml b/src/nominatim_data_analyser/rules_specifications/no_admin_level.yaml similarity index 100% rename from src/analyser/rules_specifications/no_admin_level.yaml rename to src/nominatim_data_analyser/rules_specifications/no_admin_level.yaml diff --git a/src/analyser/rules_specifications/place_nodes_close.yaml b/src/nominatim_data_analyser/rules_specifications/place_nodes_close.yaml similarity index 100% rename from src/analyser/rules_specifications/place_nodes_close.yaml rename to src/nominatim_data_analyser/rules_specifications/place_nodes_close.yaml diff --git a/src/analyser/rules_specifications/postcode_bad_format.yaml b/src/nominatim_data_analyser/rules_specifications/postcode_bad_format.yaml similarity index 100% rename from src/analyser/rules_specifications/postcode_bad_format.yaml rename to src/nominatim_data_analyser/rules_specifications/postcode_bad_format.yaml diff --git a/src/analyser/rules_specifications/same_wikidata.yaml b/src/nominatim_data_analyser/rules_specifications/same_wikidata.yaml similarity index 100% rename from src/analyser/rules_specifications/same_wikidata.yaml rename to src/nominatim_data_analyser/rules_specifications/same_wikidata.yaml diff --git a/tests/assembler/test_pipe_factory.py b/tests/assembler/test_pipe_factory.py index e0663f8..b742219 100644 --- a/tests/assembler/test_pipe_factory.py +++ b/tests/assembler/test_pipe_factory.py @@ -1,6 +1,6 @@ -from analyser.core.pipes.data_processing.geometry_converter import GeometryConverter -from analyser.core.assembler.pipe_factory import PipeFactory -from analyser.core.exceptions import YAMLSyntaxException +from nominatim_data_analyser.core.pipes.data_processing.geometry_converter import GeometryConverter +from nominatim_data_analyser.core.assembler.pipe_factory import PipeFactory +from nominatim_data_analyser.core.exceptions import YAMLSyntaxException import pytest def test_assemble_pipe_ok() -> None: @@ -39,4 +39,4 @@ def test_assemble_pipe_wrong_type() -> None: 'type': 'wrongtypewhichdoesntexist' } with pytest.raises(YAMLSyntaxException, match='The type wrongtypewhichdoesntexist doesn\'t exist.'): - PipeFactory.assemble_pipe(node_data, None) \ No newline at end of file + PipeFactory.assemble_pipe(node_data, None) diff --git a/tests/assembler/test_pipeline_assembler.py b/tests/assembler/test_pipeline_assembler.py index f26e203..0aba7c8 100644 --- a/tests/assembler/test_pipeline_assembler.py +++ b/tests/assembler/test_pipeline_assembler.py @@ -1,9 +1,9 @@ -from analyser.core.pipes.data_fetching.sql_processor import SQLProcessor -from analyser.core.pipes.output_formatters import GeoJSONFeatureConverter -from analyser.core.pipes.data_processing import GeometryConverter -from analyser.core.pipes.filling_pipe import FillingPipe -from analyser.core.assembler import PipelineAssembler -from analyser.core.pipe import Pipe +from nominatim_data_analyser.core.pipes.data_fetching.sql_processor import SQLProcessor +from nominatim_data_analyser.core.pipes.output_formatters import GeoJSONFeatureConverter +from nominatim_data_analyser.core.pipes.data_processing import GeometryConverter +from nominatim_data_analyser.core.pipes.filling_pipe import FillingPipe +from nominatim_data_analyser.core.assembler import PipelineAssembler +from nominatim_data_analyser.core.pipe import Pipe import pytest def test_on_new_node(pipeline_assembler: PipelineAssembler, filling_pipe: Pipe) -> None: @@ -76,4 
+76,4 @@ def test_assemble() -> None: @pytest.fixture def pipeline_assembler() -> PipelineAssembler: - return PipelineAssembler({}, 'test_rule') \ No newline at end of file + return PipelineAssembler({}, 'test_rule') diff --git a/tests/config/test_config.py b/tests/config/test_config.py index fef0e3d..51bcffa 100644 --- a/tests/config/test_config.py +++ b/tests/config/test_config.py @@ -1,5 +1,5 @@ -from analyser.config import Config +from nominatim_data_analyser.config import Config from pathlib import Path import pytest import yaml @@ -28,4 +28,4 @@ def test_load_broken_config(config: Config) -> None: be raised as the config file has a wrong syntax. """ with pytest.raises(yaml.YAMLError): - config.load_config(Path(__file__).parent / 'broken_config') \ No newline at end of file + config.load_config(Path(__file__).parent / 'broken_config') diff --git a/tests/conftest.py b/tests/conftest.py index 45e716a..2d026ff 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -6,17 +6,17 @@ # always test against source sys.path.insert(1, str(Path(__file__, '..', '..', 'src').resolve())) -from analyser.config import Config -from analyser.core.pipes import FillingPipe -from analyser.core.pipes.data_fetching.sql_processor import SQLProcessor -from analyser.core.pipes.data_processing import (GeometryConverter, +from nominatim_data_analyser.config import Config +from nominatim_data_analyser.core.pipes import FillingPipe +from nominatim_data_analyser.core.pipes.data_fetching.sql_processor import SQLProcessor +from nominatim_data_analyser.core.pipes.data_processing import (GeometryConverter, LoopDataProcessor) -from analyser.core.pipes.output_formatters import (GeoJSONFeatureConverter, +from nominatim_data_analyser.core.pipes.output_formatters import (GeoJSONFeatureConverter, GeoJSONFormatter, OsmoscopeLayerFormatter, VectorTileFormatter) -from analyser.core.qa_rule import ExecutionContext -from analyser.database.connection import connect +from nominatim_data_analyser.core.qa_rule import ExecutionContext +from nominatim_data_analyser.database.connection import connect from psycopg2._psycopg import connection, cursor diff --git a/tests/core/test_core.py b/tests/core/test_core.py index b7714b4..0ff1e55 100644 --- a/tests/core/test_core.py +++ b/tests/core/test_core.py @@ -2,8 +2,8 @@ from pathlib import Path import pytest -from analyser.config.config import Config -from analyser.core.core import Core +from nominatim_data_analyser.config.config import Config +from nominatim_data_analyser.core.core import Core rules_path = Path(__file__).parent / 'rules' @@ -36,7 +36,7 @@ def setup_mock(tmp_path, monkeypatch) -> None: Set the RulesFolderPath to the temporary folder of the test for the creation of output files. 
""" - monkeypatch.setattr('analyser.core.yaml_logic.yaml_loader.base_rules_path', + monkeypatch.setattr('nominatim_data_analyser.core.yaml_logic.yaml_loader.base_rules_path', rules_path) Config.values['RulesFolderPath'] = tmp_path diff --git a/tests/database/test_connection.py b/tests/database/test_connection.py index c2b0826..835725d 100644 --- a/tests/database/test_connection.py +++ b/tests/database/test_connection.py @@ -1,4 +1,4 @@ -from analyser.database.connection import connect +from nominatim_data_analyser.database.connection import connect def test_connect(dsn) -> None: diff --git a/tests/deconstructor/test_pipeline_deconstructor.py b/tests/deconstructor/test_pipeline_deconstructor.py index 7d0d7ec..e35a645 100644 --- a/tests/deconstructor/test_pipeline_deconstructor.py +++ b/tests/deconstructor/test_pipeline_deconstructor.py @@ -1,5 +1,5 @@ -from analyser.core.deconstructor.pipeline_deconstructor import BACKTRACKING_EVENT, NEW_NODE_EVENT -from analyser.core.deconstructor import PipelineDeconstructor +from nominatim_data_analyser.core.deconstructor.pipeline_deconstructor import BACKTRACKING_EVENT, NEW_NODE_EVENT +from nominatim_data_analyser.core.deconstructor import PipelineDeconstructor import pytest def test_deconstruct_basic() -> None: @@ -107,9 +107,9 @@ def callback(self, new_node: dict = None): x += 1 #Mock the methods - monkeypatch.setattr('analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._notify_new_node', + monkeypatch.setattr('nominatim_data_analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._notify_new_node', callback) - monkeypatch.setattr('analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._explore_deeper_or_backtrack', + monkeypatch.setattr('nominatim_data_analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._explore_deeper_or_backtrack', callback) pipeline_deconstructor._send_current_node_and_explore() assert x == 2 @@ -143,7 +143,7 @@ def new_node_callback(node: dict): pipeline_deconstructor._event_callbacks[NEW_NODE_EVENT].append(new_node_callback) #Mock a dumb _backtrack method. - monkeypatch.setattr('analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._backtrack', + monkeypatch.setattr('nominatim_data_analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._backtrack', lambda self: 1) pipeline_deconstructor.current_node = node1 @@ -170,7 +170,7 @@ def test_backtrack(pipeline_deconstructor: PipelineDeconstructor, monkeypatch) - pipeline_deconstructor.nodes_history.append(node) #Mock a dumb _explore_deeper_or_backtrack method to not launch the recursion. 
- monkeypatch.setattr('analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._explore_deeper_or_backtrack', + monkeypatch.setattr('nominatim_data_analyser.core.deconstructor.pipeline_deconstructor.PipelineDeconstructor._explore_deeper_or_backtrack', lambda self: 1) pipeline_deconstructor._backtrack() @@ -254,4 +254,4 @@ def test_init_event_callbacks(pipeline_deconstructor: PipelineDeconstructor) -> @pytest.fixture def pipeline_deconstructor() -> PipelineDeconstructor: - return PipelineDeconstructor({}, 'test_rule') \ No newline at end of file + return PipelineDeconstructor({}, 'test_rule') diff --git a/tests/dynamic_value/test_resolver.py b/tests/dynamic_value/test_resolver.py index 1a988d9..2cddda2 100644 --- a/tests/dynamic_value/test_resolver.py +++ b/tests/dynamic_value/test_resolver.py @@ -1,6 +1,6 @@ import pytest -from analyser.core.dynamic_value import Variable -from analyser.core.dynamic_value.resolver import (_contains_dynamic_value, +from nominatim_data_analyser.core.dynamic_value import Variable +from nominatim_data_analyser.core.dynamic_value.resolver import (_contains_dynamic_value, _is_dynamic_value, _resolve_if_resolvable, is_resolvable, resolve_all, diff --git a/tests/dynamic_value/test_switch.py b/tests/dynamic_value/test_switch.py index dbb0637..ab898ae 100644 --- a/tests/dynamic_value/test_switch.py +++ b/tests/dynamic_value/test_switch.py @@ -1,4 +1,4 @@ -from analyser.core.dynamic_value.switch import Switch +from nominatim_data_analyser.core.dynamic_value.switch import Switch import pytest def test_resolve_switch_ok(switch: Switch) -> None: @@ -45,4 +45,4 @@ def switch() -> Switch: 'case1': 'result1', 'case2': 'result2', 'case3': 'result3' - }) \ No newline at end of file + }) diff --git a/tests/dynamic_value/test_variable.py b/tests/dynamic_value/test_variable.py index 6e7fbae..1c465df 100644 --- a/tests/dynamic_value/test_variable.py +++ b/tests/dynamic_value/test_variable.py @@ -1,5 +1,5 @@ -from analyser.core.dynamic_value import Variable +from nominatim_data_analyser.core.dynamic_value import Variable import pytest def test_resolve_variable_ok(variable: Variable) -> None: diff --git a/tests/model/test_node.py b/tests/model/test_node.py index 98a9e65..7226830 100644 --- a/tests/model/test_node.py +++ b/tests/model/test_node.py @@ -1,4 +1,4 @@ -from analyser.core.model.node import Node +from nominatim_data_analyser.core.model.node import Node from geojson.feature import Feature diff --git a/tests/pipes/output_formatters/test_geojson_feature_converter.py b/tests/pipes/output_formatters/test_geojson_feature_converter.py index 5669c7c..181b214 100644 --- a/tests/pipes/output_formatters/test_geojson_feature_converter.py +++ b/tests/pipes/output_formatters/test_geojson_feature_converter.py @@ -1,5 +1,5 @@ -from analyser.core.pipes.output_formatters import GeoJSONFeatureConverter -from analyser.core.model import Node +from nominatim_data_analyser.core.pipes.output_formatters import GeoJSONFeatureConverter +from nominatim_data_analyser.core.model import Node from geojson.feature import Feature def test_on_created_geojson_feature_converter(geojson_feature_converter: GeoJSONFeatureConverter) -> None: @@ -21,4 +21,4 @@ def test_process_geojson_feature_converter(geojson_feature_converter: GeoJSONFea feature: Feature = geojson_feature_converter.process(data) assert feature['geometry']['type'] == 'Point' assert feature['geometry']['coordinates'] == [10, 15] - assert feature['properties'] == {'prop1': 'val1', 'prop2': 'val2'} \ No newline at end of file + 
assert feature['properties'] == {'prop1': 'val1', 'prop2': 'val2'} diff --git a/tests/pipes/output_formatters/test_geojson_formatter.py b/tests/pipes/output_formatters/test_geojson_formatter.py index 01046d5..6fdee8c 100644 --- a/tests/pipes/output_formatters/test_geojson_formatter.py +++ b/tests/pipes/output_formatters/test_geojson_formatter.py @@ -1,6 +1,6 @@ -from analyser.core.pipes.output_formatters import GeoJSONFormatter +from nominatim_data_analyser.core.pipes.output_formatters import GeoJSONFormatter from geojson import Feature, Point, FeatureCollection, loads -from analyser.config import Config +from nominatim_data_analyser.config import Config def test_process_geojson_formatter(config: Config, geojson_formatter: GeoJSONFormatter, @@ -27,4 +27,4 @@ def test_process_geojson_formatter(config: Config, #Verify the content of the geojson created with open(tmp_path/'test_folder/test_file.json' , 'r') as file: - assert loads(file.read()) == FeatureCollection(features) \ No newline at end of file + assert loads(file.read()) == FeatureCollection(features) diff --git a/tests/pipes/output_formatters/test_osmoscope_layer_formatter.py b/tests/pipes/output_formatters/test_osmoscope_layer_formatter.py index 5509a2a..abd10f8 100644 --- a/tests/pipes/output_formatters/test_osmoscope_layer_formatter.py +++ b/tests/pipes/output_formatters/test_osmoscope_layer_formatter.py @@ -1,6 +1,6 @@ from tests.conftest import temp_db_cursor -from analyser.config import Config -from analyser.core.pipes import OsmoscopeLayerFormatter +from nominatim_data_analyser.config import Config +from nominatim_data_analyser.core.pipes import OsmoscopeLayerFormatter import pytest import json diff --git a/tests/pipes/output_formatters/test_vector_tile_formatter.py b/tests/pipes/output_formatters/test_vector_tile_formatter.py index 386b214..8467422 100644 --- a/tests/pipes/output_formatters/test_vector_tile_formatter.py +++ b/tests/pipes/output_formatters/test_vector_tile_formatter.py @@ -1,5 +1,5 @@ -from analyser.core.pipes import VectorTileFormatter -from analyser.config import Config +from nominatim_data_analyser.core.pipes import VectorTileFormatter +from nominatim_data_analyser.config import Config from geojson import Feature, Point def test_process_vector_tile_formatter(vector_tile_formatter: VectorTileFormatter, @@ -26,8 +26,8 @@ def test_process_vector_tile_formatter(vector_tile_formatter: VectorTileFormatte ] #Mock the call to Tippecanoe - monkeypatch.setattr('analyser.core.pipes.output_formatters.vector_tile_formatter.VectorTileFormatter.call_tippecanoe', + monkeypatch.setattr('nominatim_data_analyser.core.pipes.output_formatters.vector_tile_formatter.VectorTileFormatter.call_tippecanoe', lambda self, output_dir, feature_collection: None) web_path = vector_tile_formatter.process(features) - assert web_path == 'test_prefix_path/test_rule/vector-tiles/{z}/{x}/{y}.pbf' \ No newline at end of file + assert web_path == 'test_prefix_path/test_rule/vector-tiles/{z}/{x}/{y}.pbf' diff --git a/tests/pipes/rules_specific_pipes/addr_house_number_no_digit/test_digits_filter.py b/tests/pipes/rules_specific_pipes/addr_house_number_no_digit/test_digits_filter.py index 3e040da..362fc20 100644 --- a/tests/pipes/rules_specific_pipes/addr_house_number_no_digit/test_digits_filter.py +++ b/tests/pipes/rules_specific_pipes/addr_house_number_no_digit/test_digits_filter.py @@ -1,7 +1,7 @@ import pytest -from analyser.core.pipes.rules_specific_pipes import \ +from nominatim_data_analyser.core.pipes.rules_specific_pipes import \ 
AddrHouseNumberNoDigitFilter -from analyser.core.qa_rule import ExecutionContext +from nominatim_data_analyser.core.qa_rule import ExecutionContext def test_process_addr_HN_no_digit_filter(digits_filter: AddrHouseNumberNoDigitFilter) -> None: diff --git a/tests/pipes/rules_specific_pipes/duplicate_label_role/test_duplicate_label_role_CFC.py b/tests/pipes/rules_specific_pipes/duplicate_label_role/test_duplicate_label_role_CFC.py index dcba328..435486d 100644 --- a/tests/pipes/rules_specific_pipes/duplicate_label_role/test_duplicate_label_role_CFC.py +++ b/tests/pipes/rules_specific_pipes/duplicate_label_role/test_duplicate_label_role_CFC.py @@ -1,8 +1,8 @@ import pytest -from analyser.core.model import Node -from analyser.core.pipes.rules_specific_pipes import \ +from nominatim_data_analyser.core.model import Node +from nominatim_data_analyser.core.pipes.rules_specific_pipes import \ DuplicateLabelRoleCustomFeatureConverter -from analyser.core.qa_rule import ExecutionContext +from nominatim_data_analyser.core.qa_rule import ExecutionContext from geojson.feature import Feature diff --git a/tests/pipes/rules_specific_pipes/place_nodes_close/test_place_nodes_close_CFC.py b/tests/pipes/rules_specific_pipes/place_nodes_close/test_place_nodes_close_CFC.py index a8eb1f6..2916296 100644 --- a/tests/pipes/rules_specific_pipes/place_nodes_close/test_place_nodes_close_CFC.py +++ b/tests/pipes/rules_specific_pipes/place_nodes_close/test_place_nodes_close_CFC.py @@ -1,8 +1,8 @@ import pytest -from analyser.core.model import Node -from analyser.core.pipes.rules_specific_pipes import \ +from nominatim_data_analyser.core.model import Node +from nominatim_data_analyser.core.pipes.rules_specific_pipes import \ PlaceNodesCloseCustomFeatureConverter -from analyser.core.qa_rule import ExecutionContext +from nominatim_data_analyser.core.qa_rule import ExecutionContext from geojson.feature import Feature def test_process_place_nodes_close_CFC(place_nodes_close_CFC: PlaceNodesCloseCustomFeatureConverter) -> None: diff --git a/tests/pipes/rules_specific_pipes/same_wikidata/test_same_wikidata_CFC.py b/tests/pipes/rules_specific_pipes/same_wikidata/test_same_wikidata_CFC.py index 9158620..c9074f1 100644 --- a/tests/pipes/rules_specific_pipes/same_wikidata/test_same_wikidata_CFC.py +++ b/tests/pipes/rules_specific_pipes/same_wikidata/test_same_wikidata_CFC.py @@ -1,7 +1,7 @@ import pytest -from analyser.core.pipes.rules_specific_pipes import \ +from nominatim_data_analyser.core.pipes.rules_specific_pipes import \ SameWikiDataFeatureConverter -from analyser.core.qa_rule import ExecutionContext +from nominatim_data_analyser.core.qa_rule import ExecutionContext def test_same_wikidata_CFC(same_wikidata_CFC: SameWikiDataFeatureConverter) -> None: diff --git a/tests/pipes/test_geometry_converter.py b/tests/pipes/test_geometry_converter.py index 3bfe593..603fca6 100644 --- a/tests/pipes/test_geometry_converter.py +++ b/tests/pipes/test_geometry_converter.py @@ -1,4 +1,4 @@ -from analyser.core.pipes.data_processing import GeometryConverter +from nominatim_data_analyser.core.pipes.data_processing import GeometryConverter def test_on_created_geometry_converter(geometry_converter: GeometryConverter) -> None: """ @@ -21,4 +21,4 @@ def test_process_geometry_converter_none_geometry_holder(geometry_converter: Geo Test the process() method of the GeometryConverter with one data containing a None value for the geometry_holder. 
""" data_result = geometry_converter.process({'geometry_holder': None}) - assert data_result is None \ No newline at end of file + assert data_result is None diff --git a/tests/pipes/test_loop_data_processor.py b/tests/pipes/test_loop_data_processor.py index 78b1113..98f6cb7 100644 --- a/tests/pipes/test_loop_data_processor.py +++ b/tests/pipes/test_loop_data_processor.py @@ -1,8 +1,8 @@ -from analyser.core.pipes.data_processing import LoopDataProcessor -from analyser.core.pipes.data_processing.geometry_converter import \ +from nominatim_data_analyser.core.pipes.data_processing import LoopDataProcessor +from nominatim_data_analyser.core.pipes.data_processing.geometry_converter import \ GeometryConverter -from analyser.core.pipes.filling_pipe import FillingPipe -from analyser.core.pipes.output_formatters.geojson_feature_converter import \ +from nominatim_data_analyser.core.pipes.filling_pipe import FillingPipe +from nominatim_data_analyser.core.pipes.output_formatters.geojson_feature_converter import \ GeoJSONFeatureConverter from geojson.feature import Feature @@ -31,7 +31,7 @@ def test_process_one_data_none(loop_data_processor: LoopDataProcessor, geometry_converter.plug_pipe(filling_pipe).plug_pipe(geojson_feature_converter) #Mock the FillingPipe process() method to return None. - monkeypatch.setattr('analyser.core.pipes.filling_pipe.FillingPipe.process', + monkeypatch.setattr('nominatim_data_analyser.core.pipes.filling_pipe.FillingPipe.process', lambda self, data: None) loop_data_processor.processing_pipeline = geometry_converter @@ -70,7 +70,7 @@ def test_process_multiple_result(loop_data_processor: LoopDataProcessor, geometry_converter.plug_pipe(geojson_feature_converter).plug_pipe(filling_pipe) #Mock the FillingPipe process() method to return the data 3 times (in a list). 
- monkeypatch.setattr('analyser.core.pipes.filling_pipe.FillingPipe.process', + monkeypatch.setattr('nominatim_data_analyser.core.pipes.filling_pipe.FillingPipe.process', lambda self, data: [data, data, data]) loop_data_processor.processing_pipeline = geometry_converter diff --git a/tests/pipes/test_pipe.py b/tests/pipes/test_pipe.py index d0ae22a..51e90f8 100644 --- a/tests/pipes/test_pipe.py +++ b/tests/pipes/test_pipe.py @@ -1,8 +1,8 @@ -from analyser.core.exceptions.yaml_syntax_exception import YAMLSyntaxException -from analyser.core.pipes.data_processing import GeometryConverter -from analyser.core.qa_rule import ExecutionContext -from analyser.core.pipes import FillingPipe +from nominatim_data_analyser.core.exceptions.yaml_syntax_exception import YAMLSyntaxException +from nominatim_data_analyser.core.pipes.data_processing import GeometryConverter +from nominatim_data_analyser.core.qa_rule import ExecutionContext +from nominatim_data_analyser.core.pipes import FillingPipe import logging import pytest @@ -21,7 +21,7 @@ def test_process_and_next(filling_pipe: FillingPipe, execution_context: Executio def callback(self, data = None): nonlocal x x += 1 - monkeypatch.setattr('analyser.core.pipes.filling_pipe.FillingPipe.process', + monkeypatch.setattr('nominatim_data_analyser.core.pipes.filling_pipe.FillingPipe.process', callback) filling_pipe.process_and_next() assert x == 2 @@ -66,4 +66,4 @@ def test_log(filling_pipe: FillingPipe) -> None: @pytest.fixture def filling_pipe(execution_context: ExecutionContext) -> FillingPipe: - return FillingPipe({}, execution_context) \ No newline at end of file + return FillingPipe({}, execution_context) diff --git a/tests/pipes/test_sql_processor.py b/tests/pipes/test_sql_processor.py index 196b42a..65ccd51 100644 --- a/tests/pipes/test_sql_processor.py +++ b/tests/pipes/test_sql_processor.py @@ -1,4 +1,4 @@ -from analyser.core.pipes.data_fetching import SQLProcessor +from nominatim_data_analyser.core.pipes.data_fetching import SQLProcessor from psycopg2._psycopg import connection, cursor diff --git a/tests/yaml_logic/test_yaml_loader.py b/tests/yaml_logic/test_yaml_loader.py index a34f20e..42958d7 100644 --- a/tests/yaml_logic/test_yaml_loader.py +++ b/tests/yaml_logic/test_yaml_loader.py @@ -1,12 +1,12 @@ -from analyser.core.dynamic_value.variable import Variable -from analyser.core.dynamic_value.switch import Switch +from nominatim_data_analyser.core.dynamic_value.variable import Variable +from nominatim_data_analyser.core.dynamic_value.switch import Switch from pathlib import Path -import analyser.core.yaml_logic.yaml_loader as yaml_loader +import nominatim_data_analyser.core.yaml_logic.yaml_loader as yaml_loader import pytest import yaml -from analyser.core.yaml_logic.yaml_loader import load_yaml_rule -from analyser.core import Pipe +from nominatim_data_analyser.core.yaml_logic.yaml_loader import load_yaml_rule +from nominatim_data_analyser.core import Pipe def test_load_yaml_rule(yaml_path) -> None: """ @@ -78,4 +78,4 @@ def test_construct_variable(yaml_path) -> None: @pytest.fixture def yaml_path() -> Path: - return Path(__file__).parent / 'yaml' \ No newline at end of file + return Path(__file__).parent / 'yaml' From 9e4152f013ef8a9a6ce55924ee59324127209468 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Tue, 24 Sep 2024 22:39:36 +0200 Subject: [PATCH 06/13] adapt rules for new osm2pgsql middle format --- .../rules_specifications/BA_way_not_part_relation.yaml | 6 +++--- .../rules_specifications/duplicate_label_role.yaml | 4 ++-- 2 files 
changed, 5 insertions(+), 5 deletions(-) diff --git a/src/nominatim_data_analyser/rules_specifications/BA_way_not_part_relation.yaml b/src/nominatim_data_analyser/rules_specifications/BA_way_not_part_relation.yaml index 9956755..fb0a540 100644 --- a/src/nominatim_data_analyser/rules_specifications/BA_way_not_part_relation.yaml +++ b/src/nominatim_data_analyser/rules_specifications/BA_way_not_part_relation.yaml @@ -3,10 +3,10 @@ QUERY: type: SQLProcessor query: > SELECT ST_AsText(centroid) AS geometry_holder, osm_id FROM ( - SELECT centroid, osm_id, array_agg(osm_id) AS array_osm_id, concat('w', osm_id) AS type_osm_id FROM placex + SELECT centroid, osm_id, array_agg(osm_id) AS array_osm_id FROM placex WHERE osm_type='W' AND class='boundary' AND type='administrative' GROUP BY osm_id, centroid ) AS px - WHERE NOT EXISTS(SELECT 1 FROM planet_osm_rels WHERE px.array_osm_id <@ parts AND px.type_osm_id=ANY(members)); + WHERE NOT EXISTS(SELECT 1 FROM planet_osm_rels WHERE px.array_osm_id <@ planet_osm_member_ids(members, 'W'::char(1))); out: LOOP_PROCESSING: type: LoopDataProcessor @@ -32,4 +32,4 @@ QUERY: doc: description: Ways with boundary=administrative that are not part of a relation. why_problem: Administrative boundary should always be mapped as relations. Every boundary way should be member of a relation. - how_to_fix: You might have to create an appropriate administrative relation. \ No newline at end of file + how_to_fix: You might have to create an appropriate administrative relation. diff --git a/src/nominatim_data_analyser/rules_specifications/duplicate_label_role.yaml b/src/nominatim_data_analyser/rules_specifications/duplicate_label_role.yaml index c247827..d51a518 100644 --- a/src/nominatim_data_analyser/rules_specifications/duplicate_label_role.yaml +++ b/src/nominatim_data_analyser/rules_specifications/duplicate_label_role.yaml @@ -4,7 +4,7 @@ QUERY: query: > SELECT ST_AsText(centroid) AS geometry_holder, osm_id, osm_type, members FROM placex AS px JOIN planet_osm_rels AS por ON px.osm_id=por.id WHERE class='boundary' AND type='administrative' AND - cardinality(array_positions(members, 'label')) >= 2; + jsonb_array_length(jsonb_path_query_array(members, '$[*].role ? (@ == "label")')) >= 2; out: LOOP_PROCESSING: type: LoopDataProcessor @@ -27,4 +27,4 @@ QUERY: doc: description: Admin boundaries with more than one member with role 'label'. why_problem: Boundary can have exactly one member with place label. - how_to_fix: Make sure that the admin boundary contains only one member with role 'label'. \ No newline at end of file + how_to_fix: Make sure that the admin boundary contains only one member with role 'label'. 
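
A minimal sketch of how the reworked duplicate-label predicate above can be exercised on its own, assuming a database loaded with the new osm2pgsql middle format, where planet_osm_rels.members is a jsonb array of member objects each carrying a "role" key (as the jsonpath in the rule implies). The surrounding Python and the connection string are illustrative placeholders and not part of the patch; only the jsonb_path_query_array/jsonb_array_length expression is taken verbatim from the rule.

    # Illustrative only: run the new jsonb-based 'label' role check directly.
    import psycopg2

    with psycopg2.connect("dbname=nominatim") as conn:   # placeholder DSN
        with conn.cursor() as cur:
            cur.execute("""
                SELECT px.osm_id
                  FROM placex AS px
                  JOIN planet_osm_rels AS por ON px.osm_id = por.id
                 WHERE px.class = 'boundary' AND px.type = 'administrative'
                   AND jsonb_array_length(
                         jsonb_path_query_array(por.members,
                                                '$[*].role ? (@ == "label")')) >= 2
            """)
            for (osm_id,) in cur.fetchall():
                print(f"relation {osm_id} has more than one member with role 'label'")
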
From f40ce1d33d086e1cf421c701ee285cb3ef8b3c22 Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Tue, 24 Sep 2024 22:39:52 +0200 Subject: [PATCH 07/13] adapt configuration paths to new layout --- .gitignore | 2 +- src/nominatim_data_analyser/core/core.py | 2 +- .../core/pipes/output_formatters/clusters_vt_formatter.py | 2 +- src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index e6e8dc6..490e612 100755 --- a/.gitignore +++ b/.gitignore @@ -2,7 +2,7 @@ src/nominatim_data_analyser.egg-info dist -analyser/config/config.yaml +src/nominatim_data_analyser/config/config.yaml .mason mason_packages diff --git a/src/nominatim_data_analyser/core/core.py b/src/nominatim_data_analyser/core/core.py index 20ead65..f3edf38 100644 --- a/src/nominatim_data_analyser/core/core.py +++ b/src/nominatim_data_analyser/core/core.py @@ -12,7 +12,7 @@ class Core(): """ def __init__(self) -> None: Config.load_config() - self.rules_path = Path('analyser/rules_specifications') + self.rules_path = Path(__file__, '..', '..', 'rules_specifications').resolve() def execute_all(self, filter=None) -> None: """ diff --git a/src/nominatim_data_analyser/core/pipes/output_formatters/clusters_vt_formatter.py b/src/nominatim_data_analyser/core/pipes/output_formatters/clusters_vt_formatter.py index 547a7e4..9be6b6d 100644 --- a/src/nominatim_data_analyser/core/pipes/output_formatters/clusters_vt_formatter.py +++ b/src/nominatim_data_analyser/core/pipes/output_formatters/clusters_vt_formatter.py @@ -42,7 +42,7 @@ def call_clustering_vt(self, output_dir: Path, feature_collection: FeatureCollec """ try: result = subprocess.run( - [f'{Path(__file__).parent.resolve()}/../../../../clustering-vt/build/clustering-vt', output_dir, str(self.radius)], + [f'./clustering-vt/build/clustering-vt', output_dir, str(self.radius)], check=True, input=dumps(feature_collection, sort_keys=True).encode(), capture_output=True diff --git a/src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py b/src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py index a2aee6c..4a09349 100644 --- a/src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py +++ b/src/nominatim_data_analyser/core/yaml_logic/yaml_loader.py @@ -5,7 +5,7 @@ from pathlib import Path import yaml -base_rules_path = Path('analyser/rules_specifications') +base_rules_path = Path(__file__, '..', '..', '..', 'rules_specifications').resolve() def load_yaml_rule(file_name: str) -> dict: """ From 3241247187e0317a37ba3cc648bc0518ce12a63c Mon Sep 17 00:00:00 2001 From: Sarah Hoffmann Date: Tue, 24 Sep 2024 23:36:58 +0200 Subject: [PATCH 08/13] compile clustering-vt as extension with vendored-in deps --- .gitignore | 4 - .gitmodules | 3 - MANIFEST.in | 1 + clustering-vt/.mason | 1 - clustering-vt/Makefile | 42 - .../geojson/0.4.3/include/mapbox/geojson.hpp | 45 + .../include/mapbox/geojson/rapidjson.hpp | 32 + .../0.4.3/include/mapbox/geojson_impl.hpp | 457 +++ .../geometry/1.0.0/include/mapbox/feature.hpp | 113 + .../1.0.0/include/mapbox/geometry.hpp | 12 + .../1.0.0/include/mapbox/geometry/box.hpp | 36 + .../1.0.0/include/mapbox/geometry/empty.hpp | 18 + .../include/mapbox/geometry/envelope.hpp | 33 + .../mapbox/geometry/for_each_point.hpp | 51 + .../include/mapbox/geometry/geometry.hpp | 56 + .../include/mapbox/geometry/line_string.hpp | 28 + .../mapbox/geometry/multi_line_string.hpp | 28 + .../include/mapbox/geometry/multi_point.hpp | 28 + .../include/mapbox/geometry/multi_polygon.hpp | 28 + 
.../1.0.0/include/mapbox/geometry/point.hpp | 42 + .../mapbox/geometry/point_arithmetic.hpp | 119 + .../1.0.0/include/mapbox/geometry/polygon.hpp | 45 + .../1.0.0/include/mapbox/geometry_io.hpp | 98 + contrib/kdbush/0.1.3/LICENSE | 13 + contrib/kdbush/0.1.3/README.md | 3 + contrib/kdbush/0.1.3/include/kdbush.hpp | 220 ++ contrib/protozero/1.7.0/LICENSE.md | 24 + .../include/protozero/basic_pbf_builder.hpp | 266 ++ .../include/protozero/basic_pbf_writer.hpp | 1054 +++++++ .../1.7.0/include/protozero/buffer_fixed.hpp | 222 ++ .../1.7.0/include/protozero/buffer_string.hpp | 76 + .../1.7.0/include/protozero/buffer_tmpl.hpp | 113 + .../1.7.0/include/protozero/buffer_vector.hpp | 76 + .../1.7.0/include/protozero/byteswap.hpp | 99 + .../1.7.0/include/protozero/config.hpp | 48 + .../1.7.0/include/protozero/data_view.hpp | 236 ++ .../1.7.0/include/protozero/exception.hpp | 101 + .../1.7.0/include/protozero/iterators.hpp | 481 +++ .../1.7.0/include/protozero/pbf_builder.hpp | 32 + .../1.7.0/include/protozero/pbf_message.hpp | 184 ++ .../1.7.0/include/protozero/pbf_reader.hpp | 977 +++++++ .../1.7.0/include/protozero/pbf_writer.hpp | 76 + .../1.7.0/include/protozero/types.hpp | 66 + .../1.7.0/include/protozero/varint.hpp | 245 ++ .../1.7.0/include/protozero/version.hpp | 34 + .../1.1.0/include/rapidjson/allocators.h | 271 ++ .../1.1.0/include/rapidjson/document.h | 2575 +++++++++++++++++ .../1.1.0/include/rapidjson/encodedstream.h | 299 ++ .../1.1.0/include/rapidjson/encodings.h | 716 +++++ .../1.1.0/include/rapidjson/error/en.h | 74 + .../1.1.0/include/rapidjson/error/error.h | 155 + .../1.1.0/include/rapidjson/filereadstream.h | 99 + .../1.1.0/include/rapidjson/filewritestream.h | 104 + .../rapidjson/1.1.0/include/rapidjson/fwd.h | 151 + .../include/rapidjson/internal/biginteger.h | 290 ++ .../1.1.0/include/rapidjson/internal/diyfp.h | 258 ++ .../1.1.0/include/rapidjson/internal/dtoa.h | 245 ++ .../include/rapidjson/internal/ieee754.h | 78 + .../1.1.0/include/rapidjson/internal/itoa.h | 304 ++ .../1.1.0/include/rapidjson/internal/meta.h | 181 ++ .../1.1.0/include/rapidjson/internal/pow10.h | 55 + .../1.1.0/include/rapidjson/internal/regex.h | 701 +++++ .../1.1.0/include/rapidjson/internal/stack.h | 230 ++ .../include/rapidjson/internal/strfunc.h | 55 + .../1.1.0/include/rapidjson/internal/strtod.h | 269 ++ .../1.1.0/include/rapidjson/internal/swap.h | 46 + .../1.1.0/include/rapidjson/istreamwrapper.h | 115 + .../1.1.0/include/rapidjson/memorybuffer.h | 70 + .../1.1.0/include/rapidjson/memorystream.h | 71 + .../include/rapidjson/msinttypes/inttypes.h | 316 ++ .../include/rapidjson/msinttypes/stdint.h | 300 ++ .../1.1.0/include/rapidjson/ostreamwrapper.h | 81 + .../1.1.0/include/rapidjson/pointer.h | 1358 +++++++++ .../1.1.0/include/rapidjson/prettywriter.h | 255 ++ .../1.1.0/include/rapidjson/rapidjson.h | 615 ++++ .../1.1.0/include/rapidjson/reader.h | 1879 ++++++++++++ .../1.1.0/include/rapidjson/schema.h | 2006 +++++++++++++ .../1.1.0/include/rapidjson/stream.h | 179 ++ .../1.1.0/include/rapidjson/stringbuffer.h | 117 + .../1.1.0/include/rapidjson/writer.h | 610 ++++ contrib/rapidjson/LICENSE | 57 + contrib/supercluster/0.3.2/LICENSE | 13 + contrib/supercluster/0.3.2/README.md | 3 + .../0.3.2/include/supercluster.hpp | 386 +++ contrib/variant/1.2.0/LICENSE | 25 + contrib/variant/1.2.0/README.md | 248 ++ .../variant/1.2.0/include/mapbox/optional.hpp | 74 + .../include/mapbox/recursive_wrapper.hpp | 122 + .../variant/1.2.0/include/mapbox/variant.hpp | 1053 +++++++ 
.../1.2.0/include/mapbox/variant_cast.hpp | 85 + .../1.2.0/include/mapbox/variant_io.hpp | 45 + .../1.2.0/include/mapbox/variant_visitor.hpp | 43 + contrib/vtzero/1.1.0/LICENSE | 25 + .../vtzero/1.1.0/include/vtzero/builder.hpp | 1365 +++++++++ .../1.1.0/include/vtzero/builder_impl.hpp | 267 ++ .../include/vtzero/encoded_property_value.hpp | 244 ++ .../vtzero/1.1.0/include/vtzero/exception.hpp | 134 + .../vtzero/1.1.0/include/vtzero/feature.hpp | 315 ++ .../include/vtzero/feature_builder_impl.hpp | 126 + .../vtzero/1.1.0/include/vtzero/geometry.hpp | 444 +++ contrib/vtzero/1.1.0/include/vtzero/index.hpp | 264 ++ contrib/vtzero/1.1.0/include/vtzero/layer.hpp | 512 ++++ .../vtzero/1.1.0/include/vtzero/output.hpp | 64 + .../vtzero/1.1.0/include/vtzero/property.hpp | 89 + .../1.1.0/include/vtzero/property_mapper.hpp | 103 + .../1.1.0/include/vtzero/property_value.hpp | 398 +++ contrib/vtzero/1.1.0/include/vtzero/types.hpp | 432 +++ .../1.1.0/include/vtzero/vector_tile.hpp | 290 ++ .../vtzero/1.1.0/include/vtzero/version.hpp | 36 + pyproject.toml | 2 +- 110 files changed, 28307 insertions(+), 51 deletions(-) create mode 100644 MANIFEST.in delete mode 160000 clustering-vt/.mason delete mode 100644 clustering-vt/Makefile create mode 100644 contrib/geojson/0.4.3/include/mapbox/geojson.hpp create mode 100644 contrib/geojson/0.4.3/include/mapbox/geojson/rapidjson.hpp create mode 100644 contrib/geojson/0.4.3/include/mapbox/geojson_impl.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/feature.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/box.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/empty.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/envelope.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/for_each_point.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/geometry.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/line_string.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/multi_line_string.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/multi_point.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/multi_polygon.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/point.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/point_arithmetic.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry/polygon.hpp create mode 100644 contrib/geometry/1.0.0/include/mapbox/geometry_io.hpp create mode 100644 contrib/kdbush/0.1.3/LICENSE create mode 100644 contrib/kdbush/0.1.3/README.md create mode 100644 contrib/kdbush/0.1.3/include/kdbush.hpp create mode 100644 contrib/protozero/1.7.0/LICENSE.md create mode 100644 contrib/protozero/1.7.0/include/protozero/basic_pbf_builder.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/basic_pbf_writer.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/buffer_fixed.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/buffer_string.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/buffer_tmpl.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/buffer_vector.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/byteswap.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/config.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/data_view.hpp create 
mode 100644 contrib/protozero/1.7.0/include/protozero/exception.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/iterators.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/pbf_builder.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/pbf_message.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/pbf_reader.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/pbf_writer.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/types.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/varint.hpp create mode 100644 contrib/protozero/1.7.0/include/protozero/version.hpp create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/allocators.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/document.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/encodedstream.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/encodings.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/error/en.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/error/error.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/filereadstream.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/filewritestream.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/fwd.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/biginteger.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/diyfp.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/dtoa.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/ieee754.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/itoa.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/meta.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/pow10.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/regex.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/stack.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/strfunc.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/strtod.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/internal/swap.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/istreamwrapper.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/memorybuffer.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/memorystream.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/inttypes.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/stdint.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/ostreamwrapper.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/pointer.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/prettywriter.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/rapidjson.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/reader.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/schema.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/stream.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/stringbuffer.h create mode 100644 contrib/rapidjson/1.1.0/include/rapidjson/writer.h create mode 100644 contrib/rapidjson/LICENSE create mode 100644 contrib/supercluster/0.3.2/LICENSE create mode 100644 contrib/supercluster/0.3.2/README.md create mode 100644 contrib/supercluster/0.3.2/include/supercluster.hpp create 
mode 100644 contrib/variant/1.2.0/LICENSE create mode 100644 contrib/variant/1.2.0/README.md create mode 100644 contrib/variant/1.2.0/include/mapbox/optional.hpp create mode 100644 contrib/variant/1.2.0/include/mapbox/recursive_wrapper.hpp create mode 100644 contrib/variant/1.2.0/include/mapbox/variant.hpp create mode 100644 contrib/variant/1.2.0/include/mapbox/variant_cast.hpp create mode 100644 contrib/variant/1.2.0/include/mapbox/variant_io.hpp create mode 100644 contrib/variant/1.2.0/include/mapbox/variant_visitor.hpp create mode 100644 contrib/vtzero/1.1.0/LICENSE create mode 100644 contrib/vtzero/1.1.0/include/vtzero/builder.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/builder_impl.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/encoded_property_value.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/exception.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/feature.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/feature_builder_impl.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/geometry.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/index.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/layer.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/output.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/property.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/property_mapper.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/property_value.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/types.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/vector_tile.hpp create mode 100644 contrib/vtzero/1.1.0/include/vtzero/version.hpp diff --git a/.gitignore b/.gitignore index 490e612..e654a5b 100755 --- a/.gitignore +++ b/.gitignore @@ -3,7 +3,3 @@ src/nominatim_data_analyser.egg-info dist src/nominatim_data_analyser/config/config.yaml - -.mason -mason_packages -build diff --git a/.gitmodules b/.gitmodules index d33197d..e69de29 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "clustering-vt/.mason"] - path = clustering-vt/.mason - url = https://github.com/mapbox/mason diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..554155a --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1 @@ +recursive-include contrib * diff --git a/clustering-vt/.mason b/clustering-vt/.mason deleted file mode 160000 index 704f706..0000000 --- a/clustering-vt/.mason +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 704f706f55a8362788cf4daf01be41afadb33482 diff --git a/clustering-vt/Makefile b/clustering-vt/Makefile deleted file mode 100644 index edc2b56..0000000 --- a/clustering-vt/Makefile +++ /dev/null @@ -1,42 +0,0 @@ -CFLAGS += -I include --std=c++17 - -export MASON_DIR = $(shell pwd)/.mason -export MASON = $(MASON_DIR)/mason - -SUPERCLUSTER = supercluster 0.3.2 -KDBUSH = kdbush 0.1.3 -RAPIDJSON = rapidjson 1.1.0 -GEOMETRY = geometry 1.0.0 -GEOJSON = geojson 0.4.3 -VARIANT = variant 1.2.0 -VTZERO = vtzero 1.1.0 -PROTOZERO = protozero 1.7.0 - -DEPS = `$(MASON) cflags $(SUPERCLUSTER)` `$(MASON) cflags $(KDBUSH)` `$(MASON) cflags $(GEOJSON)` `$(MASON) cflags $(GEOMETRY)` `$(MASON) cflags $(VARIANT)` `$(MASON) cflags $(VTZERO)` `$(MASON) cflags $(PROTOZERO)` -RAPIDJSON_DEP = `$(MASON) cflags $(RAPIDJSON)` - -default: - make build/clustering-vt - -$(MASON): -ifeq (,$(wildcard $(MASON))) - rm -f -R ${MASON_DIR} - git submodule update --init --recursive --force -endif - -mason_packages: $(MASON) - $(MASON) install $(SUPERCLUSTER) - $(MASON) install 
$(KDBUSH) - $(MASON) install $(RAPIDJSON) - $(MASON) install $(GEOJSON) - $(MASON) install $(GEOMETRY) - $(MASON) install $(VARIANT) - $(MASON) install $(VTZERO) - $(MASON) install $(PROTOZERO) - -build/clustering-vt: clustering-vt.cpp mason_packages Makefile - mkdir -p build - $(CXX) clustering-vt.cpp $(CFLAGS) -O3 $(DEPS) $(RAPIDJSON_DEP) -lstdc++fs -o build/clustering-vt - -clean: - rm -rf build diff --git a/contrib/geojson/0.4.3/include/mapbox/geojson.hpp b/contrib/geojson/0.4.3/include/mapbox/geojson.hpp new file mode 100644 index 0000000..7cff162 --- /dev/null +++ b/contrib/geojson/0.4.3/include/mapbox/geojson.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include + +namespace mapbox { +namespace geojson { + +using empty = mapbox::geometry::empty; +using point = mapbox::geometry::point; +using multi_point = mapbox::geometry::multi_point; +using line_string = mapbox::geometry::line_string; +using linear_ring = mapbox::geometry::linear_ring; +using multi_line_string = mapbox::geometry::multi_line_string; +using polygon = mapbox::geometry::polygon; +using multi_polygon = mapbox::geometry::multi_polygon; +using geometry = mapbox::geometry::geometry; +using geometry_collection = mapbox::geometry::geometry_collection; + +using value = mapbox::feature::value; +using null_value_t = mapbox::feature::null_value_t; +using identifier = mapbox::feature::identifier; +using feature = mapbox::feature::feature; +using feature_collection = mapbox::feature::feature_collection; + +// Parse inputs of known types. Instantiations are provided for geometry, feature, and +// feature_collection. +template +T parse(const std::string &); + +// Parse any GeoJSON type. +using geojson = mapbox::util::variant; +geojson parse(const std::string &); + +// Stringify inputs of known types. Instantiations are provided for geometry, feature, and +// feature_collection. +template +std::string stringify(const T &); + +// Stringify any GeoJSON type. +std::string stringify(const geojson &); + +} // namespace geojson +} // namespace mapbox diff --git a/contrib/geojson/0.4.3/include/mapbox/geojson/rapidjson.hpp b/contrib/geojson/0.4.3/include/mapbox/geojson/rapidjson.hpp new file mode 100644 index 0000000..360eefd --- /dev/null +++ b/contrib/geojson/0.4.3/include/mapbox/geojson/rapidjson.hpp @@ -0,0 +1,32 @@ +#pragma once + +#include +#include + +namespace mapbox { +namespace geojson { + +// Use the CrtAllocator, because the MemoryPoolAllocator is broken on ARM +// https://github.com/miloyip/rapidjson/issues/200, 301, 388 +using rapidjson_allocator = rapidjson::CrtAllocator; +using rapidjson_document = rapidjson::GenericDocument, rapidjson_allocator>; +using rapidjson_value = rapidjson::GenericValue, rapidjson_allocator>; + +// Convert inputs of known types. Instantiations are provided for geometry, feature, and +// feature_collection. +template +T convert(const rapidjson_value &); + +// Convert any GeoJSON type. +geojson convert(const rapidjson_value &); + +// Convert back to rapidjson value. Instantiations are provided for geometry, feature, and +// feature_collection. +template +rapidjson_value convert(const T &, rapidjson_allocator&); + +// Convert any GeoJSON type. 
+rapidjson_value convert(const geojson &, rapidjson_allocator&); + +} // namespace geojson +} // namespace mapbox diff --git a/contrib/geojson/0.4.3/include/mapbox/geojson_impl.hpp b/contrib/geojson/0.4.3/include/mapbox/geojson_impl.hpp new file mode 100644 index 0000000..2b96210 --- /dev/null +++ b/contrib/geojson/0.4.3/include/mapbox/geojson_impl.hpp @@ -0,0 +1,457 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include + +#include + +namespace mapbox { +namespace geojson { + +using error = std::runtime_error; +using prop_map = std::unordered_map; + +template +T convert(const rapidjson_value &json); + +template <> +point convert(const rapidjson_value &json) { + if (json.Size() < 2) + throw error("coordinates array must have at least 2 numbers"); + + return point{ json[0].GetDouble(), json[1].GetDouble() }; +} + +template +Cont convert(const rapidjson_value &json) { + Cont points; + auto size = json.Size(); + points.reserve(size); + + for (auto &element : json.GetArray()) { + points.push_back(convert(element)); + } + return points; +} + +template <> +geometry convert(const rapidjson_value &json) { + if (!json.IsObject()) + throw error("Geometry must be an object"); + + const auto &json_end = json.MemberEnd(); + + const auto &type_itr = json.FindMember("type"); + if (type_itr == json_end) + throw error("Geometry must have a type property"); + + const auto &type = type_itr->value; + + if (type == "GeometryCollection") { + const auto &geometries_itr = json.FindMember("geometries"); + if (geometries_itr == json_end) + throw error("GeometryCollection must have a geometries property"); + + const auto &json_geometries = geometries_itr->value; + + if (!json_geometries.IsArray()) + throw error("GeometryCollection geometries property must be an array"); + + return geometry{ convert(json_geometries) }; + } + + const auto &coords_itr = json.FindMember("coordinates"); + + if (coords_itr == json_end) + throw error(std::string(type.GetString()) + " geometry must have a coordinates property"); + + const auto &json_coords = coords_itr->value; + if (!json_coords.IsArray()) + throw error("coordinates property must be an array"); + + if (type == "Point") + return geometry{ convert(json_coords) }; + if (type == "MultiPoint") + return geometry{ convert(json_coords) }; + if (type == "LineString") + return geometry{ convert(json_coords) }; + if (type == "MultiLineString") + return geometry{ convert(json_coords) }; + if (type == "Polygon") + return geometry{ convert(json_coords) }; + if (type == "MultiPolygon") + return geometry{ convert(json_coords) }; + + throw error(std::string(type.GetString()) + " not yet implemented"); +} + +template <> +value convert(const rapidjson_value &json); + +template <> +prop_map convert(const rapidjson_value &json) { + if (!json.IsObject()) + throw error("properties must be an object"); + + prop_map result; + for (auto &member : json.GetObject()) { + result.emplace(std::string(member.name.GetString(), member.name.GetStringLength()), + convert(member.value)); + } + return result; +} + +template <> +value convert(const rapidjson_value &json) { + switch (json.GetType()) { + case rapidjson::kNullType: + return null_value_t{}; + case rapidjson::kFalseType: + return false; + case rapidjson::kTrueType: + return true; + case rapidjson::kObjectType: + return convert(json); + case rapidjson::kArrayType: + return convert>(json); + case rapidjson::kStringType: + return std::string(json.GetString(), json.GetStringLength()); + default: + assert(json.GetType() == 
rapidjson::kNumberType); + if (json.IsUint64()) + return std::uint64_t(json.GetUint64()); + if (json.IsInt64()) + return std::int64_t(json.GetInt64()); + return json.GetDouble(); + } +} + +template <> +identifier convert(const rapidjson_value &json) { + switch (json.GetType()) { + case rapidjson::kStringType: + return std::string(json.GetString(), json.GetStringLength()); + case rapidjson::kNumberType: + if (json.IsUint64()) + return std::uint64_t(json.GetUint64()); + if (json.IsInt64()) + return std::int64_t(json.GetInt64()); + return json.GetDouble(); + default: + throw error("Feature id must be a string or number"); + } +} + +template <> +feature convert(const rapidjson_value &json) { + if (!json.IsObject()) + throw error("Feature must be an object"); + + auto const &json_end = json.MemberEnd(); + auto const &type_itr = json.FindMember("type"); + + if (type_itr == json_end) + throw error("Feature must have a type property"); + if (type_itr->value != "Feature") + throw error("Feature type must be Feature"); + + auto const &geom_itr = json.FindMember("geometry"); + + if (geom_itr == json_end) + throw error("Feature must have a geometry property"); + + feature result{ convert(geom_itr->value) }; + + auto const &id_itr = json.FindMember("id"); + if (id_itr != json_end) { + result.id = convert(id_itr->value); + } + + auto const &prop_itr = json.FindMember("properties"); + if (prop_itr != json_end) { + const auto &json_props = prop_itr->value; + if (!json_props.IsNull()) { + result.properties = convert(json_props); + } + } + + return result; +} + +template <> +geojson convert(const rapidjson_value &json) { + if (!json.IsObject()) + throw error("GeoJSON must be an object"); + + const auto &type_itr = json.FindMember("type"); + const auto &json_end = json.MemberEnd(); + + if (type_itr == json_end) + throw error("GeoJSON must have a type property"); + + const auto &type = type_itr->value; + + if (type == "FeatureCollection") { + const auto &features_itr = json.FindMember("features"); + if (features_itr == json_end) + throw error("FeatureCollection must have features property"); + + const auto &json_features = features_itr->value; + + if (!json_features.IsArray()) + throw error("FeatureCollection features property must be an array"); + + feature_collection collection; + + const auto &size = json_features.Size(); + collection.reserve(size); + + for (auto &feature_obj : json_features.GetArray()) { + collection.push_back(convert(feature_obj)); + } + + return geojson{ collection }; + } + + if (type == "Feature") + return geojson{ convert(json) }; + + return geojson{ convert(json) }; +} + +template +T parse(const std::string &json) { + rapidjson_document d; + d.Parse(json.c_str()); + if (d.HasParseError()) { + std::stringstream message; + message << d.GetErrorOffset() << " - " << rapidjson::GetParseError_En(d.GetParseError()); + throw error(message.str()); + } + return convert(d); +} + +template <> +geometry parse(const std::string &); +template <> +feature parse(const std::string &); +template <> +feature_collection parse(const std::string &); + +geojson parse(const std::string &json) { + return parse(json); +} + +geojson convert(const rapidjson_value &json) { + return convert(json); +} + +template <> +rapidjson_value convert(const geometry&, rapidjson_allocator&); + +template <> +rapidjson_value convert(const feature&, rapidjson_allocator&); + +template <> +rapidjson_value convert(const feature_collection&, rapidjson_allocator&); + +struct to_type { +public: + const char * operator()(const empty&) { 
+ return "Empty"; + } + + const char * operator()(const point&) { + return "Point"; + } + + const char * operator()(const line_string&) { + return "LineString"; + } + + const char * operator()(const polygon&) { + return "Polygon"; + } + + const char * operator()(const multi_point&) { + return "MultiPoint"; + } + + const char * operator()(const multi_line_string&) { + return "MultiLineString"; + } + + const char * operator()(const multi_polygon&) { + return "MultiPolygon"; + } + + const char * operator()(const geometry_collection&) { + return "GeometryCollection"; + } +}; + +struct to_coordinates_or_geometries { + rapidjson_allocator& allocator; + + // Handles line_string, polygon, multi_point, multi_line_string, multi_polygon, and geometry_collection. + template + rapidjson_value operator()(const std::vector& vector) { + rapidjson_value result; + result.SetArray(); + for (std::size_t i = 0; i < vector.size(); ++i) { + result.PushBack(operator()(vector[i]), allocator); + } + return result; + } + + rapidjson_value operator()(const point& element) { + rapidjson_value result; + result.SetArray(); + result.PushBack(element.x, allocator); + result.PushBack(element.y, allocator); + return result; + } + + rapidjson_value operator()(const geometry& element) { + return convert(element, allocator); + } +}; + +struct to_value { + rapidjson_allocator& allocator; + + rapidjson_value operator()(null_value_t) { + rapidjson_value result; + result.SetNull(); + return result; + } + + rapidjson_value operator()(bool t) { + rapidjson_value result; + result.SetBool(t); + return result; + } + + rapidjson_value operator()(int64_t t) { + rapidjson_value result; + result.SetInt64(t); + return result; + } + + rapidjson_value operator()(uint64_t t) { + rapidjson_value result; + result.SetUint64(t); + return result; + } + + rapidjson_value operator()(double t) { + rapidjson_value result; + result.SetDouble(t); + return result; + } + + rapidjson_value operator()(const std::string& t) { + rapidjson_value result; + result.SetString(t.data(), rapidjson::SizeType(t.size()), allocator); + return result; + } + + rapidjson_value operator()(const std::vector& array) { + rapidjson_value result; + result.SetArray(); + for (const auto& item : array) { + result.PushBack(value::visit(item, *this), allocator); + } + return result; + } + + rapidjson_value operator()(const std::unordered_map& map) { + rapidjson_value result; + result.SetObject(); + for (const auto& property : map) { + result.AddMember( + rapidjson::GenericStringRef { + property.first.data(), + rapidjson::SizeType(property.first.size()) + }, + value::visit(property.second, *this), + allocator); + } + return result; + } +}; + +template <> +rapidjson_value convert(const geometry& element, rapidjson_allocator& allocator) { + rapidjson_value result(rapidjson::kObjectType); + + result.AddMember( + "type", + rapidjson::GenericStringRef { geometry::visit(element, to_type()) }, + allocator); + + result.AddMember( + rapidjson::GenericStringRef { element.is() ? 
"geometries" : "coordinates" }, + geometry::visit(element, to_coordinates_or_geometries { allocator }), + allocator); + + return result; +} + +template <> +rapidjson_value convert(const feature& element, rapidjson_allocator& allocator) { + rapidjson_value result(rapidjson::kObjectType); + result.AddMember("type", "Feature", allocator); + + if (!element.id.is()) { + result.AddMember("id", identifier::visit(element.id, to_value { allocator }), allocator); + } + + result.AddMember("geometry", convert(element.geometry, allocator), allocator); + result.AddMember("properties", to_value { allocator }(element.properties), allocator); + + return result; +} + +template <> +rapidjson_value convert(const feature_collection& collection, rapidjson_allocator& allocator) { + rapidjson_value result(rapidjson::kObjectType); + result.AddMember("type", "FeatureCollection", allocator); + + rapidjson_value features(rapidjson::kArrayType); + for (const auto& element : collection) { + features.PushBack(convert(element, allocator), allocator); + } + result.AddMember("features", features, allocator); + + return result; +} + +rapidjson_value convert(const geojson& element, rapidjson_allocator& allocator) { + return geojson::visit(element, [&] (const auto& alternative) { + return convert(alternative, allocator); + }); +} + +template +std::string stringify(const T& t) { + rapidjson_allocator allocator; + rapidjson::StringBuffer buffer; + rapidjson::Writer writer(buffer); + convert(t, allocator).Accept(writer); + return buffer.GetString(); +} + +std::string stringify(const geojson& element) { + return geojson::visit(element, [] (const auto& alternative) { + return stringify(alternative); + }); +} + +} // namespace geojson +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/feature.hpp b/contrib/geometry/1.0.0/include/mapbox/feature.hpp new file mode 100644 index 0000000..8d03f46 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/feature.hpp @@ -0,0 +1,113 @@ +#pragma once + +#include + +#include + +#include +#include +#include +#include + +namespace mapbox { +namespace feature { + +struct value; + +struct null_value_t +{ +}; + +constexpr bool operator==(const null_value_t&, const null_value_t&) { return true; } +constexpr bool operator!=(const null_value_t&, const null_value_t&) { return false; } +constexpr bool operator<(const null_value_t&, const null_value_t&) { return false; } + +constexpr null_value_t null_value = null_value_t(); + +// Multiple numeric types (uint64_t, int64_t, double) are present in order to support +// the widest possible range of JSON numbers, which do not have a maximum range. +// Implementations that produce `value`s should use that order for type preference, +// using uint64_t for positive integers, int64_t for negative integers, and double +// for non-integers and integers outside the range of 64 bits. +using value_base = mapbox::util::variant>, + mapbox::util::recursive_wrapper>>; + +struct value : value_base +{ + using value_base::value_base; +}; + +using property_map = std::unordered_map; + +// The same considerations and requirement for numeric types apply as for `value_base`. +using identifier = mapbox::util::variant; + +template +struct feature +{ + using coordinate_type = T; + using geometry_type = mapbox::geometry::geometry; // Fully qualified to avoid GCC -fpermissive error. 
+ + geometry_type geometry; + property_map properties; + identifier id; + + feature() + : geometry(), + properties(), + id() {} + feature(geometry_type const& geom_) + : geometry(geom_), + properties(), + id() {} + feature(geometry_type&& geom_) + : geometry(std::move(geom_)), + properties(), + id() {} + feature(geometry_type const& geom_, property_map const& prop_) + : geometry(geom_), properties(prop_), id() {} + feature(geometry_type&& geom_, property_map&& prop_) + : geometry(std::move(geom_)), + properties(std::move(prop_)), + id() {} + feature(geometry_type const& geom_, property_map const& prop_, identifier const& id_) + : geometry(geom_), + properties(prop_), + id(id_) {} + feature(geometry_type&& geom_, property_map&& prop_, identifier&& id_) + : geometry(std::move(geom_)), + properties(std::move(prop_)), + id(std::move(id_)) {} +}; + +template +constexpr bool operator==(feature const& lhs, feature const& rhs) +{ + return lhs.id == rhs.id && lhs.geometry == rhs.geometry && lhs.properties == rhs.properties; +} + +template +constexpr bool operator!=(feature const& lhs, feature const& rhs) +{ + return !(lhs == rhs); +} + +template class Cont = std::vector> +struct feature_collection : Cont> +{ + using coordinate_type = T; + using feature_type = feature; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + feature_collection(Args&&... args) : container_type(std::forward(args)...) + { + } + feature_collection(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace feature +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry.hpp new file mode 100644 index 0000000..9caecd9 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry.hpp @@ -0,0 +1,12 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/box.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/box.hpp new file mode 100644 index 0000000..bb3ea57 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/box.hpp @@ -0,0 +1,36 @@ +#pragma once + +#include + +namespace mapbox { +namespace geometry { + +template +struct box +{ + using coordinate_type = T; + using point_type = point; + + constexpr box(point_type const& min_, point_type const& max_) + : min(min_), max(max_) + { + } + + point_type min; + point_type max; +}; + +template +constexpr bool operator==(box const& lhs, box const& rhs) +{ + return lhs.min == rhs.min && lhs.max == rhs.max; +} + +template +constexpr bool operator!=(box const& lhs, box const& rhs) +{ + return lhs.min != rhs.min || lhs.max != rhs.max; +} + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/empty.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/empty.hpp new file mode 100644 index 0000000..0ea446d --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/empty.hpp @@ -0,0 +1,18 @@ +#pragma once + +namespace mapbox { +namespace geometry { + +struct empty +{ +}; // this Geometry type represents the empty point set, ∅, for the coordinate space (OGC Simple Features). 
+ +constexpr bool operator==(empty, empty) { return true; } +constexpr bool operator!=(empty, empty) { return false; } +constexpr bool operator<(empty, empty) { return false; } +constexpr bool operator>(empty, empty) { return false; } +constexpr bool operator<=(empty, empty) { return true; } +constexpr bool operator>=(empty, empty) { return true; } + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/envelope.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/envelope.hpp new file mode 100644 index 0000000..e1f722f --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/envelope.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include +#include + +#include + +namespace mapbox { +namespace geometry { + +template +box envelope(G const& geometry) +{ + using limits = std::numeric_limits; + + T min_t = limits::has_infinity ? -limits::infinity() : limits::min(); + T max_t = limits::has_infinity ? limits::infinity() : limits::max(); + + point min(max_t, max_t); + point max(min_t, min_t); + + for_each_point(geometry, [&](point const& point) { + if (min.x > point.x) min.x = point.x; + if (min.y > point.y) min.y = point.y; + if (max.x < point.x) max.x = point.x; + if (max.y < point.y) max.y = point.y; + }); + + return box(min, max); +} + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/for_each_point.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/for_each_point.hpp new file mode 100644 index 0000000..d31b484 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/for_each_point.hpp @@ -0,0 +1,51 @@ +#pragma once + +#include + +namespace mapbox { +namespace geometry { + +template +void for_each_point(mapbox::geometry::empty const&, F&&) +{ +} + +template +auto for_each_point(Point&& point, F&& f) + -> decltype(point.x, point.y, void()) +{ + f(std::forward(point)); +} + +template +auto for_each_point(Container&& container, F&& f) + -> decltype(container.begin(), container.end(), void()); + +template +void for_each_point(mapbox::util::variant const& geom, F&& f) +{ + mapbox::util::variant::visit(geom, [&](auto const& g) { + for_each_point(g, f); + }); +} + +template +void for_each_point(mapbox::util::variant& geom, F&& f) +{ + mapbox::util::variant::visit(geom, [&](auto& g) { + for_each_point(g, f); + }); +} + +template +auto for_each_point(Container&& container, F&& f) + -> decltype(container.begin(), container.end(), void()) +{ + for (auto& e : container) + { + for_each_point(e, f); + } +} + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/geometry.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/geometry.hpp new file mode 100644 index 0000000..fa1e7c3 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/geometry.hpp @@ -0,0 +1,56 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +// stl +#include + +namespace mapbox { +namespace geometry { + +template class Cont = std::vector> +struct geometry_collection; + +template class Cont = std::vector> +using geometry_base = mapbox::util::variant, + line_string, + polygon, + multi_point, + multi_line_string, + multi_polygon, + geometry_collection>; + +template class Cont = std::vector> +struct geometry : geometry_base +{ + using coordinate_type = T; + using geometry_base::geometry_base; +}; + +template class Cont> +struct geometry_collection : Cont> +{ + using coordinate_type = T; + using 
geometry_type = geometry; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + geometry_collection(Args&&... args) : container_type(std::forward(args)...) + { + } + geometry_collection(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/line_string.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/line_string.hpp new file mode 100644 index 0000000..a015f71 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/line_string.hpp @@ -0,0 +1,28 @@ +#pragma once + +// mapbox +#include +// stl +#include + +namespace mapbox { +namespace geometry { + +template class Cont = std::vector> +struct line_string : Cont> +{ + using coordinate_type = T; + using point_type = point; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + line_string(Args&&... args) : container_type(std::forward(args)...) + { + } + line_string(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/multi_line_string.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/multi_line_string.hpp new file mode 100644 index 0000000..b528fa6 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/multi_line_string.hpp @@ -0,0 +1,28 @@ +#pragma once + +// mapbox +#include +// stl +#include + +namespace mapbox { +namespace geometry { + +template class Cont = std::vector> +struct multi_line_string : Cont> +{ + using coordinate_type = T; + using line_string_type = line_string; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + multi_line_string(Args&&... args) : container_type(std::forward(args)...) + { + } + multi_line_string(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/multi_point.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/multi_point.hpp new file mode 100644 index 0000000..060927b --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/multi_point.hpp @@ -0,0 +1,28 @@ +#pragma once + +// mapbox +#include +// stl +#include + +namespace mapbox { +namespace geometry { + +template class Cont = std::vector> +struct multi_point : Cont> +{ + using coordinate_type = T; + using point_type = point; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + multi_point(Args&&... args) : container_type(std::forward(args)...) + { + } + multi_point(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/multi_polygon.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/multi_polygon.hpp new file mode 100644 index 0000000..9546860 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/multi_polygon.hpp @@ -0,0 +1,28 @@ +#pragma once + +// mapbox +#include +// stl +#include + +namespace mapbox { +namespace geometry { + +template class Cont = std::vector> +struct multi_polygon : Cont> +{ + using coordinate_type = T; + using polygon_type = polygon; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + multi_polygon(Args&&... 
args) : container_type(std::forward(args)...) + { + } + multi_polygon(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/point.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/point.hpp new file mode 100644 index 0000000..da8d677 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/point.hpp @@ -0,0 +1,42 @@ +#pragma once + +namespace mapbox { +namespace geometry { + +template +struct point +{ + using coordinate_type = T; + + constexpr point() + : x(), y() + { + } + constexpr point(T x_, T y_) + : x(x_), y(y_) + { + } + + T x; + T y; +}; + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wfloat-equal" + +template +constexpr bool operator==(point const& lhs, point const& rhs) +{ + return lhs.x == rhs.x && lhs.y == rhs.y; +} + +#pragma GCC diagnostic pop + +template +constexpr bool operator!=(point const& lhs, point const& rhs) +{ + return !(lhs == rhs); +} + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/point_arithmetic.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/point_arithmetic.hpp new file mode 100644 index 0000000..0c4c632 --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/point_arithmetic.hpp @@ -0,0 +1,119 @@ +#pragma once + +namespace mapbox { +namespace geometry { + +template +point operator+(point const& lhs, point const& rhs) +{ + return point(lhs.x + rhs.x, lhs.y + rhs.y); +} + +template +point operator+(point const& lhs, T const& rhs) +{ + return point(lhs.x + rhs, lhs.y + rhs); +} + +template +point operator-(point const& lhs, point const& rhs) +{ + return point(lhs.x - rhs.x, lhs.y - rhs.y); +} + +template +point operator-(point const& lhs, T const& rhs) +{ + return point(lhs.x - rhs, lhs.y - rhs); +} + +template +point operator*(point const& lhs, point const& rhs) +{ + return point(lhs.x * rhs.x, lhs.y * rhs.y); +} + +template +point operator*(point const& lhs, T const& rhs) +{ + return point(lhs.x * rhs, lhs.y * rhs); +} + +template +point operator/(point const& lhs, point const& rhs) +{ + return point(lhs.x / rhs.x, lhs.y / rhs.y); +} + +template +point operator/(point const& lhs, T const& rhs) +{ + return point(lhs.x / rhs, lhs.y / rhs); +} + +template +point& operator+=(point& lhs, point const& rhs) +{ + lhs.x += rhs.x; + lhs.y += rhs.y; + return lhs; +} + +template +point& operator+=(point& lhs, T const& rhs) +{ + lhs.x += rhs; + lhs.y += rhs; + return lhs; +} + +template +point& operator-=(point& lhs, point const& rhs) +{ + lhs.x -= rhs.x; + lhs.y -= rhs.y; + return lhs; +} + +template +point& operator-=(point& lhs, T const& rhs) +{ + lhs.x -= rhs; + lhs.y -= rhs; + return lhs; +} + +template +point& operator*=(point& lhs, point const& rhs) +{ + lhs.x *= rhs.x; + lhs.y *= rhs.y; + return lhs; +} + +template +point& operator*=(point& lhs, T const& rhs) +{ + lhs.x *= rhs; + lhs.y *= rhs; + return lhs; +} + +template +point& operator/=(point& lhs, point const& rhs) +{ + lhs.x /= rhs.x; + lhs.y /= rhs.y; + return lhs; +} + +template +point& operator/=(point& lhs, T const& rhs) +{ + lhs.x /= rhs; + lhs.y /= rhs; + return lhs; +} + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry/polygon.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry/polygon.hpp new file mode 100644 index 0000000..5cad7bb --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry/polygon.hpp 
@@ -0,0 +1,45 @@ +#pragma once + +// mapbox +#include + +// stl +#include + +namespace mapbox { +namespace geometry { + +template class Cont = std::vector> +struct linear_ring : Cont> +{ + using coordinate_type = T; + using point_type = point; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + linear_ring(Args&&... args) : container_type(std::forward(args)...) + { + } + linear_ring(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +template class Cont = std::vector> +struct polygon : Cont> +{ + using coordinate_type = T; + using linear_ring_type = linear_ring; + using container_type = Cont; + using size_type = typename container_type::size_type; + + template + polygon(Args&&... args) : container_type(std::forward(args)...) + { + } + polygon(std::initializer_list args) + : container_type(std::move(args)) {} +}; + +} // namespace geometry +} // namespace mapbox diff --git a/contrib/geometry/1.0.0/include/mapbox/geometry_io.hpp b/contrib/geometry/1.0.0/include/mapbox/geometry_io.hpp new file mode 100644 index 0000000..4d91ccc --- /dev/null +++ b/contrib/geometry/1.0.0/include/mapbox/geometry_io.hpp @@ -0,0 +1,98 @@ +#pragma once + +#include +#include + +#include +#include + +namespace mapbox { +namespace geometry { + +std::ostream& operator<<(std::ostream& os, const empty&) +{ + return os << "[]"; +} + +template +std::ostream& operator<<(std::ostream& os, const point& point) +{ + return os << "[" << point.x << "," << point.y << "]"; +} + +template class C, class... Args> +std::ostream& operator<<(std::ostream& os, const C& cont) +{ + os << "["; + for (auto it = cont.cbegin();;) + { + os << *it; + if (++it == cont.cend()) + { + break; + } + os << ","; + } + return os << "]"; +} + +template +std::ostream& operator<<(std::ostream& os, const line_string& geom) +{ + return os << static_cast::container_type>(geom); +} + +template +std::ostream& operator<<(std::ostream& os, const linear_ring& geom) +{ + return os << static_cast::container_type>(geom); +} + +template +std::ostream& operator<<(std::ostream& os, const polygon& geom) +{ + return os << static_cast::container_type>(geom); +} + +template +std::ostream& operator<<(std::ostream& os, const multi_point& geom) +{ + return os << static_cast::container_type>(geom); +} + +template +std::ostream& operator<<(std::ostream& os, const multi_line_string& geom) +{ + return os << static_cast::container_type>(geom); +} + +template +std::ostream& operator<<(std::ostream& os, const multi_polygon& geom) +{ + return os << static_cast::container_type>(geom); +} + +template +std::ostream& operator<<(std::ostream& os, const geometry& geom) +{ + geometry::visit(geom, [&](const auto& g) { os << g; }); + return os; +} + +template +std::ostream& operator<<(std::ostream& os, const geometry_collection& geom) +{ + return os << static_cast::container_type>(geom); +} + +} // namespace geometry + +namespace feature { + +std::ostream& operator<<(std::ostream& os, const null_value_t&) +{ + return os << "[]"; +} + +} // namespace feature +} // namespace mapbox diff --git a/contrib/kdbush/0.1.3/LICENSE b/contrib/kdbush/0.1.3/LICENSE new file mode 100644 index 0000000..3c8f67e --- /dev/null +++ b/contrib/kdbush/0.1.3/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2016, Vladimir Agafonkin + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/contrib/kdbush/0.1.3/README.md b/contrib/kdbush/0.1.3/README.md new file mode 100644 index 0000000..ea0f639 --- /dev/null +++ b/contrib/kdbush/0.1.3/README.md @@ -0,0 +1,3 @@ +A C++11 port of [kdbush](https://github.com/mourner/kdbush), a fast static spatial index for 2D points. + +[![Build Status](https://travis-ci.org/mourner/kdbush.hpp.svg?branch=master)](https://travis-ci.org/mourner/kdbush.hpp) diff --git a/contrib/kdbush/0.1.3/include/kdbush.hpp b/contrib/kdbush/0.1.3/include/kdbush.hpp new file mode 100644 index 0000000..99bf120 --- /dev/null +++ b/contrib/kdbush/0.1.3/include/kdbush.hpp @@ -0,0 +1,220 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace kdbush { + +template +struct nth { + inline static typename std::tuple_element::type get(const T &t) { + return std::get(t); + } +}; + +template +class KDBush { + +public: + using TNumber = decltype(nth<0, TPoint>::get(std::declval())); + static_assert( + std::is_same::get(std::declval()))>::value, + "point component types must be identical"); + + static const std::uint8_t defaultNodeSize = 64; + + KDBush(const std::uint8_t nodeSize_ = defaultNodeSize) : nodeSize(nodeSize_) { + } + + KDBush(const std::vector &points_, const std::uint8_t nodeSize_ = defaultNodeSize) + : KDBush(std::begin(points_), std::end(points_), nodeSize_) { + } + + template + KDBush(const TPointIter &points_begin, + const TPointIter &points_end, + const std::uint8_t nodeSize_ = defaultNodeSize) + : nodeSize(nodeSize_) { + fill(points_begin, points_end); + } + + void fill(const std::vector &points_) { + fill(std::begin(points_), std::end(points_)); + } + + template + void fill(const TPointIter &points_begin, const TPointIter &points_end) { + assert(points.empty()); + const TIndex size = static_cast(std::distance(points_begin, points_end)); + + if (size == 0) return; + + points.reserve(size); + ids.reserve(size); + + TIndex i = 0; + for (auto p = points_begin; p != points_end; p++) { + points.emplace_back(nth<0, TPoint>::get(*p), nth<1, TPoint>::get(*p)); + ids.push_back(i++); + } + + sortKD(0, size - 1, 0); + } + + template + void range(const TNumber minX, + const TNumber minY, + const TNumber maxX, + const TNumber maxY, + const TVisitor &visitor) const { + range(minX, minY, maxX, maxY, visitor, 0, static_cast(ids.size() - 1), 0); + } + + template + void within(const TNumber qx, const TNumber qy, const TNumber r, const TVisitor &visitor) const { + within(qx, qy, r, visitor, 0, static_cast(ids.size() - 1), 0); + } + +protected: + std::vector ids; + std::vector> points; + +private: + const std::uint8_t nodeSize; + + template + void range(const TNumber minX, + const TNumber minY, + const TNumber maxX, + const TNumber maxY, + const TVisitor &visitor, + const TIndex left, + const TIndex right, + const std::uint8_t axis) const { + + if (points.empty()) return; + + if (right - left <= nodeSize) { + for (auto i = left; i <= right; i++) { + const TNumber x = std::get<0>(points[i]); + const TNumber y = std::get<1>(points[i]); + if (x >= 
minX && x <= maxX && y >= minY && y <= maxY) visitor(ids[i]); + } + return; + } + + const TIndex m = (left + right) >> 1; + const TNumber x = std::get<0>(points[m]); + const TNumber y = std::get<1>(points[m]); + + if (x >= minX && x <= maxX && y >= minY && y <= maxY) visitor(ids[m]); + + if (axis == 0 ? minX <= x : minY <= y) + range(minX, minY, maxX, maxY, visitor, left, m - 1, (axis + 1) % 2); + + if (axis == 0 ? maxX >= x : maxY >= y) + range(minX, minY, maxX, maxY, visitor, m + 1, right, (axis + 1) % 2); + } + + template + void within(const TNumber qx, + const TNumber qy, + const TNumber r, + const TVisitor &visitor, + const TIndex left, + const TIndex right, + const std::uint8_t axis) const { + + if (points.empty()) return; + + const TNumber r2 = r * r; + + if (right - left <= nodeSize) { + for (auto i = left; i <= right; i++) { + const TNumber x = std::get<0>(points[i]); + const TNumber y = std::get<1>(points[i]); + if (sqDist(x, y, qx, qy) <= r2) visitor(ids[i]); + } + return; + } + + const TIndex m = (left + right) >> 1; + const TNumber x = std::get<0>(points[m]); + const TNumber y = std::get<1>(points[m]); + + if (sqDist(x, y, qx, qy) <= r2) visitor(ids[m]); + + if (axis == 0 ? qx - r <= x : qy - r <= y) + within(qx, qy, r, visitor, left, m - 1, (axis + 1) % 2); + + if (axis == 0 ? qx + r >= x : qy + r >= y) + within(qx, qy, r, visitor, m + 1, right, (axis + 1) % 2); + } + + void sortKD(const TIndex left, const TIndex right, const std::uint8_t axis) { + if (right - left <= nodeSize) return; + const TIndex m = (left + right) >> 1; + if (axis == 0) { + select<0>(m, left, right); + } else { + select<1>(m, left, right); + } + sortKD(left, m - 1, (axis + 1) % 2); + sortKD(m + 1, right, (axis + 1) % 2); + } + + template + void select(const TIndex k, TIndex left, TIndex right) { + + while (right > left) { + if (right - left > 600) { + const double n = right - left + 1; + const double m = k - left + 1; + const double z = std::log(n); + const double s = 0.5 * std::exp(2 * z / 3); + const double r = + k - m * s / n + 0.5 * std::sqrt(z * s * (1 - s / n)) * (2 * m < n ? -1 : 1); + select(k, std::max(left, TIndex(r)), std::min(right, TIndex(r + s))); + } + + const TNumber t = std::get(points[k]); + TIndex i = left; + TIndex j = right; + + swapItem(left, k); + if (std::get(points[right]) > t) swapItem(left, right); + + while (i < j) { + swapItem(i++, j--); + while (std::get(points[i]) < t) i++; + while (std::get(points[j]) > t) j--; + } + + if (std::get(points[left]) == t) + swapItem(left, j); + else { + swapItem(++j, right); + } + + if (j <= k) left = j + 1; + if (k <= j) right = j - 1; + } + } + + void swapItem(const TIndex i, const TIndex j) { + std::iter_swap(ids.begin() + static_cast(i), ids.begin() + static_cast(j)); + std::iter_swap(points.begin() + static_cast(i), points.begin() + static_cast(j)); + } + + TNumber sqDist(const TNumber ax, const TNumber ay, const TNumber bx, const TNumber by) const { + auto dx = ax - bx; + auto dy = ay - by; + return dx * dx + dy * dy; + } +}; + +} // namespace kdbush diff --git a/contrib/protozero/1.7.0/LICENSE.md b/contrib/protozero/1.7.0/LICENSE.md new file mode 100644 index 0000000..d0b3011 --- /dev/null +++ b/contrib/protozero/1.7.0/LICENSE.md @@ -0,0 +1,24 @@ +protozero copyright (c) Mapbox. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/protozero/1.7.0/include/protozero/basic_pbf_builder.hpp b/contrib/protozero/1.7.0/include/protozero/basic_pbf_builder.hpp new file mode 100644 index 0000000..0ede726 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/basic_pbf_builder.hpp @@ -0,0 +1,266 @@ +#ifndef PROTOZERO_BASIC_PBF_BUILDER_HPP +#define PROTOZERO_BASIC_PBF_BUILDER_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file basic_pbf_builder.hpp + * + * @brief Contains the basic_pbf_builder template class. + */ + +#include "basic_pbf_writer.hpp" +#include "types.hpp" + +#include + +namespace protozero { + +/** + * The basic_pbf_builder is used to write PBF formatted messages into a buffer. + * It is based on the basic_pbf_writer class and has all the same methods. The + * difference is that while the pbf_writer class takes an integer tag, + * this template class takes a tag of the template type T. The idea is that + * T will be an enumeration value and this helps reduce the possibility of + * programming errors. + * + * Almost all methods in this class can throw an std::bad_alloc exception if + * the underlying buffer class wants to resize. + * + * Read the tutorial to understand how this class is used. In most cases you + * want to use the pbf_builder class which uses a std::string as buffer type. + */ +template +class basic_pbf_builder : public basic_pbf_writer { + + static_assert(std::is_same::type>::value, + "T must be enum with underlying type protozero::pbf_tag_type"); + +public: + + /// The type of messages this class will build. + using enum_type = T; + + basic_pbf_builder() = default; + + /** + * Create a builder using the given string as a data store. The object + * stores a reference to that string and adds all data to it. The string + * doesn't have to be empty. The pbf_message object will just append data. 
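+ *
+ * A minimal usage sketch (the enum and field numbers below are assumed for
+ * illustration and are not taken from the upstream protozero tutorial):
+ * @code
+ * enum class Example : protozero::pbf_tag_type { name = 1, version = 2 };
+ * std::string buffer;
+ * protozero::basic_pbf_builder<std::string, Example> builder{buffer};
+ * builder.add_string(Example::name, "roads");  // writes field 1 as "string"
+ * builder.add_uint32(Example::version, 2U);    // writes field 2 as "uint32"
+ * @endcode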
+ */ + explicit basic_pbf_builder(TBuffer& data) noexcept : + basic_pbf_writer{data} { + } + + /** + * Construct a pbf_builder for a submessage from the pbf_message or + * pbf_writer of the parent message. + * + * @param parent_writer The parent pbf_message or pbf_writer + * @param tag Tag of the field that will be written + */ + template + basic_pbf_builder(basic_pbf_writer& parent_writer, P tag) noexcept : + basic_pbf_writer{parent_writer, pbf_tag_type(tag)} { + } + +/// @cond INTERNAL +#define PROTOZERO_WRITER_WRAP_ADD_SCALAR(name, type) \ + void add_##name(T tag, type value) { \ + basic_pbf_writer::add_##name(pbf_tag_type(tag), value); \ + } + + PROTOZERO_WRITER_WRAP_ADD_SCALAR(bool, bool) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(enum, int32_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(int32, int32_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(sint32, int32_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(uint32, uint32_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(int64, int64_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(sint64, int64_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(uint64, uint64_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(fixed32, uint32_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(sfixed32, int32_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(fixed64, uint64_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(sfixed64, int64_t) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(float, float) + PROTOZERO_WRITER_WRAP_ADD_SCALAR(double, double) + +#undef PROTOZERO_WRITER_WRAP_ADD_SCALAR +/// @endcond + + /** + * Add "bytes" field to data. + * + * @param tag Tag of the field + * @param value Pointer to value to be written + * @param size Number of bytes to be written + */ + void add_bytes(T tag, const char* value, std::size_t size) { + basic_pbf_writer::add_bytes(pbf_tag_type(tag), value, size); + } + + /** + * Add "bytes" field to data. + * + * @param tag Tag of the field + * @param value Value to be written + */ + void add_bytes(T tag, const data_view& value) { + basic_pbf_writer::add_bytes(pbf_tag_type(tag), value); + } + + /** + * Add "bytes" field to data. + * + * @param tag Tag of the field + * @param value Value to be written + */ + void add_bytes(T tag, const std::string& value) { + basic_pbf_writer::add_bytes(pbf_tag_type(tag), value); + } + + /** + * Add "bytes" field to data. Bytes from the value are written until + * a null byte is encountered. The null byte is not added. + * + * @param tag Tag of the field + * @param value Pointer to zero-delimited value to be written + */ + void add_bytes(T tag, const char* value) { + basic_pbf_writer::add_bytes(pbf_tag_type(tag), value); + } + + /** + * Add "bytes" field to data using vectored input. All the data in the + * 2nd and further arguments is "concatenated" with only a single copy + * into the final buffer. + * + * This will work with objects of any type supporting the data() and + * size() methods like std::string or protozero::data_view. + * + * Example: + * @code + * std::string data1 = "abc"; + * std::string data2 = "xyz"; + * builder.add_bytes_vectored(1, data1, data2); + * @endcode + * + * @tparam Ts List of types supporting data() and size() methods. + * @param tag Tag of the field + * @param values List of objects of types Ts with data to be appended. + */ + template + void add_bytes_vectored(T tag, Ts&&... values) { + basic_pbf_writer::add_bytes_vectored(pbf_tag_type(tag), std::forward(values)...); + } + + /** + * Add "string" field to data. 
+ * + * @param tag Tag of the field + * @param value Pointer to value to be written + * @param size Number of bytes to be written + */ + void add_string(T tag, const char* value, std::size_t size) { + basic_pbf_writer::add_string(pbf_tag_type(tag), value, size); + } + + /** + * Add "string" field to data. + * + * @param tag Tag of the field + * @param value Value to be written + */ + void add_string(T tag, const data_view& value) { + basic_pbf_writer::add_string(pbf_tag_type(tag), value); + } + + /** + * Add "string" field to data. + * + * @param tag Tag of the field + * @param value Value to be written + */ + void add_string(T tag, const std::string& value) { + basic_pbf_writer::add_string(pbf_tag_type(tag), value); + } + + /** + * Add "string" field to data. Bytes from the value are written until + * a null byte is encountered. The null byte is not added. + * + * @param tag Tag of the field + * @param value Pointer to value to be written + */ + void add_string(T tag, const char* value) { + basic_pbf_writer::add_string(pbf_tag_type(tag), value); + } + + /** + * Add "message" field to data. + * + * @param tag Tag of the field + * @param value Pointer to message to be written + * @param size Length of the message + */ + void add_message(T tag, const char* value, std::size_t size) { + basic_pbf_writer::add_message(pbf_tag_type(tag), value, size); + } + + /** + * Add "message" field to data. + * + * @param tag Tag of the field + * @param value Value to be written. The value must be a complete message. + */ + void add_message(T tag, const data_view& value) { + basic_pbf_writer::add_message(pbf_tag_type(tag), value); + } + + /** + * Add "message" field to data. + * + * @param tag Tag of the field + * @param value Value to be written. The value must be a complete message. + */ + void add_message(T tag, const std::string& value) { + basic_pbf_writer::add_message(pbf_tag_type(tag), value); + } + +/// @cond INTERNAL +#define PROTOZERO_WRITER_WRAP_ADD_PACKED(name) \ + template \ + void add_packed_##name(T tag, InputIterator first, InputIterator last) { \ + basic_pbf_writer::add_packed_##name(pbf_tag_type(tag), first, last); \ + } + + PROTOZERO_WRITER_WRAP_ADD_PACKED(bool) + PROTOZERO_WRITER_WRAP_ADD_PACKED(enum) + PROTOZERO_WRITER_WRAP_ADD_PACKED(int32) + PROTOZERO_WRITER_WRAP_ADD_PACKED(sint32) + PROTOZERO_WRITER_WRAP_ADD_PACKED(uint32) + PROTOZERO_WRITER_WRAP_ADD_PACKED(int64) + PROTOZERO_WRITER_WRAP_ADD_PACKED(sint64) + PROTOZERO_WRITER_WRAP_ADD_PACKED(uint64) + PROTOZERO_WRITER_WRAP_ADD_PACKED(fixed32) + PROTOZERO_WRITER_WRAP_ADD_PACKED(sfixed32) + PROTOZERO_WRITER_WRAP_ADD_PACKED(fixed64) + PROTOZERO_WRITER_WRAP_ADD_PACKED(sfixed64) + PROTOZERO_WRITER_WRAP_ADD_PACKED(float) + PROTOZERO_WRITER_WRAP_ADD_PACKED(double) + +#undef PROTOZERO_WRITER_WRAP_ADD_PACKED +/// @endcond + +}; // class basic_pbf_builder + +} // end namespace protozero + +#endif // PROTOZERO_BASIC_PBF_BUILDER_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/basic_pbf_writer.hpp b/contrib/protozero/1.7.0/include/protozero/basic_pbf_writer.hpp new file mode 100644 index 0000000..f167c4d --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/basic_pbf_writer.hpp @@ -0,0 +1,1054 @@ +#ifndef PROTOZERO_BASIC_PBF_WRITER_HPP +#define PROTOZERO_BASIC_PBF_WRITER_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. 
+ +*****************************************************************************/ + +/** + * @file basic_pbf_writer.hpp + * + * @brief Contains the basic_pbf_writer template class. + */ + +#include "buffer_tmpl.hpp" +#include "config.hpp" +#include "data_view.hpp" +#include "types.hpp" +#include "varint.hpp" + +#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN +# include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace protozero { + +namespace detail { + + template class packed_field_varint; + template class packed_field_svarint; + template class packed_field_fixed; + +} // end namespace detail + +/** + * The basic_pbf_writer is used to write PBF formatted messages into a buffer. + * + * This uses TBuffer as the type for the underlaying buffer. In typical uses + * this is std::string, but you can use a different type that must support + * the right interface. Please see the documentation for details. + * + * Almost all methods in this class can throw an std::bad_alloc exception if + * the underlying buffer class wants to resize. + */ +template +class basic_pbf_writer { + + // A pointer to a buffer holding the data already written to the PBF + // message. For default constructed writers or writers that have been + // rolled back, this is a nullptr. + TBuffer* m_data = nullptr; + + // A pointer to a parent writer object if this is a submessage. If this + // is a top-level writer, it is a nullptr. + basic_pbf_writer* m_parent_writer = nullptr; + + // This is usually 0. If there is an open submessage, this is set in the + // parent to the rollback position, ie. the last position before the + // submessage was started. This is the position where the header of the + // submessage starts. + std::size_t m_rollback_pos = 0; + + // This is usually 0. If there is an open submessage, this is set in the + // parent to the position where the data of the submessage is written to. 
+ std::size_t m_pos = 0; + + void add_varint(uint64_t value) { + protozero_assert(m_pos == 0 && "you can't add fields to a parent basic_pbf_writer if there is an existing basic_pbf_writer for a submessage"); + protozero_assert(m_data); + add_varint_to_buffer(m_data, value); + } + + void add_field(pbf_tag_type tag, pbf_wire_type type) { + protozero_assert(((tag > 0 && tag < 19000) || (tag > 19999 && tag <= ((1U << 29U) - 1))) && "tag out of range"); + const uint32_t b = (tag << 3U) | uint32_t(type); + add_varint(b); + } + + void add_tagged_varint(pbf_tag_type tag, uint64_t value) { + add_field(tag, pbf_wire_type::varint); + add_varint(value); + } + + template + void add_fixed(T value) { + protozero_assert(m_pos == 0 && "you can't add fields to a parent basic_pbf_writer if there is an existing basic_pbf_writer for a submessage"); + protozero_assert(m_data); +#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN + byteswap_inplace(&value); +#endif + buffer_customization::append(m_data, reinterpret_cast(&value), sizeof(T)); + } + + template + void add_packed_fixed(pbf_tag_type tag, It first, It last, std::input_iterator_tag /*unused*/) { + if (first == last) { + return; + } + + basic_pbf_writer sw{*this, tag}; + + while (first != last) { + sw.add_fixed(*first++); + } + } + + template + void add_packed_fixed(pbf_tag_type tag, It first, It last, std::forward_iterator_tag /*unused*/) { + if (first == last) { + return; + } + + const auto length = std::distance(first, last); + add_length_varint(tag, sizeof(T) * pbf_length_type(length)); + reserve(sizeof(T) * std::size_t(length)); + + while (first != last) { + add_fixed(*first++); + } + } + + template + void add_packed_varint(pbf_tag_type tag, It first, It last) { + if (first == last) { + return; + } + + basic_pbf_writer sw{*this, tag}; + + while (first != last) { + sw.add_varint(uint64_t(*first++)); + } + } + + template + void add_packed_svarint(pbf_tag_type tag, It first, It last) { + if (first == last) { + return; + } + + basic_pbf_writer sw{*this, tag}; + + while (first != last) { + sw.add_varint(encode_zigzag64(*first++)); + } + } + + // The number of bytes to reserve for the varint holding the length of + // a length-delimited field. The length has to fit into pbf_length_type, + // and a varint needs 8 bit for every 7 bit. + enum : int { + reserve_bytes = sizeof(pbf_length_type) * 8 / 7 + 1 + }; + + // If m_rollpack_pos is set to this special value, it means that when + // the submessage is closed, nothing needs to be done, because the length + // of the submessage has already been written correctly. 
+    enum : std::size_t {
+        size_is_known = std::numeric_limits<std::size_t>::max()
+    };
+
+    void open_submessage(pbf_tag_type tag, std::size_t size) {
+        protozero_assert(m_pos == 0);
+        protozero_assert(m_data);
+        if (size == 0) {
+            m_rollback_pos = buffer_customization<TBuffer>::size(m_data);
+            add_field(tag, pbf_wire_type::length_delimited);
+            buffer_customization<TBuffer>::append_zeros(m_data, std::size_t(reserve_bytes));
+        } else {
+            m_rollback_pos = size_is_known;
+            add_length_varint(tag, pbf_length_type(size));
+            reserve(size);
+        }
+        m_pos = buffer_customization<TBuffer>::size(m_data);
+    }
+
+    void rollback_submessage() {
+        protozero_assert(m_pos != 0);
+        protozero_assert(m_rollback_pos != size_is_known);
+        protozero_assert(m_data);
+        buffer_customization<TBuffer>::resize(m_data, m_rollback_pos);
+        m_pos = 0;
+    }
+
+    void commit_submessage() {
+        protozero_assert(m_pos != 0);
+        protozero_assert(m_rollback_pos != size_is_known);
+        protozero_assert(m_data);
+        const auto length = pbf_length_type(buffer_customization<TBuffer>::size(m_data) - m_pos);
+
+        protozero_assert(buffer_customization<TBuffer>::size(m_data) >= m_pos - reserve_bytes);
+        const auto n = add_varint_to_buffer(buffer_customization<TBuffer>::at_pos(m_data, m_pos - reserve_bytes), length);
+
+        buffer_customization<TBuffer>::erase_range(m_data, m_pos - reserve_bytes + n, m_pos);
+        m_pos = 0;
+    }
+
+    void close_submessage() {
+        protozero_assert(m_data);
+        if (m_pos == 0 || m_rollback_pos == size_is_known) {
+            return;
+        }
+        if (buffer_customization<TBuffer>::size(m_data) - m_pos == 0) {
+            rollback_submessage();
+        } else {
+            commit_submessage();
+        }
+    }
+
+    void add_length_varint(pbf_tag_type tag, pbf_length_type length) {
+        add_field(tag, pbf_wire_type::length_delimited);
+        add_varint(length);
+    }
+
+public:
+
+    /**
+     * Create a writer using the specified buffer as a data store. The
+     * basic_pbf_writer stores a pointer to that buffer and adds all data to
+     * it. The buffer doesn't have to be empty. The basic_pbf_writer will just
+     * append data.
+     */
+    explicit basic_pbf_writer(TBuffer& buffer) noexcept :
+        m_data{&buffer} {
+    }
+
+    /**
+     * Create a writer without a data store. In this form the writer can not
+     * be used!
+     */
+    basic_pbf_writer() noexcept = default;
+
+    /**
+     * Construct a basic_pbf_writer for a submessage from the basic_pbf_writer
+     * of the parent message.
+     *
+     * @param parent_writer The basic_pbf_writer
+     * @param tag Tag (field number) of the field that will be written
+     * @param size Optional size of the submessage in bytes (use 0 for unknown).
+     *        Setting this allows some optimizations but is only possible in
+     *        a few very specific cases.
+     */
+    basic_pbf_writer(basic_pbf_writer& parent_writer, pbf_tag_type tag, std::size_t size = 0) :
+        m_data{parent_writer.m_data},
+        m_parent_writer{&parent_writer} {
+        m_parent_writer->open_submessage(tag, size);
+    }
+
+    /// A basic_pbf_writer object can not be copied
+    basic_pbf_writer(const basic_pbf_writer&) = delete;
+
+    /// A basic_pbf_writer object can not be copied
+    basic_pbf_writer& operator=(const basic_pbf_writer&) = delete;
+
+    /**
+     * A basic_pbf_writer object can be moved. After this the other
+     * basic_pbf_writer will be invalid.
+     */
+    basic_pbf_writer(basic_pbf_writer&& other) noexcept :
+        m_data{other.m_data},
+        m_parent_writer{other.m_parent_writer},
+        m_rollback_pos{other.m_rollback_pos},
+        m_pos{other.m_pos} {
+        other.m_data = nullptr;
+        other.m_parent_writer = nullptr;
+        other.m_rollback_pos = 0;
+        other.m_pos = 0;
+    }
+
+    /**
+     * A basic_pbf_writer object can be moved. After this the other
+     * basic_pbf_writer will be invalid.
+ */ + basic_pbf_writer& operator=(basic_pbf_writer&& other) noexcept { + m_data = other.m_data; + m_parent_writer = other.m_parent_writer; + m_rollback_pos = other.m_rollback_pos; + m_pos = other.m_pos; + other.m_data = nullptr; + other.m_parent_writer = nullptr; + other.m_rollback_pos = 0; + other.m_pos = 0; + return *this; + } + + ~basic_pbf_writer() noexcept { + try { + if (m_parent_writer != nullptr) { + m_parent_writer->close_submessage(); + } + } catch (...) { + // This try/catch is used to make the destructor formally noexcept. + // close_submessage() is not noexcept, but will not throw the way + // it is called here, so we are good. But to be paranoid, call... + std::terminate(); + } + } + + /** + * Check if this writer is valid. A writer is invalid if it was default + * constructed, moved from, or if commit() has been called on it. + * Otherwise it is valid. + */ + bool valid() const noexcept { + return m_data != nullptr; + } + + /** + * Swap the contents of this object with the other. + * + * @param other Other object to swap data with. + */ + void swap(basic_pbf_writer& other) noexcept { + using std::swap; + swap(m_data, other.m_data); + swap(m_parent_writer, other.m_parent_writer); + swap(m_rollback_pos, other.m_rollback_pos); + swap(m_pos, other.m_pos); + } + + /** + * Reserve size bytes in the underlying message store in addition to + * whatever the message store already holds. So unlike + * the `std::string::reserve()` method this is not an absolute size, + * but additional memory that should be reserved. + * + * @param size Number of bytes to reserve in underlying message store. + */ + void reserve(std::size_t size) { + protozero_assert(m_data); + buffer_customization::reserve_additional(m_data, size); + } + + /** + * Commit this submessage. This does the same as when the basic_pbf_writer + * goes out of scope and is destructed. + * + * @pre Must be a basic_pbf_writer of a submessage, ie one opened with the + * basic_pbf_writer constructor taking a parent message. + * @post The basic_pbf_writer is invalid and can't be used any more. + */ + void commit() { + protozero_assert(m_parent_writer && "you can't call commit() on a basic_pbf_writer without a parent"); + protozero_assert(m_pos == 0 && "you can't call commit() on a basic_pbf_writer that has an open nested submessage"); + m_parent_writer->close_submessage(); + m_parent_writer = nullptr; + m_data = nullptr; + } + + /** + * Cancel writing of this submessage. The complete submessage will be + * removed as if it was never created and no fields were added. + * + * @pre Must be a basic_pbf_writer of a submessage, ie one opened with the + * basic_pbf_writer constructor taking a parent message. + * @post The basic_pbf_writer is invalid and can't be used any more. + */ + void rollback() { + protozero_assert(m_parent_writer && "you can't call rollback() on a basic_pbf_writer without a parent"); + protozero_assert(m_pos == 0 && "you can't call rollback() on a basic_pbf_writer that has an open nested submessage"); + m_parent_writer->rollback_submessage(); + m_parent_writer = nullptr; + m_data = nullptr; + } + + ///@{ + /** + * @name Scalar field writer functions + */ + + /** + * Add "bool" field to data. 
+ * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_bool(pbf_tag_type tag, bool value) { + add_field(tag, pbf_wire_type::varint); + protozero_assert(m_pos == 0 && "you can't add fields to a parent basic_pbf_writer if there is an existing basic_pbf_writer for a submessage"); + protozero_assert(m_data); + m_data->push_back(char(value)); + } + + /** + * Add "enum" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_enum(pbf_tag_type tag, int32_t value) { + add_tagged_varint(tag, uint64_t(value)); + } + + /** + * Add "int32" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_int32(pbf_tag_type tag, int32_t value) { + add_tagged_varint(tag, uint64_t(value)); + } + + /** + * Add "sint32" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_sint32(pbf_tag_type tag, int32_t value) { + add_tagged_varint(tag, encode_zigzag32(value)); + } + + /** + * Add "uint32" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_uint32(pbf_tag_type tag, uint32_t value) { + add_tagged_varint(tag, value); + } + + /** + * Add "int64" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_int64(pbf_tag_type tag, int64_t value) { + add_tagged_varint(tag, uint64_t(value)); + } + + /** + * Add "sint64" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_sint64(pbf_tag_type tag, int64_t value) { + add_tagged_varint(tag, encode_zigzag64(value)); + } + + /** + * Add "uint64" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_uint64(pbf_tag_type tag, uint64_t value) { + add_tagged_varint(tag, value); + } + + /** + * Add "fixed32" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_fixed32(pbf_tag_type tag, uint32_t value) { + add_field(tag, pbf_wire_type::fixed32); + add_fixed(value); + } + + /** + * Add "sfixed32" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_sfixed32(pbf_tag_type tag, int32_t value) { + add_field(tag, pbf_wire_type::fixed32); + add_fixed(value); + } + + /** + * Add "fixed64" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_fixed64(pbf_tag_type tag, uint64_t value) { + add_field(tag, pbf_wire_type::fixed64); + add_fixed(value); + } + + /** + * Add "sfixed64" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_sfixed64(pbf_tag_type tag, int64_t value) { + add_field(tag, pbf_wire_type::fixed64); + add_fixed(value); + } + + /** + * Add "float" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_float(pbf_tag_type tag, float value) { + add_field(tag, pbf_wire_type::fixed32); + add_fixed(value); + } + + /** + * Add "double" field to data. 
+ * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_double(pbf_tag_type tag, double value) { + add_field(tag, pbf_wire_type::fixed64); + add_fixed(value); + } + + /** + * Add "bytes" field to data. + * + * @param tag Tag (field number) of the field + * @param value Pointer to value to be written + * @param size Number of bytes to be written + */ + void add_bytes(pbf_tag_type tag, const char* value, std::size_t size) { + protozero_assert(m_pos == 0 && "you can't add fields to a parent basic_pbf_writer if there is an existing basic_pbf_writer for a submessage"); + protozero_assert(m_data); + protozero_assert(size <= std::numeric_limits::max()); + add_length_varint(tag, pbf_length_type(size)); + buffer_customization::append(m_data, value, size); + } + + /** + * Add "bytes" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_bytes(pbf_tag_type tag, const data_view& value) { + add_bytes(tag, value.data(), value.size()); + } + + /** + * Add "bytes" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_bytes(pbf_tag_type tag, const std::string& value) { + add_bytes(tag, value.data(), value.size()); + } + + /** + * Add "bytes" field to data. Bytes from the value are written until + * a null byte is encountered. The null byte is not added. + * + * @param tag Tag (field number) of the field + * @param value Pointer to zero-delimited value to be written + */ + void add_bytes(pbf_tag_type tag, const char* value) { + add_bytes(tag, value, std::strlen(value)); + } + + /** + * Add "bytes" field to data using vectored input. All the data in the + * 2nd and further arguments is "concatenated" with only a single copy + * into the final buffer. + * + * This will work with objects of any type supporting the data() and + * size() methods like std::string or protozero::data_view. + * + * Example: + * @code + * std::string data1 = "abc"; + * std::string data2 = "xyz"; + * writer.add_bytes_vectored(1, data1, data2); + * @endcode + * + * @tparam Ts List of types supporting data() and size() methods. + * @param tag Tag (field number) of the field + * @param values List of objects of types Ts with data to be appended. + */ + template + void add_bytes_vectored(pbf_tag_type tag, Ts&&... values) { + protozero_assert(m_pos == 0 && "you can't add fields to a parent basic_pbf_writer if there is an existing basic_pbf_writer for a submessage"); + protozero_assert(m_data); + size_t sum_size = 0; + (void)std::initializer_list{sum_size += values.size()...}; + protozero_assert(sum_size <= std::numeric_limits::max()); + add_length_varint(tag, pbf_length_type(sum_size)); + buffer_customization::reserve_additional(m_data, sum_size); + (void)std::initializer_list{(buffer_customization::append(m_data, values.data(), values.size()), 0)...}; + } + + /** + * Add "string" field to data. + * + * @param tag Tag (field number) of the field + * @param value Pointer to value to be written + * @param size Number of bytes to be written + */ + void add_string(pbf_tag_type tag, const char* value, std::size_t size) { + add_bytes(tag, value, size); + } + + /** + * Add "string" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_string(pbf_tag_type tag, const data_view& value) { + add_bytes(tag, value.data(), value.size()); + } + + /** + * Add "string" field to data. 
+ * + * @param tag Tag (field number) of the field + * @param value Value to be written + */ + void add_string(pbf_tag_type tag, const std::string& value) { + add_bytes(tag, value.data(), value.size()); + } + + /** + * Add "string" field to data. Bytes from the value are written until + * a null byte is encountered. The null byte is not added. + * + * @param tag Tag (field number) of the field + * @param value Pointer to value to be written + */ + void add_string(pbf_tag_type tag, const char* value) { + add_bytes(tag, value, std::strlen(value)); + } + + /** + * Add "message" field to data. + * + * @param tag Tag (field number) of the field + * @param value Pointer to message to be written + * @param size Length of the message + */ + void add_message(pbf_tag_type tag, const char* value, std::size_t size) { + add_bytes(tag, value, size); + } + + /** + * Add "message" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written. The value must be a complete message. + */ + void add_message(pbf_tag_type tag, const data_view& value) { + add_bytes(tag, value.data(), value.size()); + } + + /** + * Add "message" field to data. + * + * @param tag Tag (field number) of the field + * @param value Value to be written. The value must be a complete message. + */ + void add_message(pbf_tag_type tag, const std::string& value) { + add_bytes(tag, value.data(), value.size()); + } + + ///@} + + ///@{ + /** + * @name Repeated packed field writer functions + */ + + /** + * Add "repeated packed bool" field to data. + * + * @tparam InputIterator A type satisfying the InputIterator concept. + * Dereferencing the iterator must yield a type assignable to bool. + * @param tag Tag (field number) of the field + * @param first Iterator pointing to the beginning of the data + * @param last Iterator pointing one past the end of data + */ + template + void add_packed_bool(pbf_tag_type tag, InputIterator first, InputIterator last) { + add_packed_varint(tag, first, last); + } + + /** + * Add "repeated packed enum" field to data. + * + * @tparam InputIterator A type satisfying the InputIterator concept. + * Dereferencing the iterator must yield a type assignable to int32_t. + * @param tag Tag (field number) of the field + * @param first Iterator pointing to the beginning of the data + * @param last Iterator pointing one past the end of data + */ + template + void add_packed_enum(pbf_tag_type tag, InputIterator first, InputIterator last) { + add_packed_varint(tag, first, last); + } + + /** + * Add "repeated packed int32" field to data. + * + * @tparam InputIterator A type satisfying the InputIterator concept. + * Dereferencing the iterator must yield a type assignable to int32_t. + * @param tag Tag (field number) of the field + * @param first Iterator pointing to the beginning of the data + * @param last Iterator pointing one past the end of data + */ + template + void add_packed_int32(pbf_tag_type tag, InputIterator first, InputIterator last) { + add_packed_varint(tag, first, last); + } + + /** + * Add "repeated packed sint32" field to data. + * + * @tparam InputIterator A type satisfying the InputIterator concept. + * Dereferencing the iterator must yield a type assignable to int32_t. 
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_sint32(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_svarint(tag, first, last);
+    }
+
+    /**
+     * Add "repeated packed uint32" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to uint32_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_uint32(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_varint(tag, first, last);
+    }
+
+    /**
+     * Add "repeated packed int64" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to int64_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_int64(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_varint(tag, first, last);
+    }
+
+    /**
+     * Add "repeated packed sint64" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to int64_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_sint64(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_svarint(tag, first, last);
+    }
+
+    /**
+     * Add "repeated packed uint64" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to uint64_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_uint64(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_varint(tag, first, last);
+    }
+
+    /**
+     * Add a "repeated packed" fixed-size field to data. The following
+     * fixed-size fields are available:
+     *
+     * uint32_t -> repeated packed fixed32
+     * int32_t -> repeated packed sfixed32
+     * uint64_t -> repeated packed fixed64
+     * int64_t -> repeated packed sfixed64
+     * double -> repeated packed double
+     * float -> repeated packed float
+     *
+     * @tparam ValueType One of the following types: (u)int32/64_t, double, float.
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename ValueType, typename InputIterator>
+    void add_packed_fixed(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        static_assert(std::is_same<ValueType, uint32_t>::value ||
+                      std::is_same<ValueType, int32_t>::value ||
+                      std::is_same<ValueType, uint64_t>::value ||
+                      std::is_same<ValueType, int64_t>::value ||
+                      std::is_same<ValueType, double>::value ||
+                      std::is_same<ValueType, float>::value, "Only some types are allowed");
+        add_packed_fixed<ValueType, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    /**
+     * Add "repeated packed fixed32" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to uint32_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_fixed32(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_fixed<uint32_t, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    /**
+     * Add "repeated packed sfixed32" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to int32_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_sfixed32(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_fixed<int32_t, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    /**
+     * Add "repeated packed fixed64" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to uint64_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_fixed64(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_fixed<uint64_t, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    /**
+     * Add "repeated packed sfixed64" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to int64_t.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_sfixed64(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_fixed<int64_t, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    /**
+     * Add "repeated packed float" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to float.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_float(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_fixed<float, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    /**
+     * Add "repeated packed double" field to data.
+     *
+     * @tparam InputIterator A type satisfying the InputIterator concept.
+     *         Dereferencing the iterator must yield a type assignable to double.
+     * @param tag Tag (field number) of the field
+     * @param first Iterator pointing to the beginning of the data
+     * @param last Iterator pointing one past the end of data
+     */
+    template <typename InputIterator>
+    void add_packed_double(pbf_tag_type tag, InputIterator first, InputIterator last) {
+        add_packed_fixed<double, InputIterator>(tag, first, last,
+            typename std::iterator_traits<InputIterator>::iterator_category{});
+    }
+
+    ///@}
+
+    template <typename, typename> friend class detail::packed_field_varint;
+    template <typename, typename> friend class detail::packed_field_svarint;
+    template <typename, typename> friend class detail::packed_field_fixed;
+
+}; // class basic_pbf_writer
+
+/**
+ * Swap two basic_pbf_writer objects.
+ *
+ * @param lhs First object.
+ * @param rhs Second object.
+ */
+template <typename TBuffer>
+inline void swap(basic_pbf_writer<TBuffer>& lhs, basic_pbf_writer<TBuffer>& rhs) noexcept {
+    lhs.swap(rhs);
+}
+
+namespace detail {
+
+    template <typename TBuffer>
+    class packed_field {
+
+        basic_pbf_writer<TBuffer> m_writer{};
+
+    public:
+
+        packed_field(const packed_field&) = delete;
+        packed_field& operator=(const packed_field&) = delete;
+
+        packed_field(packed_field&&) noexcept = default;
+        packed_field& operator=(packed_field&&) noexcept = default;
+
+        packed_field() = default;
+
+        packed_field(basic_pbf_writer<TBuffer>& parent_writer, pbf_tag_type tag) :
+            m_writer{parent_writer, tag} {
+        }
+
+        packed_field(basic_pbf_writer<TBuffer>& parent_writer, pbf_tag_type tag, std::size_t size) :
+            m_writer{parent_writer, tag, size} {
+        }
+
+        ~packed_field() noexcept = default;
+
+        bool valid() const noexcept {
+            return m_writer.valid();
+        }
+
+        void commit() {
+            m_writer.commit();
+        }
+
+        void rollback() {
+            m_writer.rollback();
+        }
+
+        basic_pbf_writer<TBuffer>& writer() noexcept {
+            return m_writer;
+        }
+
+    }; // class packed_field
+
+    template <typename TBuffer, typename T>
+    class packed_field_fixed : public packed_field<TBuffer> {
+
+    public:
+
+        packed_field_fixed() :
+            packed_field<TBuffer>{} {
+        }
+
+        template <typename P>
+        packed_field_fixed(basic_pbf_writer<TBuffer>& parent_writer, P tag) :
+            packed_field<TBuffer>{parent_writer, static_cast<pbf_tag_type>(tag)} {
+        }
+
+        template <typename P>
+        packed_field_fixed(basic_pbf_writer<TBuffer>& parent_writer, P tag, std::size_t size) :
+            packed_field<TBuffer>{parent_writer, static_cast<pbf_tag_type>(tag), size * sizeof(T)} {
+        }
+
+        void add_element(T value) {
+            this->writer().template add_fixed<T>(value);
+        }
+
+    }; // class packed_field_fixed
+
+    template <typename TBuffer, typename T>
+    class packed_field_varint : public packed_field<TBuffer> {
+
+    public:
+
+        packed_field_varint() :
+            packed_field<TBuffer>{} {
+        }
+
+        template <typename P>
+        packed_field_varint(basic_pbf_writer<TBuffer>& parent_writer, P tag) :
+            packed_field<TBuffer>{parent_writer, static_cast<pbf_tag_type>(tag)} {
+        }
+
+        void add_element(T value) {
+            this->writer().add_varint(uint64_t(value));
+        }
+
+    }; // class packed_field_varint
+
+    template <typename TBuffer, typename T>
+    class packed_field_svarint : public packed_field<TBuffer> {
+
+    public:
+
+        packed_field_svarint() :
+            packed_field<TBuffer>{} {
+        }
+
+        template <typename P>
+        packed_field_svarint(basic_pbf_writer<TBuffer>& parent_writer, P tag) :
+            packed_field<TBuffer>{parent_writer, static_cast<pbf_tag_type>(tag)} {
+        }
+
+        void add_element(T value) {
+            this->writer().add_varint(encode_zigzag64(value));
+        }
+
+    }; // class packed_field_svarint
+
+} // end namespace detail
+
+} // end namespace protozero
+
+#endif // PROTOZERO_BASIC_PBF_WRITER_HPP
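For orientation, here is a minimal sketch of how the writer defined above is typically driven. The include paths, buffer type and tag numbers are illustrative assumptions and are not part of the vendored sources or of this patch:

#include <protozero/basic_pbf_writer.hpp>  // assumes -I pointing at contrib/protozero/1.7.0/include
#include <protozero/buffer_string.hpp>     // buffer_customization for std::string, added later in this patch

#include <string>
#include <vector>

int main() {
    std::string buffer;
    protozero::basic_pbf_writer<std::string> writer{buffer};

    writer.add_uint32(1, 17);        // scalar varint field
    writer.add_string(2, "foo");     // length-delimited field

    const std::vector<uint32_t> values{1, 2, 3};
    writer.add_packed_uint32(3, values.cbegin(), values.cend());  // packed repeated field

    {
        // Submessage: opened via the (parent, tag) constructor, committed on scope exit.
        protozero::basic_pbf_writer<std::string> sub{writer, 4};
        sub.add_double(1, 3.141);
    }

    // "buffer" now holds the encoded message.
}

Any buffer type with a buffer_customization specialization can be used in place of std::string, for example std::vector<char> or the fixed_size_buffer_adaptor from the headers added below.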
diff --git a/contrib/protozero/1.7.0/include/protozero/buffer_fixed.hpp b/contrib/protozero/1.7.0/include/protozero/buffer_fixed.hpp
new file mode 100644
index 0000000..b2e6d1d
--- /dev/null
+++ b/contrib/protozero/1.7.0/include/protozero/buffer_fixed.hpp
@@ -0,0 +1,222 @@
+#ifndef PROTOZERO_BUFFER_FIXED_HPP
+#define PROTOZERO_BUFFER_FIXED_HPP
+
+/*****************************************************************************
+
+protozero - Minimalistic protocol buffer decoder and encoder in C++.
+
+This file is from https://github.com/mapbox/protozero where you can find more
+documentation.
+
+*****************************************************************************/
+
+/**
+ * @file buffer_fixed.hpp
+ *
+ * @brief Contains the fixed_size_buffer_adaptor class.
+ */
+
+#include "buffer_tmpl.hpp"
+#include "config.hpp"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <stdexcept>
+
+namespace protozero {
+
+/**
+ * This class can be used instead of std::string if you want to create a
+ * vector tile in a fixed-size buffer. Any operation that needs more space
+ * than is available will fail with a std::length_error exception.
+ */
+class fixed_size_buffer_adaptor {
+
+    char* m_data;
+    std::size_t m_capacity;
+    std::size_t m_size = 0;
+
+public:
+
+    /// @cond usual container typedefs not documented
+
+    using size_type = std::size_t;
+
+    using value_type = char;
+    using reference = value_type&;
+    using const_reference = const value_type&;
+    using pointer = value_type*;
+    using const_pointer = const value_type*;
+
+    using iterator = pointer;
+    using const_iterator = const_pointer;
+
+    /// @endcond
+
+    /**
+     * Constructor.
+     *
+     * @param data Pointer to some memory allocated for the buffer.
+     * @param capacity Number of bytes available.
+     */
+    fixed_size_buffer_adaptor(char* data, std::size_t capacity) noexcept :
+        m_data(data),
+        m_capacity(capacity) {
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param container Some container class supporting the member functions
+     *        data() and size().
+     */
+    template <typename T>
+    explicit fixed_size_buffer_adaptor(T& container) :
+        m_data(container.data()),
+        m_capacity(container.size()) {
+    }
+
+    /// Returns a pointer to the data in the buffer.
+    const char* data() const noexcept {
+        return m_data;
+    }
+
+    /// Returns a pointer to the data in the buffer.
+    char* data() noexcept {
+        return m_data;
+    }
+
+    /// The capacity this buffer was created with.
+    std::size_t capacity() const noexcept {
+        return m_capacity;
+    }
+
+    /// The number of bytes used in the buffer. Always <= capacity().
+    std::size_t size() const noexcept {
+        return m_size;
+    }
+
+    /// Return iterator to beginning of data.
+    char* begin() noexcept {
+        return m_data;
+    }
+
+    /// Return iterator to beginning of data.
+    const char* begin() const noexcept {
+        return m_data;
+    }
+
+    /// Return iterator to beginning of data.
+    const char* cbegin() const noexcept {
+        return m_data;
+    }
+
+    /// Return iterator to end of data.
+    char* end() noexcept {
+        return m_data + m_size;
+    }
+
+    /// Return iterator to end of data.
+    const char* end() const noexcept {
+        return m_data + m_size;
+    }
+
+    /// Return iterator to end of data.
+ const char* cend() const noexcept { + return m_data + m_size; + } + +/// @cond INTERNAL + + // Do not rely on anything beyond this point + + void append(const char* data, std::size_t count) { + if (m_size + count > m_capacity) { + throw std::length_error{"fixed size data store exhausted"}; + } + std::copy_n(data, count, m_data + m_size); + m_size += count; + } + + void append_zeros(std::size_t count) { + if (m_size + count > m_capacity) { + throw std::length_error{"fixed size data store exhausted"}; + } + std::fill_n(m_data + m_size, count, '\0'); + m_size += count; + } + + void resize(std::size_t size) { + protozero_assert(size < m_size); + if (size > m_capacity) { + throw std::length_error{"fixed size data store exhausted"}; + } + m_size = size; + } + + void erase_range(std::size_t from, std::size_t to) { + protozero_assert(from <= m_size); + protozero_assert(to <= m_size); + protozero_assert(from < to); + std::copy(m_data + to, m_data + m_size, m_data + from); + m_size -= (to - from); + } + + char* at_pos(std::size_t pos) { + protozero_assert(pos <= m_size); + return m_data + pos; + } + + void push_back(char ch) { + if (m_size >= m_capacity) { + throw std::length_error{"fixed size data store exhausted"}; + } + m_data[m_size++] = ch; + } +/// @endcond + +}; // class fixed_size_buffer_adaptor + +/// @cond INTERNAL +template <> +struct buffer_customization { + + static std::size_t size(const fixed_size_buffer_adaptor* buffer) noexcept { + return buffer->size(); + } + + static void append(fixed_size_buffer_adaptor* buffer, const char* data, std::size_t count) { + buffer->append(data, count); + } + + static void append_zeros(fixed_size_buffer_adaptor* buffer, std::size_t count) { + buffer->append_zeros(count); + } + + static void resize(fixed_size_buffer_adaptor* buffer, std::size_t size) { + buffer->resize(size); + } + + static void reserve_additional(fixed_size_buffer_adaptor* /*buffer*/, std::size_t /*size*/) { + /* nothing to be done for fixed-size buffers */ + } + + static void erase_range(fixed_size_buffer_adaptor* buffer, std::size_t from, std::size_t to) { + buffer->erase_range(from, to); + } + + static char* at_pos(fixed_size_buffer_adaptor* buffer, std::size_t pos) { + return buffer->at_pos(pos); + } + + static void push_back(fixed_size_buffer_adaptor* buffer, char ch) { + buffer->push_back(ch); + } + +}; +/// @endcond + +} // namespace protozero + +#endif // PROTOZERO_BUFFER_FIXED_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/buffer_string.hpp b/contrib/protozero/1.7.0/include/protozero/buffer_string.hpp new file mode 100644 index 0000000..1f0f902 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/buffer_string.hpp @@ -0,0 +1,76 @@ +#ifndef PROTOZERO_BUFFER_STRING_HPP +#define PROTOZERO_BUFFER_STRING_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. 
+ +*****************************************************************************/ + +/** + * @file buffer_string.hpp + * + * @brief Contains the customization points for buffer implementation based + * on std::string + */ + +#include "buffer_tmpl.hpp" + +#include +#include +#include + +namespace protozero { + +// Implementation of buffer customizations points for std::string + +/// @cond INTERNAL +template <> +struct buffer_customization { + + static std::size_t size(const std::string* buffer) noexcept { + return buffer->size(); + } + + static void append(std::string* buffer, const char* data, std::size_t count) { + buffer->append(data, count); + } + + static void append_zeros(std::string* buffer, std::size_t count) { + buffer->append(count, '\0'); + } + + static void resize(std::string* buffer, std::size_t size) { + protozero_assert(size < buffer->size()); + buffer->resize(size); + } + + static void reserve_additional(std::string* buffer, std::size_t size) { + buffer->reserve(buffer->size() + size); + } + + static void erase_range(std::string* buffer, std::size_t from, std::size_t to) { + protozero_assert(from <= buffer->size()); + protozero_assert(to <= buffer->size()); + protozero_assert(from <= to); + buffer->erase(std::next(buffer->begin(), from), std::next(buffer->begin(), to)); + } + + static char* at_pos(std::string* buffer, std::size_t pos) { + protozero_assert(pos <= buffer->size()); + return (&*buffer->begin()) + pos; + } + + static void push_back(std::string* buffer, char ch) { + buffer->push_back(ch); + } + +}; +/// @endcond + +} // namespace protozero + +#endif // PROTOZERO_BUFFER_STRING_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/buffer_tmpl.hpp b/contrib/protozero/1.7.0/include/protozero/buffer_tmpl.hpp new file mode 100644 index 0000000..ac22399 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/buffer_tmpl.hpp @@ -0,0 +1,113 @@ +#ifndef PROTOZERO_BUFFER_TMPL_HPP +#define PROTOZERO_BUFFER_TMPL_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file buffer_tmpl.hpp + * + * @brief Contains the customization points for buffer implementations. + */ + +#include +#include +#include + +namespace protozero { + +// Implementation of buffer customizations points for std::string + +/// @cond INTERNAL +template +struct buffer_customization { + + /** + * Get the number of bytes currently used in the buffer. + * + * @param buffer Pointer to the buffer. + * @returns number of bytes used in the buffer. + */ + static std::size_t size(const std::string* buffer); + + /** + * Append count bytes from data to the buffer. + * + * @param buffer Pointer to the buffer. + * @param data Pointer to the data. + * @param count Number of bytes to be added to the buffer. + */ + static void append(std::string* buffer, const char* data, std::size_t count); + + /** + * Append count zero bytes to the buffer. + * + * @param buffer Pointer to the buffer. + * @param count Number of bytes to be added to the buffer. + */ + static void append_zeros(std::string* buffer, std::size_t count); + + /** + * Shrink the buffer to the specified size. The new size will always be + * smaller than the current size. + * + * @param buffer Pointer to the buffer. + * @param size New size of the buffer. 
+ * + * @pre size < current size of buffer + */ + static void resize(std::string* buffer, std::size_t size); + + /** + * Reserve an additional size bytes for use in the buffer. This is used for + * variable-sized buffers to tell the buffer implementation that soon more + * memory will be used. The implementation can ignore this. + * + * @param buffer Pointer to the buffer. + * @param size Number of bytes to reserve. + */ + static void reserve_additional(std::string* buffer, std::size_t size); + + /** + * Delete data from the buffer. This must move back the data after the + * part being deleted and resize the buffer accordingly. + * + * @param buffer Pointer to the buffer. + * @param from Offset into the buffer where we want to erase from. + * @param to Offset into the buffer one past the last byte we want to erase. + * + * @pre from, to <= size of the buffer, from < to + */ + static void erase_range(std::string* buffer, std::size_t from, std::size_t to); + + /** + * Return a pointer to the memory at the specified position in the buffer. + * + * @param buffer Pointer to the buffer. + * @param pos The position in the buffer. + * @returns pointer to the memory in the buffer at the specified position. + * + * @pre pos <= size of the buffer + */ + static char* at_pos(std::string* buffer, std::size_t pos); + + /** + * Add a char to the buffer incrementing the number of chars in the buffer. + * + * @param buffer Pointer to the buffer. + * @param ch The character to add. + */ + static void push_back(std::string* buffer, char ch); + +}; +/// @endcond + +} // namespace protozero + +#endif // PROTOZERO_BUFFER_TMPL_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/buffer_vector.hpp b/contrib/protozero/1.7.0/include/protozero/buffer_vector.hpp new file mode 100644 index 0000000..6a34b07 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/buffer_vector.hpp @@ -0,0 +1,76 @@ +#ifndef PROTOZERO_BUFFER_VECTOR_HPP +#define PROTOZERO_BUFFER_VECTOR_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. 
+
+*****************************************************************************/
+
+/**
+ * @file buffer_vector.hpp
+ *
+ * @brief Contains the customization points for buffer implementation based
+ *        on std::vector<char>
+ */
+
+#include "buffer_tmpl.hpp"
+
+#include <cstddef>
+#include <iterator>
+#include <vector>
+
+namespace protozero {
+
+// Implementation of buffer customizations points for std::vector<char>
+
+/// @cond INTERNAL
+template <>
+struct buffer_customization<std::vector<char>> {
+
+    static std::size_t size(const std::vector<char>* buffer) noexcept {
+        return buffer->size();
+    }
+
+    static void append(std::vector<char>* buffer, const char* data, std::size_t count) {
+        buffer->insert(buffer->end(), data, data + count);
+    }
+
+    static void append_zeros(std::vector<char>* buffer, std::size_t count) {
+        buffer->insert(buffer->end(), count, '\0');
+    }
+
+    static void resize(std::vector<char>* buffer, std::size_t size) {
+        protozero_assert(size < buffer->size());
+        buffer->resize(size);
+    }
+
+    static void reserve_additional(std::vector<char>* buffer, std::size_t size) {
+        buffer->reserve(buffer->size() + size);
+    }
+
+    static void erase_range(std::vector<char>* buffer, std::size_t from, std::size_t to) {
+        protozero_assert(from <= buffer->size());
+        protozero_assert(to <= buffer->size());
+        protozero_assert(from <= to);
+        buffer->erase(std::next(buffer->begin(), from), std::next(buffer->begin(), to));
+    }
+
+    static char* at_pos(std::vector<char>* buffer, std::size_t pos) {
+        protozero_assert(pos <= buffer->size());
+        return (&*buffer->begin()) + pos;
+    }
+
+    static void push_back(std::vector<char>* buffer, char ch) {
+        buffer->push_back(ch);
+    }
+
+};
+/// @endcond
+
+} // namespace protozero
+
+#endif // PROTOZERO_BUFFER_VECTOR_HPP
diff --git a/contrib/protozero/1.7.0/include/protozero/byteswap.hpp b/contrib/protozero/1.7.0/include/protozero/byteswap.hpp
new file mode 100644
index 0000000..799d179
--- /dev/null
+++ b/contrib/protozero/1.7.0/include/protozero/byteswap.hpp
@@ -0,0 +1,99 @@
+#ifndef PROTOZERO_BYTESWAP_HPP
+#define PROTOZERO_BYTESWAP_HPP
+
+/*****************************************************************************
+
+protozero - Minimalistic protocol buffer decoder and encoder in C++.
+
+This file is from https://github.com/mapbox/protozero where you can find more
+documentation.
+
+*****************************************************************************/
+
+/**
+ * @file byteswap.hpp
+ *
+ * @brief Contains functions to swap bytes in values (for different endianness).
+ */
+
+#include "config.hpp"
+
+#include <cstdint>
+
+namespace protozero {
+namespace detail {
+
+inline uint32_t byteswap_impl(uint32_t value) noexcept {
+#ifdef PROTOZERO_USE_BUILTIN_BSWAP
+    return __builtin_bswap32(value);
+#else
+    return ((value & 0xff000000U) >> 24U) |
+           ((value & 0x00ff0000U) >> 8U) |
+           ((value & 0x0000ff00U) << 8U) |
+           ((value & 0x000000ffU) << 24U);
+#endif
+}
+
+inline uint64_t byteswap_impl(uint64_t value) noexcept {
+#ifdef PROTOZERO_USE_BUILTIN_BSWAP
+    return __builtin_bswap64(value);
+#else
+    return ((value & 0xff00000000000000ULL) >> 56U) |
+           ((value & 0x00ff000000000000ULL) >> 40U) |
+           ((value & 0x0000ff0000000000ULL) >> 24U) |
+           ((value & 0x000000ff00000000ULL) >> 8U) |
+           ((value & 0x00000000ff000000ULL) << 8U) |
+           ((value & 0x0000000000ff0000ULL) << 24U) |
+           ((value & 0x000000000000ff00ULL) << 40U) |
+           ((value & 0x00000000000000ffULL) << 56U);
+#endif
+}
+
+} // end namespace detail
+
+/// byteswap the data pointed to by ptr in-place.
+inline void byteswap_inplace(uint32_t* ptr) noexcept { + *ptr = detail::byteswap_impl(*ptr); +} + +/// byteswap the data pointed to by ptr in-place. +inline void byteswap_inplace(uint64_t* ptr) noexcept { + *ptr = detail::byteswap_impl(*ptr); +} + +/// byteswap the data pointed to by ptr in-place. +inline void byteswap_inplace(int32_t* ptr) noexcept { + auto* bptr = reinterpret_cast(ptr); + *bptr = detail::byteswap_impl(*bptr); +} + +/// byteswap the data pointed to by ptr in-place. +inline void byteswap_inplace(int64_t* ptr) noexcept { + auto* bptr = reinterpret_cast(ptr); + *bptr = detail::byteswap_impl(*bptr); +} + +/// byteswap the data pointed to by ptr in-place. +inline void byteswap_inplace(float* ptr) noexcept { + auto* bptr = reinterpret_cast(ptr); + *bptr = detail::byteswap_impl(*bptr); +} + +/// byteswap the data pointed to by ptr in-place. +inline void byteswap_inplace(double* ptr) noexcept { + auto* bptr = reinterpret_cast(ptr); + *bptr = detail::byteswap_impl(*bptr); +} + +namespace detail { + + // Added for backwards compatibility with any code that might use this + // function (even if it shouldn't have). Will be removed in a later + // version of protozero. + using ::protozero::byteswap_inplace; + +} // end namespace detail + +} // end namespace protozero + +#endif // PROTOZERO_BYTESWAP_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/config.hpp b/contrib/protozero/1.7.0/include/protozero/config.hpp new file mode 100644 index 0000000..6fc7749 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/config.hpp @@ -0,0 +1,48 @@ +#ifndef PROTOZERO_CONFIG_HPP +#define PROTOZERO_CONFIG_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +#include + +/** + * @file config.hpp + * + * @brief Contains macro checks for different configurations. + */ + +#define PROTOZERO_LITTLE_ENDIAN 1234 +#define PROTOZERO_BIG_ENDIAN 4321 + +// Find out which byte order the machine has. +#if defined(__BYTE_ORDER) +# if (__BYTE_ORDER == __LITTLE_ENDIAN) +# define PROTOZERO_BYTE_ORDER PROTOZERO_LITTLE_ENDIAN +# endif +# if (__BYTE_ORDER == __BIG_ENDIAN) +# define PROTOZERO_BYTE_ORDER PROTOZERO_BIG_ENDIAN +# endif +#else +// This probably isn't a very good default, but might do until we figure +// out something better. +# define PROTOZERO_BYTE_ORDER PROTOZERO_LITTLE_ENDIAN +#endif + +// Check whether __builtin_bswap is available +#if defined(__GNUC__) || defined(__clang__) +# define PROTOZERO_USE_BUILTIN_BSWAP +#endif + +// Wrapper for assert() used for testing +#ifndef protozero_assert +# define protozero_assert(x) assert(x) +#endif + +#endif // PROTOZERO_CONFIG_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/data_view.hpp b/contrib/protozero/1.7.0/include/protozero/data_view.hpp new file mode 100644 index 0000000..3ec87af --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/data_view.hpp @@ -0,0 +1,236 @@ +#ifndef PROTOZERO_DATA_VIEW_HPP +#define PROTOZERO_DATA_VIEW_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. 
+ +*****************************************************************************/ + +/** + * @file data_view.hpp + * + * @brief Contains the implementation of the data_view class. + */ + +#include "config.hpp" + +#include +#include +#include +#include +#include + +namespace protozero { + +#ifdef PROTOZERO_USE_VIEW +using data_view = PROTOZERO_USE_VIEW; +#else + +/** + * Holds a pointer to some data and a length. + * + * This class is supposed to be compatible with the std::string_view + * that will be available in C++17. + */ +class data_view { + + const char* m_data = nullptr; + std::size_t m_size = 0; + +public: + + /** + * Default constructor. Construct an empty data_view. + */ + constexpr data_view() noexcept = default; + + /** + * Create data_view from pointer and size. + * + * @param ptr Pointer to the data. + * @param length Length of the data. + */ + constexpr data_view(const char* ptr, std::size_t length) noexcept + : m_data{ptr}, + m_size{length} { + } + + /** + * Create data_view from string. + * + * @param str String with the data. + */ + data_view(const std::string& str) noexcept // NOLINT(google-explicit-constructor, hicpp-explicit-conversions) + : m_data{str.data()}, + m_size{str.size()} { + } + + /** + * Create data_view from zero-terminated string. + * + * @param ptr Pointer to the data. + */ + data_view(const char* ptr) noexcept // NOLINT(google-explicit-constructor, hicpp-explicit-conversions) + : m_data{ptr}, + m_size{std::strlen(ptr)} { + } + + /** + * Swap the contents of this object with the other. + * + * @param other Other object to swap data with. + */ + void swap(data_view& other) noexcept { + using std::swap; + swap(m_data, other.m_data); + swap(m_size, other.m_size); + } + + /// Return pointer to data. + constexpr const char* data() const noexcept { + return m_data; + } + + /// Return length of data in bytes. + constexpr std::size_t size() const noexcept { + return m_size; + } + + /// Returns true if size is 0. + constexpr bool empty() const noexcept { + return m_size == 0; + } + +#ifndef PROTOZERO_STRICT_API + /** + * Convert data view to string. + * + * @pre Must not be default constructed data_view. + * + * @deprecated to_string() is not available in C++17 string_view so it + * should not be used to make conversion to that class easier + * in the future. + */ + std::string to_string() const { + protozero_assert(m_data); + return {m_data, m_size}; + } +#endif + + /** + * Convert data view to string. + * + * @pre Must not be default constructed data_view. + */ + explicit operator std::string() const { + protozero_assert(m_data); + return {m_data, m_size}; + } + + /** + * Compares the contents of this object with the given other object. + * + * @returns 0 if they are the same, <0 if this object is smaller than + * the other or >0 if it is larger. If both objects have the + * same size returns <0 if this object is lexicographically + * before the other, >0 otherwise. + * + * @pre Must not be default constructed data_view. + */ + int compare(data_view other) const noexcept { + assert(m_data && other.m_data); + const int cmp = std::memcmp(data(), other.data(), + std::min(size(), other.size())); + if (cmp == 0) { + if (size() == other.size()) { + return 0; + } + return size() < other.size() ? -1 : 1; + } + return cmp; + } + +}; // class data_view + +/** + * Swap two data_view objects. + * + * @param lhs First object. + * @param rhs Second object. 
+ */ +inline void swap(data_view& lhs, data_view& rhs) noexcept { + lhs.swap(rhs); +} + +/** + * Two data_view instances are equal if they have the same size and the + * same content. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline constexpr bool operator==(const data_view lhs, const data_view rhs) noexcept { + return lhs.size() == rhs.size() && + std::equal(lhs.data(), lhs.data() + lhs.size(), rhs.data()); +} + +/** + * Two data_view instances are not equal if they have different sizes or the + * content differs. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline constexpr bool operator!=(const data_view lhs, const data_view rhs) noexcept { + return !(lhs == rhs); +} + +/** + * Returns true if lhs.compare(rhs) < 0. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline bool operator<(const data_view lhs, const data_view rhs) noexcept { + return lhs.compare(rhs) < 0; +} + +/** + * Returns true if lhs.compare(rhs) <= 0. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline bool operator<=(const data_view lhs, const data_view rhs) noexcept { + return lhs.compare(rhs) <= 0; +} + +/** + * Returns true if lhs.compare(rhs) > 0. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline bool operator>(const data_view lhs, const data_view rhs) noexcept { + return lhs.compare(rhs) > 0; +} + +/** + * Returns true if lhs.compare(rhs) >= 0. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline bool operator>=(const data_view lhs, const data_view rhs) noexcept { + return lhs.compare(rhs) >= 0; +} + +#endif + +} // end namespace protozero + +#endif // PROTOZERO_DATA_VIEW_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/exception.hpp b/contrib/protozero/1.7.0/include/protozero/exception.hpp new file mode 100644 index 0000000..a3cd0f1 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/exception.hpp @@ -0,0 +1,101 @@ +#ifndef PROTOZERO_EXCEPTION_HPP +#define PROTOZERO_EXCEPTION_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file exception.hpp + * + * @brief Contains the exceptions used in the protozero library. + */ + +#include + +/** + * @brief All parts of the protozero header-only library are in this namespace. + */ +namespace protozero { + +/** + * All exceptions explicitly thrown by the functions of the protozero library + * derive from this exception. + */ +struct exception : std::exception { + /// Returns the explanatory string. + const char* what() const noexcept override { + return "pbf exception"; + } +}; + +/** + * This exception is thrown when parsing a varint thats larger than allowed. + * This should never happen unless the data is corrupted. + */ +struct varint_too_long_exception : exception { + /// Returns the explanatory string. + const char* what() const noexcept override { + return "varint too long exception"; + } +}; + +/** + * This exception is thrown when the wire type of a pdf field is unknown. + * This should never happen unless the data is corrupted. + */ +struct unknown_pbf_wire_type_exception : exception { + /// Returns the explanatory string. 
+ const char* what() const noexcept override { + return "unknown pbf field type exception"; + } +}; + +/** + * This exception is thrown when we are trying to read a field and there + * are not enough bytes left in the buffer to read it. Almost all functions + * of the pbf_reader class can throw this exception. + * + * This should never happen unless the data is corrupted or you have + * initialized the pbf_reader object with incomplete data. + */ +struct end_of_buffer_exception : exception { + /// Returns the explanatory string. + const char* what() const noexcept override { + return "end of buffer exception"; + } +}; + +/** + * This exception is thrown when a tag has an invalid value. Tags must be + * unsigned integers between 1 and 2^29-1. Tags between 19000 and 19999 are + * not allowed. See + * https://developers.google.com/protocol-buffers/docs/proto#assigning-tags + */ +struct invalid_tag_exception : exception { + /// Returns the explanatory string. + const char* what() const noexcept override { + return "invalid tag exception"; + } +}; + +/** + * This exception is thrown when a length field of a packed repeated field is + * invalid. For fixed size types the length must be a multiple of the size of + * the type. + */ +struct invalid_length_exception : exception { + /// Returns the explanatory string. + const char* what() const noexcept override { + return "invalid length exception"; + } +}; + +} // end namespace protozero + +#endif // PROTOZERO_EXCEPTION_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/iterators.hpp b/contrib/protozero/1.7.0/include/protozero/iterators.hpp new file mode 100644 index 0000000..ee8ef8e --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/iterators.hpp @@ -0,0 +1,481 @@ +#ifndef PROTOZERO_ITERATORS_HPP +#define PROTOZERO_ITERATORS_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file iterators.hpp + * + * @brief Contains the iterators for access to packed repeated fields. + */ + +#include "config.hpp" +#include "varint.hpp" + +#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN +# include +#endif + +#include +#include +#include +#include + +namespace protozero { + +/** + * A range of iterators based on std::pair. Created from beginning and + * end iterators. Used as a return type from some pbf_reader methods + * that is easy to use with range-based for loops. + */ +template > +class iterator_range : +#ifdef PROTOZERO_STRICT_API + protected +#else + public +#endif + P { + +public: + + /// The type of the iterators in this range. + using iterator = T; + + /// The value type of the underlying iterator. + using value_type = typename std::iterator_traits::value_type; + + /** + * Default constructor. Create empty iterator_range. + */ + constexpr iterator_range() : + P{iterator{}, iterator{}} { + } + + /** + * Create iterator range from two iterators. + * + * @param first_iterator Iterator to beginning of range. + * @param last_iterator Iterator to end of range. + */ + constexpr iterator_range(iterator&& first_iterator, iterator&& last_iterator) : + P{std::forward(first_iterator), + std::forward(last_iterator)} { + } + + /// Return iterator to beginning of range. 
+ constexpr iterator begin() const noexcept { + return this->first; + } + + /// Return iterator to end of range. + constexpr iterator end() const noexcept { + return this->second; + } + + /// Return iterator to beginning of range. + constexpr iterator cbegin() const noexcept { + return this->first; + } + + /// Return iterator to end of range. + constexpr iterator cend() const noexcept { + return this->second; + } + + /** + * Return true if this range is empty. + * + * Complexity: Constant. + */ + constexpr bool empty() const noexcept { + return begin() == end(); + } + + /** + * Get the size of the range, ie the number of elements it contains. + * + * Complexity: Constant or linear depending on the underlaying iterator. + */ + std::size_t size() const noexcept { + return static_cast(std::distance(begin(), end())); + } + + /** + * Get element at the beginning of the range. + * + * @pre Range must not be empty. + */ + value_type front() const { + protozero_assert(!empty()); + return *(this->first); + } + + /** + * Advance beginning of range by one. + * + * @pre Range must not be empty. + */ + void drop_front() { + protozero_assert(!empty()); + ++this->first; + } + + /** + * Swap the contents of this range with the other. + * + * @param other Other range to swap data with. + */ + void swap(iterator_range& other) noexcept { + using std::swap; + swap(this->first, other.first); + swap(this->second, other.second); + } + +}; // struct iterator_range + +/** + * Swap two iterator_ranges. + * + * @param lhs First range. + * @param rhs Second range. + */ +template +inline void swap(iterator_range& lhs, iterator_range& rhs) noexcept { + lhs.swap(rhs); +} + +/** + * A forward iterator used for accessing packed repeated fields of fixed + * length (fixed32, sfixed32, float, double). 
+ */ +template +class const_fixed_iterator { + + /// Pointer to current iterator position + const char* m_data = nullptr; + +public: + + /// @cond usual iterator functions not documented + + using iterator_category = std::random_access_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + const_fixed_iterator() noexcept = default; + + explicit const_fixed_iterator(const char* data) noexcept : + m_data{data} { + } + + const_fixed_iterator(const const_fixed_iterator&) noexcept = default; + const_fixed_iterator(const_fixed_iterator&&) noexcept = default; + + const_fixed_iterator& operator=(const const_fixed_iterator&) noexcept = default; + const_fixed_iterator& operator=(const_fixed_iterator&&) noexcept = default; + + ~const_fixed_iterator() noexcept = default; + + value_type operator*() const noexcept { + value_type result; + std::memcpy(&result, m_data, sizeof(value_type)); +#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN + byteswap_inplace(&result); +#endif + return result; + } + + const_fixed_iterator& operator++() noexcept { + m_data += sizeof(value_type); + return *this; + } + + const_fixed_iterator operator++(int) noexcept { + const const_fixed_iterator tmp{*this}; + ++(*this); + return tmp; + } + + const_fixed_iterator& operator--() noexcept { + m_data -= sizeof(value_type); + return *this; + } + + const_fixed_iterator operator--(int) noexcept { + const const_fixed_iterator tmp{*this}; + --(*this); + return tmp; + } + + friend bool operator==(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return lhs.m_data == rhs.m_data; + } + + friend bool operator!=(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return !(lhs == rhs); + } + + friend bool operator<(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return lhs.m_data < rhs.m_data; + } + + friend bool operator>(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return rhs < lhs; + } + + friend bool operator<=(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return !(lhs > rhs); + } + + friend bool operator>=(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return !(lhs < rhs); + } + + const_fixed_iterator& operator+=(difference_type val) noexcept { + m_data += (sizeof(value_type) * val); + return *this; + } + + friend const_fixed_iterator operator+(const_fixed_iterator lhs, difference_type rhs) noexcept { + const_fixed_iterator tmp{lhs}; + tmp.m_data += (sizeof(value_type) * rhs); + return tmp; + } + + friend const_fixed_iterator operator+(difference_type lhs, const_fixed_iterator rhs) noexcept { + const_fixed_iterator tmp{rhs}; + tmp.m_data += (sizeof(value_type) * lhs); + return tmp; + } + + const_fixed_iterator& operator-=(difference_type val) noexcept { + m_data -= (sizeof(value_type) * val); + return *this; + } + + friend const_fixed_iterator operator-(const_fixed_iterator lhs, difference_type rhs) noexcept { + const_fixed_iterator tmp{lhs}; + tmp.m_data -= (sizeof(value_type) * rhs); + return tmp; + } + + friend difference_type operator-(const_fixed_iterator lhs, const_fixed_iterator rhs) noexcept { + return static_cast(lhs.m_data - rhs.m_data) / static_cast(sizeof(T)); + } + + value_type operator[](difference_type n) const noexcept { + return *(*this + n); + } + + /// @endcond + +}; // class const_fixed_iterator + +/** + * A forward iterator used for accessing packed repeated varint fields + * (int32, uint32, int64, uint64, bool, enum). 
+ */ +template +class const_varint_iterator { + +protected: + + /// Pointer to current iterator position + const char* m_data = nullptr; // NOLINT(misc-non-private-member-variables-in-classes, cppcoreguidelines-non-private-member-variables-in-classes,-warnings-as-errors) + + /// Pointer to end iterator position + const char* m_end = nullptr; // NOLINT(misc-non-private-member-variables-in-classes, cppcoreguidelines-non-private-member-variables-in-classes,-warnings-as-errors) + +public: + + /// @cond usual iterator functions not documented + + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + static difference_type distance(const_varint_iterator begin, const_varint_iterator end) noexcept { + // The "distance" between default initialized const_varint_iterator's + // is always 0. + if (!begin.m_data) { + return 0; + } + // We know that each varint contains exactly one byte with the most + // significant bit not set. We can use this to quickly figure out + // how many varints there are without actually decoding the varints. + return std::count_if(begin.m_data, end.m_data, [](char c) noexcept { + return (static_cast(c) & 0x80U) == 0; + }); + } + + const_varint_iterator() noexcept = default; + + const_varint_iterator(const char* data, const char* end) noexcept : + m_data{data}, + m_end{end} { + } + + const_varint_iterator(const const_varint_iterator&) noexcept = default; + const_varint_iterator(const_varint_iterator&&) noexcept = default; + + const_varint_iterator& operator=(const const_varint_iterator&) noexcept = default; + const_varint_iterator& operator=(const_varint_iterator&&) noexcept = default; + + ~const_varint_iterator() noexcept = default; + + value_type operator*() const { + protozero_assert(m_data); + const char* d = m_data; // will be thrown away + return static_cast(decode_varint(&d, m_end)); + } + + const_varint_iterator& operator++() { + protozero_assert(m_data); + skip_varint(&m_data, m_end); + return *this; + } + + const_varint_iterator operator++(int) { + protozero_assert(m_data); + const const_varint_iterator tmp{*this}; + ++(*this); + return tmp; + } + + bool operator==(const const_varint_iterator& rhs) const noexcept { + return m_data == rhs.m_data && m_end == rhs.m_end; + } + + bool operator!=(const const_varint_iterator& rhs) const noexcept { + return !(*this == rhs); + } + + /// @endcond + +}; // class const_varint_iterator + +/** + * A forward iterator used for accessing packed repeated svarint fields + * (sint32, sint64). 
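A small sketch of the optimization described in the comment above: size() on a packed varint range only counts the bytes whose most significant bit is clear, it never decodes the values. The include path and field number 2 are assumptions, not part of the patch.

#include <protozero/pbf_reader.hpp>

#include <cstddef>
#include <string>

// Count the entries of a packed int32 field without decoding them.
std::size_t count_packed_int32(const std::string& buffer) {
    protozero::pbf_reader message{buffer.data(), buffer.size()};
    std::size_t count = 0;
    while (message.next(2, protozero::pbf_wire_type::length_delimited)) {
        // size() ends up in const_varint_iterator::distance(), which scans
        // the bytes once instead of decoding each varint.
        count += message.get_packed_int32().size();
    }
    return count;
}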
+ */ +template +class const_svarint_iterator : public const_varint_iterator { + +public: + + /// @cond usual iterator functions not documented + + using iterator_category = std::forward_iterator_tag; + using value_type = T; + using difference_type = std::ptrdiff_t; + using pointer = value_type*; + using reference = value_type&; + + const_svarint_iterator() noexcept : + const_varint_iterator{} { + } + + const_svarint_iterator(const char* data, const char* end) noexcept : + const_varint_iterator{data, end} { + } + + const_svarint_iterator(const const_svarint_iterator&) = default; + const_svarint_iterator(const_svarint_iterator&&) noexcept = default; + + const_svarint_iterator& operator=(const const_svarint_iterator&) = default; + const_svarint_iterator& operator=(const_svarint_iterator&&) noexcept = default; + + ~const_svarint_iterator() = default; + + value_type operator*() const { + protozero_assert(this->m_data); + const char* d = this->m_data; // will be thrown away + return static_cast(decode_zigzag64(decode_varint(&d, this->m_end))); + } + + const_svarint_iterator& operator++() { + protozero_assert(this->m_data); + skip_varint(&this->m_data, this->m_end); + return *this; + } + + const_svarint_iterator operator++(int) { + protozero_assert(this->m_data); + const const_svarint_iterator tmp{*this}; + ++(*this); + return tmp; + } + + /// @endcond + +}; // class const_svarint_iterator + +} // end namespace protozero + +namespace std { + + // Specialize std::distance for all the protozero iterators. Because + // functions can't be partially specialized, we have to do this for + // every value_type we are using. + + /// @cond individual overloads do not need to be documented + + template <> + inline typename protozero::const_varint_iterator::difference_type + distance>(protozero::const_varint_iterator first, // NOLINT(readability-inconsistent-declaration-parameter-name) + protozero::const_varint_iterator last) { + return protozero::const_varint_iterator::distance(first, last); + } + + template <> + inline typename protozero::const_varint_iterator::difference_type + distance>(protozero::const_varint_iterator first, // NOLINT(readability-inconsistent-declaration-parameter-name) + protozero::const_varint_iterator last) { + return protozero::const_varint_iterator::distance(first, last); + } + + template <> + inline typename protozero::const_varint_iterator::difference_type + distance>(protozero::const_varint_iterator first, // NOLINT(readability-inconsistent-declaration-parameter-name) + protozero::const_varint_iterator last) { + return protozero::const_varint_iterator::distance(first, last); + } + + template <> + inline typename protozero::const_varint_iterator::difference_type + distance>(protozero::const_varint_iterator first, // NOLINT(readability-inconsistent-declaration-parameter-name) + protozero::const_varint_iterator last) { + return protozero::const_varint_iterator::distance(first, last); + } + + template <> + inline typename protozero::const_svarint_iterator::difference_type + distance>(protozero::const_svarint_iterator first, // NOLINT(readability-inconsistent-declaration-parameter-name) + protozero::const_svarint_iterator last) { + return protozero::const_svarint_iterator::distance(first, last); + } + + template <> + inline typename protozero::const_svarint_iterator::difference_type + distance>(protozero::const_svarint_iterator first, // NOLINT(readability-inconsistent-declaration-parameter-name) + protozero::const_svarint_iterator last) { + return 
protozero::const_svarint_iterator::distance(first, last); + } + + /// @endcond + +} // end namespace std + +#endif // PROTOZERO_ITERATORS_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/pbf_builder.hpp b/contrib/protozero/1.7.0/include/protozero/pbf_builder.hpp new file mode 100644 index 0000000..71a2dec --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/pbf_builder.hpp @@ -0,0 +1,32 @@ +#ifndef PROTOZERO_PBF_BUILDER_HPP +#define PROTOZERO_PBF_BUILDER_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file pbf_builder.hpp + * + * @brief Contains the pbf_builder template class. + */ + +#include "basic_pbf_builder.hpp" +#include "pbf_writer.hpp" + +#include + +namespace protozero { + +/// Specialization of basic_pbf_builder using std::string as buffer type. +template +using pbf_builder = basic_pbf_builder; + +} // end namespace protozero + +#endif // PROTOZERO_PBF_BUILDER_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/pbf_message.hpp b/contrib/protozero/1.7.0/include/protozero/pbf_message.hpp new file mode 100644 index 0000000..d7fd8b5 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/pbf_message.hpp @@ -0,0 +1,184 @@ +#ifndef PROTOZERO_PBF_MESSAGE_HPP +#define PROTOZERO_PBF_MESSAGE_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file pbf_message.hpp + * + * @brief Contains the pbf_message template class. + */ + +#include "pbf_reader.hpp" +#include "types.hpp" + +#include + +namespace protozero { + +/** + * This class represents a protobuf message. Either a top-level message or + * a nested sub-message. Top-level messages can be created from any buffer + * with a pointer and length: + * + * @code + * enum class Message : protozero::pbf_tag_type { + * ... + * }; + * + * std::string buffer; + * // fill buffer... + * pbf_message message{buffer.data(), buffer.size()}; + * @endcode + * + * Sub-messages are created using get_message(): + * + * @code + * enum class SubMessage : protozero::pbf_tag_type { + * ... + * }; + * + * pbf_message message{...}; + * message.next(); + * pbf_message submessage = message.get_message(); + * @endcode + * + * All methods of the pbf_message class except get_bytes() and get_string() + * provide the strong exception guarantee, ie they either succeed or do not + * change the pbf_message object they are called on. Use the get_data() method + * instead of get_bytes() or get_string(), if you need this guarantee. + * + * This template class is based on the pbf_reader class and has all the same + * methods. The difference is that whereever the pbf_reader class takes an + * integer tag, this template class takes a tag of the template type T. + * + * Read the tutorial to understand how this class is used. + */ +template +class pbf_message : public pbf_reader { + + static_assert(std::is_same::type>::value, + "T must be enum with underlying type protozero::pbf_tag_type"); + +public: + + /// The type of messages this class will read. 
+ using enum_type = T; + + /** + * Construct a pbf_message. All arguments are forwarded to the pbf_reader + * parent class. + */ + template + pbf_message(Args&&... args) noexcept : // NOLINT(google-explicit-constructor, hicpp-explicit-conversions) + pbf_reader{std::forward(args)...} { + } + + /** + * Set next field in the message as the current field. This is usually + * called in a while loop: + * + * @code + * pbf_message<...> message(...); + * while (message.next()) { + * // handle field + * } + * @endcode + * + * @returns `true` if there is a next field, `false` if not. + * @pre There must be no current field. + * @post If it returns `true` there is a current field now. + */ + bool next() { + return pbf_reader::next(); + } + + /** + * Set next field with given tag in the message as the current field. + * Fields with other tags are skipped. This is usually called in a while + * loop for repeated fields: + * + * @code + * pbf_message message{...}; + * while (message.next(Example1::repeated_fixed64_r)) { + * // handle field + * } + * @endcode + * + * or you can call it just once to get the one field with this tag: + * + * @code + * pbf_message message{...}; + * if (message.next(Example1::required_uint32_x)) { + * // handle field + * } + * @endcode + * + * Note that this will not check the wire type. The two-argument version + * of this function will also check the wire type. + * + * @returns `true` if there is a next field with this tag. + * @pre There must be no current field. + * @post If it returns `true` there is a current field now with the given tag. + */ + bool next(T next_tag) { + return pbf_reader::next(pbf_tag_type(next_tag)); + } + + /** + * Set next field with given tag and wire type in the message as the + * current field. Fields with other tags are skipped. This is usually + * called in a while loop for repeated fields: + * + * @code + * pbf_message message{...}; + * while (message.next(Example1::repeated_fixed64_r, pbf_wire_type::varint)) { + * // handle field + * } + * @endcode + * + * or you can call it just once to get the one field with this tag: + * + * @code + * pbf_message message{...}; + * if (message.next(Example1::required_uint32_x, pbf_wire_type::varint)) { + * // handle field + * } + * @endcode + * + * Note that this will also check the wire type. The one-argument version + * of this function will not check the wire type. + * + * @returns `true` if there is a next field with this tag. + * @pre There must be no current field. + * @post If it returns `true` there is a current field now with the given tag. + */ + bool next(T next_tag, pbf_wire_type type) { + return pbf_reader::next(pbf_tag_type(next_tag), type); + } + + /** + * The tag of the current field. The tag is the enum value for the field + * number from the description in the .proto file. + * + * Call next() before calling this function to set the current field. + * + * @returns tag of the current field. + * @pre There must be a current field (ie. next() must have returned `true`). 
+ */ + T tag() const noexcept { + return T(pbf_reader::tag()); + } + +}; // class pbf_message + +} // end namespace protozero + +#endif // PROTOZERO_PBF_MESSAGE_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/pbf_reader.hpp b/contrib/protozero/1.7.0/include/protozero/pbf_reader.hpp new file mode 100644 index 0000000..92bfdee --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/pbf_reader.hpp @@ -0,0 +1,977 @@ +#ifndef PROTOZERO_PBF_READER_HPP +#define PROTOZERO_PBF_READER_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file pbf_reader.hpp + * + * @brief Contains the pbf_reader class. + */ + +#include "config.hpp" +#include "data_view.hpp" +#include "exception.hpp" +#include "iterators.hpp" +#include "types.hpp" +#include "varint.hpp" + +#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN +# include +#endif + +#include +#include +#include +#include +#include + +namespace protozero { + +/** + * This class represents a protobuf message. Either a top-level message or + * a nested sub-message. Top-level messages can be created from any buffer + * with a pointer and length: + * + * @code + * std::string buffer; + * // fill buffer... + * pbf_reader message{buffer.data(), buffer.size()}; + * @endcode + * + * Sub-messages are created using get_message(): + * + * @code + * pbf_reader message{...}; + * message.next(); + * pbf_reader submessage = message.get_message(); + * @endcode + * + * All methods of the pbf_reader class except get_bytes() and get_string() + * provide the strong exception guarantee, ie they either succeed or do not + * change the pbf_reader object they are called on. Use the get_view() method + * instead of get_bytes() or get_string(), if you need this guarantee. + */ +class pbf_reader { + + // A pointer to the next unread data. + const char* m_data = nullptr; + + // A pointer to one past the end of data. + const char* m_end = nullptr; + + // The wire type of the current field. + pbf_wire_type m_wire_type = pbf_wire_type::unknown; + + // The tag of the current field. 
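A usage sketch for the pbf_message wrapper defined above; the Person enum, its field numbers, and the include path are hypothetical and only illustrate the documented pattern.

#include <protozero/pbf_message.hpp>

#include <cstdint>
#include <string>

enum class Person : protozero::pbf_tag_type {
    required_string_name = 1,
    optional_uint32_age  = 2
};

// Decode the two known fields and skip everything else.
void read_person(const std::string& buffer, std::string& name, uint32_t& age) {
    protozero::pbf_message<Person> message{buffer.data(), buffer.size()};
    while (message.next()) {
        switch (message.tag()) {
            case Person::required_string_name:
                name = message.get_string();
                break;
            case Person::optional_uint32_age:
                age = message.get_uint32();
                break;
            default:
                message.skip();
        }
    }
}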
+ pbf_tag_type m_tag = 0; + + template + T get_fixed() { + T result; + const char* data = m_data; + skip_bytes(sizeof(T)); + std::memcpy(&result, data, sizeof(T)); +#if PROTOZERO_BYTE_ORDER != PROTOZERO_LITTLE_ENDIAN + byteswap_inplace(&result); +#endif + return result; + } + + template + iterator_range> packed_fixed() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + const auto len = get_len_and_skip(); + if (len % sizeof(T) != 0) { + throw invalid_length_exception{}; + } + return {const_fixed_iterator(m_data - len), + const_fixed_iterator(m_data)}; + } + + template + T get_varint() { + const auto val = static_cast(decode_varint(&m_data, m_end)); + return val; + } + + template + T get_svarint() { + protozero_assert((has_wire_type(pbf_wire_type::varint) || has_wire_type(pbf_wire_type::length_delimited)) && "not a varint"); + return static_cast(decode_zigzag64(decode_varint(&m_data, m_end))); + } + + pbf_length_type get_length() { + return get_varint(); + } + + void skip_bytes(pbf_length_type len) { + if (m_end - m_data < static_cast(len)) { + throw end_of_buffer_exception{}; + } + m_data += len; + +#ifndef NDEBUG + // In debug builds reset the tag to zero so that we can detect (some) + // wrong code. + m_tag = 0; +#endif + } + + pbf_length_type get_len_and_skip() { + const auto len = get_length(); + skip_bytes(len); + return len; + } + + template + iterator_range get_packed() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + const auto len = get_len_and_skip(); + return {T{m_data - len, m_data}, + T{m_data, m_data}}; + } + +public: + + /** + * Construct a pbf_reader message from a data_view. The pointer from the + * data_view will be stored inside the pbf_reader object, no data is + * copied. So you must make sure the view stays valid as long as the + * pbf_reader object is used. + * + * The buffer must contain a complete protobuf message. + * + * @post There is no current field. + */ + explicit pbf_reader(const data_view& view) noexcept + : m_data{view.data()}, + m_end{view.data() + view.size()} { + } + + /** + * Construct a pbf_reader message from a data pointer and a length. The + * pointer will be stored inside the pbf_reader object, no data is copied. + * So you must make sure the buffer stays valid as long as the pbf_reader + * object is used. + * + * The buffer must contain a complete protobuf message. + * + * @post There is no current field. + */ + pbf_reader(const char* data, std::size_t size) noexcept + : m_data{data}, + m_end{data + size} { + } + +#ifndef PROTOZERO_STRICT_API + /** + * Construct a pbf_reader message from a data pointer and a length. The + * pointer will be stored inside the pbf_reader object, no data is copied. + * So you must make sure the buffer stays valid as long as the pbf_reader + * object is used. + * + * The buffer must contain a complete protobuf message. + * + * @post There is no current field. + * @deprecated Use one of the other constructors. + */ + explicit pbf_reader(const std::pair& data) noexcept + : m_data{data.first}, + m_end{data.first + data.second} { + } +#endif + + /** + * Construct a pbf_reader message from a std::string. A pointer to the + * string internals will be stored inside the pbf_reader object, no data + * is copied. So you must make sure the string is unchanged as long as the + * pbf_reader object is used. + * + * The string must contain a complete protobuf message. + * + * @post There is no current field. 
+ */ + explicit pbf_reader(const std::string& data) noexcept + : m_data{data.data()}, + m_end{data.data() + data.size()} { + } + + /** + * pbf_reader can be default constructed and behaves like it has an empty + * buffer. + */ + pbf_reader() noexcept = default; + + /// pbf_reader messages can be copied trivially. + pbf_reader(const pbf_reader&) noexcept = default; + + /// pbf_reader messages can be moved trivially. + pbf_reader(pbf_reader&&) noexcept = default; + + /// pbf_reader messages can be copied trivially. + pbf_reader& operator=(const pbf_reader& other) noexcept = default; + + /// pbf_reader messages can be moved trivially. + pbf_reader& operator=(pbf_reader&& other) noexcept = default; + + ~pbf_reader() = default; + + /** + * Swap the contents of this object with the other. + * + * @param other Other object to swap data with. + */ + void swap(pbf_reader& other) noexcept { + using std::swap; + swap(m_data, other.m_data); + swap(m_end, other.m_end); + swap(m_wire_type, other.m_wire_type); + swap(m_tag, other.m_tag); + } + + /** + * In a boolean context the pbf_reader class evaluates to `true` if there + * are still fields available and to `false` if the last field has been + * read. + */ + operator bool() const noexcept { // NOLINT(google-explicit-constructor, hicpp-explicit-conversions) + return m_data != m_end; + } + + /** + * Get a view of the not yet read data. + */ + data_view data() const noexcept { + return {m_data, static_cast(m_end - m_data)}; + } + + /** + * Return the length in bytes of the current message. If you have + * already called next() and/or any of the get_*() functions, this will + * return the remaining length. + * + * This can, for instance, be used to estimate the space needed for a + * buffer. Of course you have to know reasonably well what data to expect + * and how it is encoded for this number to have any meaning. + */ + std::size_t length() const noexcept { + return std::size_t(m_end - m_data); + } + + /** + * Set next field in the message as the current field. This is usually + * called in a while loop: + * + * @code + * pbf_reader message(...); + * while (message.next()) { + * // handle field + * } + * @endcode + * + * @returns `true` if there is a next field, `false` if not. + * @pre There must be no current field. + * @post If it returns `true` there is a current field now. + */ + bool next() { + if (m_data == m_end) { + return false; + } + + const auto value = get_varint(); + m_tag = pbf_tag_type(value >> 3U); + + // tags 0 and 19000 to 19999 are not allowed as per + // https://developers.google.com/protocol-buffers/docs/proto#assigning-tags + if (m_tag == 0 || (m_tag >= 19000 && m_tag <= 19999)) { + throw invalid_tag_exception{}; + } + + m_wire_type = pbf_wire_type(value & 0x07U); + switch (m_wire_type) { + case pbf_wire_type::varint: + case pbf_wire_type::fixed64: + case pbf_wire_type::length_delimited: + case pbf_wire_type::fixed32: + break; + default: + throw unknown_pbf_wire_type_exception{}; + } + + return true; + } + + /** + * Set next field with given tag in the message as the current field. + * Fields with other tags are skipped. This is usually called in a while + * loop for repeated fields: + * + * @code + * pbf_reader message{...}; + * while (message.next(17)) { + * // handle field + * } + * @endcode + * + * or you can call it just once to get the one field with this tag: + * + * @code + * pbf_reader message{...}; + * if (message.next(17)) { + * // handle field + * } + * @endcode + * + * Note that this will not check the wire type. 
The two-argument version + * of this function will also check the wire type. + * + * @returns `true` if there is a next field with this tag. + * @pre There must be no current field. + * @post If it returns `true` there is a current field now with the given tag. + */ + bool next(pbf_tag_type next_tag) { + while (next()) { + if (m_tag == next_tag) { + return true; + } + skip(); + } + return false; + } + + /** + * Set next field with given tag and wire type in the message as the + * current field. Fields with other tags are skipped. This is usually + * called in a while loop for repeated fields: + * + * @code + * pbf_reader message{...}; + * while (message.next(17, pbf_wire_type::varint)) { + * // handle field + * } + * @endcode + * + * or you can call it just once to get the one field with this tag: + * + * @code + * pbf_reader message{...}; + * if (message.next(17, pbf_wire_type::varint)) { + * // handle field + * } + * @endcode + * + * Note that this will also check the wire type. The one-argument version + * of this function will not check the wire type. + * + * @returns `true` if there is a next field with this tag. + * @pre There must be no current field. + * @post If it returns `true` there is a current field now with the given tag. + */ + bool next(pbf_tag_type next_tag, pbf_wire_type type) { + while (next()) { + if (m_tag == next_tag && m_wire_type == type) { + return true; + } + skip(); + } + return false; + } + + /** + * The tag of the current field. The tag is the field number from the + * description in the .proto file. + * + * Call next() before calling this function to set the current field. + * + * @returns tag of the current field. + * @pre There must be a current field (ie. next() must have returned `true`). + */ + pbf_tag_type tag() const noexcept { + return m_tag; + } + + /** + * Get the wire type of the current field. The wire types are: + * + * * 0 - varint + * * 1 - 64 bit + * * 2 - length-delimited + * * 5 - 32 bit + * + * All other types are illegal. + * + * Call next() before calling this function to set the current field. + * + * @returns wire type of the current field. + * @pre There must be a current field (ie. next() must have returned `true`). + */ + pbf_wire_type wire_type() const noexcept { + return m_wire_type; + } + + /** + * Get the tag and wire type of the current field in one integer suitable + * for comparison with a switch statement. + * + * Use it like this: + * + * @code + * pbf_reader message{...}; + * while (message.next()) { + * switch (message.tag_and_type()) { + * case tag_and_type(17, pbf_wire_type::length_delimited): + * .... + * break; + * case tag_and_type(21, pbf_wire_type::varint): + * .... + * break; + * default: + * message.skip(); + * } + * } + * @endcode + */ + uint32_t tag_and_type() const noexcept { + return protozero::tag_and_type(tag(), wire_type()); + } + + /** + * Check the wire type of the current field. + * + * @returns `true` if the current field has the given wire type. + * @pre There must be a current field (ie. next() must have returned `true`). + */ + bool has_wire_type(pbf_wire_type type) const noexcept { + return wire_type() == type; + } + + /** + * Consume the current field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @post The current field was consumed and there is no current field now. 
+ */ + void skip() { + protozero_assert(tag() != 0 && "call next() before calling skip()"); + switch (wire_type()) { + case pbf_wire_type::varint: + skip_varint(&m_data, m_end); + break; + case pbf_wire_type::fixed64: + skip_bytes(8); + break; + case pbf_wire_type::length_delimited: + skip_bytes(get_length()); + break; + case pbf_wire_type::fixed32: + skip_bytes(4); + break; + default: + break; + } + } + + ///@{ + /** + * @name Scalar field accessor functions + */ + + /** + * Consume and return value of current "bool" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "bool". + * @post The current field was consumed and there is no current field now. + */ + bool get_bool() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + const bool result = m_data[0] != 0; + skip_varint(&m_data, m_end); + return result; + } + + /** + * Consume and return value of current "enum" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "enum". + * @post The current field was consumed and there is no current field now. + */ + int32_t get_enum() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_varint(); + } + + /** + * Consume and return value of current "int32" varint field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "int32". + * @post The current field was consumed and there is no current field now. + */ + int32_t get_int32() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_varint(); + } + + /** + * Consume and return value of current "sint32" varint field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "sint32". + * @post The current field was consumed and there is no current field now. + */ + int32_t get_sint32() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_svarint(); + } + + /** + * Consume and return value of current "uint32" varint field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "uint32". + * @post The current field was consumed and there is no current field now. + */ + uint32_t get_uint32() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_varint(); + } + + /** + * Consume and return value of current "int64" varint field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "int64". + * @post The current field was consumed and there is no current field now. + */ + int64_t get_int64() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_varint(); + } + + /** + * Consume and return value of current "sint64" varint field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "sint64". + * @post The current field was consumed and there is no current field now. 
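A sketch combining the tag_and_type() dispatch documented above with the scalar accessors; field numbers 1 and 2 are examples only, as is the include path.

#include <protozero/pbf_reader.hpp>

#include <cstdint>
#include <string>

// Field 1 is a string, field 2 a varint int64; everything else is skipped.
void read_item(const std::string& buffer, std::string& label, int64_t& count) {
    protozero::pbf_reader message{buffer.data(), buffer.size()};
    while (message.next()) {
        switch (message.tag_and_type()) {
            case protozero::tag_and_type(1, protozero::pbf_wire_type::length_delimited):
                label = message.get_string();
                break;
            case protozero::tag_and_type(2, protozero::pbf_wire_type::varint):
                count = message.get_int64();
                break;
            default:
                message.skip();
        }
    }
}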
+ */ + int64_t get_sint64() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_svarint(); + } + + /** + * Consume and return value of current "uint64" varint field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "uint64". + * @post The current field was consumed and there is no current field now. + */ + uint64_t get_uint64() { + protozero_assert(has_wire_type(pbf_wire_type::varint) && "not a varint"); + return get_varint(); + } + + /** + * Consume and return value of current "fixed32" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "fixed32". + * @post The current field was consumed and there is no current field now. + */ + uint32_t get_fixed32() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::fixed32) && "not a 32-bit fixed"); + return get_fixed(); + } + + /** + * Consume and return value of current "sfixed32" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "sfixed32". + * @post The current field was consumed and there is no current field now. + */ + int32_t get_sfixed32() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::fixed32) && "not a 32-bit fixed"); + return get_fixed(); + } + + /** + * Consume and return value of current "fixed64" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "fixed64". + * @post The current field was consumed and there is no current field now. + */ + uint64_t get_fixed64() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::fixed64) && "not a 64-bit fixed"); + return get_fixed(); + } + + /** + * Consume and return value of current "sfixed64" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "sfixed64". + * @post The current field was consumed and there is no current field now. + */ + int64_t get_sfixed64() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::fixed64) && "not a 64-bit fixed"); + return get_fixed(); + } + + /** + * Consume and return value of current "float" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "float". + * @post The current field was consumed and there is no current field now. + */ + float get_float() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::fixed32) && "not a 32-bit fixed"); + return get_fixed(); + } + + /** + * Consume and return value of current "double" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "double". + * @post The current field was consumed and there is no current field now. 
+ */ + double get_double() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::fixed64) && "not a 64-bit fixed"); + return get_fixed(); + } + + /** + * Consume and return value of current "bytes", "string", or "message" + * field. + * + * @returns A data_view object. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "bytes", "string", or "message". + * @post The current field was consumed and there is no current field now. + */ + data_view get_view() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::length_delimited) && "not of type string, bytes or message"); + const auto len = get_len_and_skip(); + return {m_data - len, len}; + } + +#ifndef PROTOZERO_STRICT_API + /** + * Consume and return value of current "bytes" or "string" field. + * + * @returns A pair with a pointer to the data and the length of the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "bytes" or "string". + * @post The current field was consumed and there is no current field now. + */ + std::pair get_data() { + protozero_assert(tag() != 0 && "call next() before accessing field value"); + protozero_assert(has_wire_type(pbf_wire_type::length_delimited) && "not of type string, bytes or message"); + const auto len = get_len_and_skip(); + return {m_data - len, len}; + } +#endif + + /** + * Consume and return value of current "bytes" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "bytes". + * @post The current field was consumed and there is no current field now. + */ + std::string get_bytes() { + return std::string(get_view()); + } + + /** + * Consume and return value of current "string" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "string". + * @post The current field was consumed and there is no current field now. + */ + std::string get_string() { + return std::string(get_view()); + } + + /** + * Consume and return value of current "message" field. + * + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "message". + * @post The current field was consumed and there is no current field now. + */ + pbf_reader get_message() { + return pbf_reader{get_view()}; + } + + ///@} + + /// Forward iterator for iterating over bool (int32 varint) values. + using const_bool_iterator = const_varint_iterator< int32_t>; + + /// Forward iterator for iterating over enum (int32 varint) values. + using const_enum_iterator = const_varint_iterator< int32_t>; + + /// Forward iterator for iterating over int32 (varint) values. + using const_int32_iterator = const_varint_iterator< int32_t>; + + /// Forward iterator for iterating over sint32 (varint) values. + using const_sint32_iterator = const_svarint_iterator; + + /// Forward iterator for iterating over uint32 (varint) values. + using const_uint32_iterator = const_varint_iterator; + + /// Forward iterator for iterating over int64 (varint) values. + using const_int64_iterator = const_varint_iterator< int64_t>; + + /// Forward iterator for iterating over sint64 (varint) values. 
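A sketch of reading a nested sub-message with get_message() and a bytes field with get_view(); field numbers 3 and 4 and the include path are illustrative assumptions. Note that the returned data_view does not own its memory.

#include <protozero/pbf_reader.hpp>

#include <string>

// Field 3 holds a nested message, field 4 inside it holds raw bytes.
void read_outer(const std::string& buffer) {
    protozero::pbf_reader outer{buffer.data(), buffer.size()};
    while (outer.next(3, protozero::pbf_wire_type::length_delimited)) {
        protozero::pbf_reader inner = outer.get_message();
        while (inner.next(4, protozero::pbf_wire_type::length_delimited)) {
            // get_view() does not copy; the view points into the original
            // buffer, which therefore has to outlive it.
            const protozero::data_view blob = inner.get_view();
            static_cast<void>(blob.size());
        }
    }
}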
+ using const_sint64_iterator = const_svarint_iterator; + + /// Forward iterator for iterating over uint64 (varint) values. + using const_uint64_iterator = const_varint_iterator; + + /// Forward iterator for iterating over fixed32 values. + using const_fixed32_iterator = const_fixed_iterator; + + /// Forward iterator for iterating over sfixed32 values. + using const_sfixed32_iterator = const_fixed_iterator; + + /// Forward iterator for iterating over fixed64 values. + using const_fixed64_iterator = const_fixed_iterator; + + /// Forward iterator for iterating over sfixed64 values. + using const_sfixed64_iterator = const_fixed_iterator; + + /// Forward iterator for iterating over float values. + using const_float_iterator = const_fixed_iterator; + + /// Forward iterator for iterating over double values. + using const_double_iterator = const_fixed_iterator; + + ///@{ + /** + * @name Repeated packed field accessor functions + */ + + /** + * Consume current "repeated packed bool" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed bool". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_bool() { + return get_packed(); + } + + /** + * Consume current "repeated packed enum" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed enum". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_enum() { + return get_packed(); + } + + /** + * Consume current "repeated packed int32" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed int32". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_int32() { + return get_packed(); + } + + /** + * Consume current "repeated packed sint32" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed sint32". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_sint32() { + return get_packed(); + } + + /** + * Consume current "repeated packed uint32" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed uint32". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_uint32() { + return get_packed(); + } + + /** + * Consume current "repeated packed int64" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed int64". + * @post The current field was consumed and there is no current field now. 
+ */ + iterator_range get_packed_int64() { + return get_packed(); + } + + /** + * Consume current "repeated packed sint64" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed sint64". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_sint64() { + return get_packed(); + } + + /** + * Consume current "repeated packed uint64" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed uint64". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_uint64() { + return get_packed(); + } + + /** + * Consume current "repeated packed fixed32" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed fixed32". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_fixed32() { + return packed_fixed(); + } + + /** + * Consume current "repeated packed sfixed32" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed sfixed32". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_sfixed32() { + return packed_fixed(); + } + + /** + * Consume current "repeated packed fixed64" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed fixed64". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_fixed64() { + return packed_fixed(); + } + + /** + * Consume current "repeated packed sfixed64" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed sfixed64". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_sfixed64() { + return packed_fixed(); + } + + /** + * Consume current "repeated packed float" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed float". + * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_float() { + return packed_fixed(); + } + + /** + * Consume current "repeated packed double" field. + * + * @returns a pair of iterators to the beginning and one past the end of + * the data. + * @pre There must be a current field (ie. next() must have returned `true`). + * @pre The current field must be of type "repeated packed double". 
+ * @post The current field was consumed and there is no current field now. + */ + iterator_range get_packed_double() { + return packed_fixed(); + } + + ///@} + +}; // class pbf_reader + +/** + * Swap two pbf_reader objects. + * + * @param lhs First object. + * @param rhs Second object. + */ +inline void swap(pbf_reader& lhs, pbf_reader& rhs) noexcept { + lhs.swap(rhs); +} + +} // end namespace protozero + +#endif // PROTOZERO_PBF_READER_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/pbf_writer.hpp b/contrib/protozero/1.7.0/include/protozero/pbf_writer.hpp new file mode 100644 index 0000000..9a07bd5 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/pbf_writer.hpp @@ -0,0 +1,76 @@ +#ifndef PROTOZERO_PBF_WRITER_HPP +#define PROTOZERO_PBF_WRITER_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file pbf_writer.hpp + * + * @brief Contains the pbf_writer class. + */ + +#include "basic_pbf_writer.hpp" +#include "buffer_string.hpp" + +#include +#include + +namespace protozero { + +/** + * Specialization of basic_pbf_writer using std::string as buffer type. + */ +using pbf_writer = basic_pbf_writer; + +/// Class for generating packed repeated bool fields. +using packed_field_bool = detail::packed_field_varint; + +/// Class for generating packed repeated enum fields. +using packed_field_enum = detail::packed_field_varint; + +/// Class for generating packed repeated int32 fields. +using packed_field_int32 = detail::packed_field_varint; + +/// Class for generating packed repeated sint32 fields. +using packed_field_sint32 = detail::packed_field_svarint; + +/// Class for generating packed repeated uint32 fields. +using packed_field_uint32 = detail::packed_field_varint; + +/// Class for generating packed repeated int64 fields. +using packed_field_int64 = detail::packed_field_varint; + +/// Class for generating packed repeated sint64 fields. +using packed_field_sint64 = detail::packed_field_svarint; + +/// Class for generating packed repeated uint64 fields. +using packed_field_uint64 = detail::packed_field_varint; + +/// Class for generating packed repeated fixed32 fields. +using packed_field_fixed32 = detail::packed_field_fixed; + +/// Class for generating packed repeated sfixed32 fields. +using packed_field_sfixed32 = detail::packed_field_fixed; + +/// Class for generating packed repeated fixed64 fields. +using packed_field_fixed64 = detail::packed_field_fixed; + +/// Class for generating packed repeated sfixed64 fields. +using packed_field_sfixed64 = detail::packed_field_fixed; + +/// Class for generating packed repeated float fields. +using packed_field_float = detail::packed_field_fixed; + +/// Class for generating packed repeated double fields. 
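A writing-side sketch using the pbf_writer and packed_field_* aliases above; the field numbers, values, and include path are examples only, not part of the vendored sources.

#include <protozero/pbf_writer.hpp>

#include <string>

// Write a string field (1), a uint32 field (2) and a packed repeated
// uint32 field (3) into a std::string buffer.
std::string write_example() {
    std::string buffer;
    protozero::pbf_writer writer{buffer};
    writer.add_string(1, "hello");
    writer.add_uint32(2, 7U);
    {
        protozero::packed_field_uint32 packed{writer, 3};
        packed.add_element(17U);
        packed.add_element(42U);
    } // leaving the scope finalizes the length of the packed field
    return buffer;
}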
+using packed_field_double = detail::packed_field_fixed; + +} // end namespace protozero + +#endif // PROTOZERO_PBF_WRITER_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/types.hpp b/contrib/protozero/1.7.0/include/protozero/types.hpp new file mode 100644 index 0000000..3aefddf --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/types.hpp @@ -0,0 +1,66 @@ +#ifndef PROTOZERO_TYPES_HPP +#define PROTOZERO_TYPES_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file types.hpp + * + * @brief Contains the declaration of low-level types used in the pbf format. + */ + +#include "config.hpp" + +#include +#include +#include +#include +#include +#include + +namespace protozero { + +/** + * The type used for field tags (field numbers). + */ +using pbf_tag_type = uint32_t; + +/** + * The type used to encode type information. + * See the table on + * https://developers.google.com/protocol-buffers/docs/encoding + */ +enum class pbf_wire_type : uint32_t { + varint = 0, // int32/64, uint32/64, sint32/64, bool, enum + fixed64 = 1, // fixed64, sfixed64, double + length_delimited = 2, // string, bytes, nested messages, packed repeated fields + fixed32 = 5, // fixed32, sfixed32, float + unknown = 99 // used for default setting in this library +}; + +/** + * Get the tag and wire type of the current field in one integer suitable + * for comparison with a switch statement. + * + * See pbf_reader.tag_and_type() for an example how to use this. + */ +template +constexpr inline uint32_t tag_and_type(T tag, pbf_wire_type wire_type) noexcept { + return (static_cast(static_cast(tag)) << 3U) | static_cast(wire_type); +} + +/** + * The type used for length values, such as the length of a field. + */ +using pbf_length_type = uint32_t; + +} // end namespace protozero + +#endif // PROTOZERO_TYPES_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/varint.hpp b/contrib/protozero/1.7.0/include/protozero/varint.hpp new file mode 100644 index 0000000..b4648a4 --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/varint.hpp @@ -0,0 +1,245 @@ +#ifndef PROTOZERO_VARINT_HPP +#define PROTOZERO_VARINT_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file varint.hpp + * + * @brief Contains low-level varint and zigzag encoding and decoding functions. + */ + +#include "buffer_tmpl.hpp" +#include "exception.hpp" + +#include + +namespace protozero { + +/** + * The maximum length of a 64 bit varint. 
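A worked value for the tag_and_type() helper from types.hpp above: field number 17 with wire type 2 (length-delimited) packs to (17 << 3) | 2 = 138. The include path is an assumption.

#include <protozero/types.hpp>

// The key that precedes every field in an encoded message is exactly this
// value: the field number shifted left by three bits, or-ed with the wire type.
static_assert(protozero::tag_and_type(17U, protozero::pbf_wire_type::length_delimited) == ((17U << 3U) | 2U),
              "tag 17 with wire type 2 encodes as 138");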
+ */
+constexpr const int8_t max_varint_length = sizeof(uint64_t) * 8 / 7 + 1;
+
+namespace detail {
+
+    // from https://github.com/facebook/folly/blob/master/folly/Varint.h
+    inline uint64_t decode_varint_impl(const char** data, const char* end) {
+        const auto* begin = reinterpret_cast<const int8_t*>(*data);
+        const auto* iend = reinterpret_cast<const int8_t*>(end);
+        const int8_t* p = begin;
+        uint64_t val = 0;
+
+        if (iend - begin >= max_varint_length) {  // fast path
+            do {
+                int64_t b = *p++;
+                          val  = ((uint64_t(b) & 0x7fU)       ); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) <<  7U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 14U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 21U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 28U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 35U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 42U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 49U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x7fU) << 56U); if (b >= 0) { break; }
+                b = *p++; val |= ((uint64_t(b) & 0x01U) << 63U); if (b >= 0) { break; }
+                throw varint_too_long_exception{};
+            } while (false);
+        } else {
+            unsigned int shift = 0;
+            while (p != iend && *p < 0) {
+                val |= (uint64_t(*p++) & 0x7fU) << shift;
+                shift += 7;
+            }
+            if (p == iend) {
+                throw end_of_buffer_exception{};
+            }
+            val |= uint64_t(*p++) << shift;
+        }
+
+        *data = reinterpret_cast<const char*>(p);
+        return val;
+    }
+
+} // end namespace detail
+
+/**
+ * Decode a 64 bit varint.
+ *
+ * Strong exception guarantee: if there is an exception the data pointer will
+ * not be changed.
+ *
+ * @param[in,out] data Pointer to pointer to the input data. After the function
+ *        returns this will point to the next data to be read.
+ * @param[in] end Pointer one past the end of the input data.
+ * @returns The decoded integer
+ * @throws varint_too_long_exception if the varint is longer than the maximum
+ *         length that would fit in a 64 bit int. Usually this means your data
+ *         is corrupted or you are trying to read something as a varint that
+ *         isn't.
+ * @throws end_of_buffer_exception if the *end* of the buffer was reached
+ *         before the end of the varint.
+ */
+inline uint64_t decode_varint(const char** data, const char* end) {
+    // If this is a one-byte varint, decode it here.
+    if (end != *data && ((static_cast<unsigned char>(**data) & 0x80U) == 0)) {
+        const auto val = static_cast<uint64_t>(**data);
+        ++(*data);
+        return val;
+    }
+    // If this varint is more than one byte, defer to complete implementation.
+    return detail::decode_varint_impl(data, end);
+}
+
+/**
+ * Skip over a varint.
+ *
+ * Strong exception guarantee: if there is an exception the data pointer will
+ * not be changed.
+ *
+ * @param[in,out] data Pointer to pointer to the input data. After the function
+ *        returns this will point to the next data to be read.
+ * @param[in] end Pointer one past the end of the input data.
+ * @throws end_of_buffer_exception if the *end* of the buffer was reached
+ *         before the end of the varint.
+ */ +inline void skip_varint(const char** data, const char* end) { + const auto* begin = reinterpret_cast(*data); + const auto* iend = reinterpret_cast(end); + const int8_t* p = begin; + + while (p != iend && *p < 0) { + ++p; + } + + if (p - begin >= max_varint_length) { + throw varint_too_long_exception{}; + } + + if (p == iend) { + throw end_of_buffer_exception{}; + } + + ++p; + + *data = reinterpret_cast(p); +} + +/** + * Varint encode a 64 bit integer. + * + * @tparam T An output iterator type. + * @param data Output iterator the varint encoded value will be written to + * byte by byte. + * @param value The integer that will be encoded. + * @returns the number of bytes written + * @throws Any exception thrown by increment or dereference operator on data. + * @deprecated Use add_varint_to_buffer() instead. + */ +template +inline int write_varint(T data, uint64_t value) { + int n = 1; + + while (value >= 0x80U) { + *data++ = char((value & 0x7fU) | 0x80U); + value >>= 7U; + ++n; + } + *data = char(value); + + return n; +} + +/** + * Varint encode a 64 bit integer. + * + * @tparam TBuffer A buffer type. + * @param buffer Output buffer the varint will be written to. + * @param value The integer that will be encoded. + * @returns the number of bytes written + * @throws Any exception thrown by calling the buffer_push_back() function. + */ +template +inline void add_varint_to_buffer(TBuffer* buffer, uint64_t value) { + while (value >= 0x80U) { + buffer_customization::push_back(buffer, char((value & 0x7fU) | 0x80U)); + value >>= 7U; + } + buffer_customization::push_back(buffer, char(value)); +} + +/** + * Varint encode a 64 bit integer. + * + * @param data Where to add the varint. There must be enough space available! + * @param value The integer that will be encoded. + * @returns the number of bytes written + */ +inline int add_varint_to_buffer(char* data, uint64_t value) noexcept { + int n = 1; + + while (value >= 0x80U) { + *data++ = char((value & 0x7fU) | 0x80U); + value >>= 7U; + ++n; + } + *data = char(value); + + return n; +} + +/** + * Get the length of the varint the specified value would produce. + * + * @param value The integer to be encoded. + * @returns the number of bytes the varint would have if we created it. + */ +inline int length_of_varint(uint64_t value) noexcept { + int n = 1; + + while (value >= 0x80U) { + value >>= 7U; + ++n; + } + + return n; +} + +/** + * ZigZag encodes a 32 bit integer. + */ +inline constexpr uint32_t encode_zigzag32(int32_t value) noexcept { + return (static_cast(value) << 1U) ^ static_cast(-static_cast(static_cast(value) >> 31U)); +} + +/** + * ZigZag encodes a 64 bit integer. + */ +inline constexpr uint64_t encode_zigzag64(int64_t value) noexcept { + return (static_cast(value) << 1U) ^ static_cast(-static_cast(static_cast(value) >> 63U)); +} + +/** + * Decodes a 32 bit ZigZag-encoded integer. + */ +inline constexpr int32_t decode_zigzag32(uint32_t value) noexcept { + return static_cast((value >> 1U) ^ static_cast(-static_cast(value & 1U))); +} + +/** + * Decodes a 64 bit ZigZag-encoded integer. 
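A round-trip sketch for the varint and zigzag helpers above; the include path is an assumption.

#include <protozero/varint.hpp>

#include <cassert>
#include <cstdint>

int main() {
    // Varint round trip: 300 needs two bytes (0xac 0x02).
    char buf[protozero::max_varint_length];
    const int len = protozero::add_varint_to_buffer(buf, 300U);
    assert(len == 2);

    const char* p = buf;
    assert(protozero::decode_varint(&p, buf + len) == 300U);
    assert(p == buf + len);  // the data pointer now points past the varint

    // ZigZag maps small negative numbers to small unsigned numbers:
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
    assert(protozero::encode_zigzag32(-1) == 1U);
    assert(protozero::decode_zigzag32(protozero::encode_zigzag32(-123)) == -123);
    return 0;
}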
+ */ +inline constexpr int64_t decode_zigzag64(uint64_t value) noexcept { + return static_cast((value >> 1U) ^ static_cast(-static_cast(value & 1U))); +} + +} // end namespace protozero + +#endif // PROTOZERO_VARINT_HPP diff --git a/contrib/protozero/1.7.0/include/protozero/version.hpp b/contrib/protozero/1.7.0/include/protozero/version.hpp new file mode 100644 index 0000000..9a0e4cc --- /dev/null +++ b/contrib/protozero/1.7.0/include/protozero/version.hpp @@ -0,0 +1,34 @@ +#ifndef PROTOZERO_VERSION_HPP +#define PROTOZERO_VERSION_HPP + +/***************************************************************************** + +protozero - Minimalistic protocol buffer decoder and encoder in C++. + +This file is from https://github.com/mapbox/protozero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file version.hpp + * + * @brief Contains macros defining the protozero version. + */ + +/// The major version number +#define PROTOZERO_VERSION_MAJOR 1 + +/// The minor version number +#define PROTOZERO_VERSION_MINOR 7 + +/// The patch number +#define PROTOZERO_VERSION_PATCH 0 + +/// The complete version number +#define PROTOZERO_VERSION_CODE (PROTOZERO_VERSION_MAJOR * 10000 + PROTOZERO_VERSION_MINOR * 100 + PROTOZERO_VERSION_PATCH) + +/// Version number as string +#define PROTOZERO_VERSION_STRING "1.7.0" + +#endif // PROTOZERO_VERSION_HPP diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/allocators.h b/contrib/rapidjson/1.1.0/include/rapidjson/allocators.h new file mode 100644 index 0000000..98affe0 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/allocators.h @@ -0,0 +1,271 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ALLOCATORS_H_ +#define RAPIDJSON_ALLOCATORS_H_ + +#include "rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Allocator + +/*! \class rapidjson::Allocator + \brief Concept for allocating, resizing and freeing memory block. + + Note that Malloc() and Realloc() are non-static but Free() is static. + + So if an allocator need to support Free(), it needs to put its pointer in + the header of memory block. + +\code +concept Allocator { + static const bool kNeedFree; //!< Whether this allocator needs to call Free(). + + // Allocate a memory block. + // \param size of the memory block in bytes. + // \returns pointer to the memory block. + void* Malloc(size_t size); + + // Resize a memory block. + // \param originalPtr The pointer to current memory block. Null pointer is permitted. + // \param originalSize The current size in bytes. (Design issue: since some allocator may not book-keep this, explicitly pass to it can save memory.) + // \param newSize the new size in bytes. 
+ void* Realloc(void* originalPtr, size_t originalSize, size_t newSize); + + // Free a memory block. + // \param pointer to the memory block. Null pointer is permitted. + static void Free(void *ptr); +}; +\endcode +*/ + +/////////////////////////////////////////////////////////////////////////////// +// CrtAllocator + +//! C-runtime library allocator. +/*! This class is just wrapper for standard C library memory routines. + \note implements Allocator concept +*/ +class CrtAllocator { +public: + static const bool kNeedFree = true; + void* Malloc(size_t size) { + if (size) // behavior of malloc(0) is implementation defined. + return std::malloc(size); + else + return NULL; // standardize to returning NULL. + } + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { + (void)originalSize; + if (newSize == 0) { + std::free(originalPtr); + return NULL; + } + return std::realloc(originalPtr, newSize); + } + static void Free(void *ptr) { std::free(ptr); } +}; + +/////////////////////////////////////////////////////////////////////////////// +// MemoryPoolAllocator + +//! Default memory allocator used by the parser and DOM. +/*! This allocator allocate memory blocks from pre-allocated memory chunks. + + It does not free memory blocks. And Realloc() only allocate new memory. + + The memory chunks are allocated by BaseAllocator, which is CrtAllocator by default. + + User may also supply a buffer as the first chunk. + + If the user-buffer is full then additional chunks are allocated by BaseAllocator. + + The user-buffer is not deallocated by this allocator. + + \tparam BaseAllocator the allocator type for allocating memory chunks. Default is CrtAllocator. + \note implements Allocator concept +*/ +template +class MemoryPoolAllocator { +public: + static const bool kNeedFree = false; //!< Tell users that no need to call Free() with this allocator. (concept Allocator) + + //! Constructor with chunkSize. + /*! \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. + \param baseAllocator The allocator for allocating memory chunks. + */ + MemoryPoolAllocator(size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : + chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(0), baseAllocator_(baseAllocator), ownBaseAllocator_(0) + { + } + + //! Constructor with user-supplied buffer. + /*! The user buffer will be used firstly. When it is full, memory pool allocates new chunk with chunk size. + + The user buffer will not be deallocated when this allocator is destructed. + + \param buffer User supplied buffer. + \param size Size of the buffer in bytes. It must at least larger than sizeof(ChunkHeader). + \param chunkSize The size of memory chunk. The default is kDefaultChunkSize. + \param baseAllocator The allocator for allocating memory chunks. + */ + MemoryPoolAllocator(void *buffer, size_t size, size_t chunkSize = kDefaultChunkCapacity, BaseAllocator* baseAllocator = 0) : + chunkHead_(0), chunk_capacity_(chunkSize), userBuffer_(buffer), baseAllocator_(baseAllocator), ownBaseAllocator_(0) + { + RAPIDJSON_ASSERT(buffer != 0); + RAPIDJSON_ASSERT(size > sizeof(ChunkHeader)); + chunkHead_ = reinterpret_cast(buffer); + chunkHead_->capacity = size - sizeof(ChunkHeader); + chunkHead_->size = 0; + chunkHead_->next = 0; + } + + //! Destructor. + /*! This deallocates all memory chunks, excluding the user-supplied buffer. + */ + ~MemoryPoolAllocator() { + Clear(); + RAPIDJSON_DELETE(ownBaseAllocator_); + } + + //! 
Deallocates all memory chunks, excluding the user-supplied buffer. + void Clear() { + while (chunkHead_ && chunkHead_ != userBuffer_) { + ChunkHeader* next = chunkHead_->next; + baseAllocator_->Free(chunkHead_); + chunkHead_ = next; + } + if (chunkHead_ && chunkHead_ == userBuffer_) + chunkHead_->size = 0; // Clear user buffer + } + + //! Computes the total capacity of allocated memory chunks. + /*! \return total capacity in bytes. + */ + size_t Capacity() const { + size_t capacity = 0; + for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) + capacity += c->capacity; + return capacity; + } + + //! Computes the memory blocks allocated. + /*! \return total used bytes. + */ + size_t Size() const { + size_t size = 0; + for (ChunkHeader* c = chunkHead_; c != 0; c = c->next) + size += c->size; + return size; + } + + //! Allocates a memory block. (concept Allocator) + void* Malloc(size_t size) { + if (!size) + return NULL; + + size = RAPIDJSON_ALIGN(size); + if (chunkHead_ == 0 || chunkHead_->size + size > chunkHead_->capacity) + if (!AddChunk(chunk_capacity_ > size ? chunk_capacity_ : size)) + return NULL; + + void *buffer = reinterpret_cast(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size; + chunkHead_->size += size; + return buffer; + } + + //! Resizes a memory block (concept Allocator) + void* Realloc(void* originalPtr, size_t originalSize, size_t newSize) { + if (originalPtr == 0) + return Malloc(newSize); + + if (newSize == 0) + return NULL; + + originalSize = RAPIDJSON_ALIGN(originalSize); + newSize = RAPIDJSON_ALIGN(newSize); + + // Do not shrink if new size is smaller than original + if (originalSize >= newSize) + return originalPtr; + + // Simply expand it if it is the last allocation and there is sufficient space + if (originalPtr == reinterpret_cast(chunkHead_) + RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + chunkHead_->size - originalSize) { + size_t increment = static_cast(newSize - originalSize); + if (chunkHead_->size + increment <= chunkHead_->capacity) { + chunkHead_->size += increment; + return originalPtr; + } + } + + // Realloc process: allocate and copy memory, do not free original buffer. + if (void* newBuffer = Malloc(newSize)) { + if (originalSize) + std::memcpy(newBuffer, originalPtr, originalSize); + return newBuffer; + } + else + return NULL; + } + + //! Frees a memory block (concept Allocator) + static void Free(void *ptr) { (void)ptr; } // Do nothing + +private: + //! Copy constructor is not permitted. + MemoryPoolAllocator(const MemoryPoolAllocator& rhs) /* = delete */; + //! Copy assignment operator is not permitted. + MemoryPoolAllocator& operator=(const MemoryPoolAllocator& rhs) /* = delete */; + + //! Creates a new chunk. + /*! \param capacity Capacity of the chunk in bytes. + \return true if success. + */ + bool AddChunk(size_t capacity) { + if (!baseAllocator_) + ownBaseAllocator_ = baseAllocator_ = RAPIDJSON_NEW(BaseAllocator()); + if (ChunkHeader* chunk = reinterpret_cast(baseAllocator_->Malloc(RAPIDJSON_ALIGN(sizeof(ChunkHeader)) + capacity))) { + chunk->capacity = capacity; + chunk->size = 0; + chunk->next = chunkHead_; + chunkHead_ = chunk; + return true; + } + else + return false; + } + + static const int kDefaultChunkCapacity = 64 * 1024; //!< Default chunk capacity. + + //! Chunk header for perpending to each chunk. + /*! Chunks are stored as a singly linked list. + */ + struct ChunkHeader { + size_t capacity; //!< Capacity of the chunk in bytes (excluding the header itself). 
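A rough usage sketch of the pool allocator above (illustrative only; it assumes the contrib rapidjson include directory is on the include path).

#include <rapidjson/allocators.h>
#include <cassert>

int main() {
    char stack[1024];
    rapidjson::MemoryPoolAllocator<> pool(stack, sizeof(stack));  // user buffer becomes the first chunk

    void* a = pool.Malloc(100);
    assert(pool.Size() >= 100);     // requested sizes are rounded up by RAPIDJSON_ALIGN

    // The most recent allocation can grow in place while its chunk has room.
    void* b = pool.Realloc(a, 100, 200);
    assert(b == a);

    rapidjson::MemoryPoolAllocator<>::Free(a);  // no-op: kNeedFree is false, blocks are never freed individually
    pool.Clear();                               // releases extra chunks, keeps the user-supplied buffer
    return 0;
}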
+ size_t size; //!< Current size of allocated memory in bytes. + ChunkHeader *next; //!< Next chunk in the linked list. + }; + + ChunkHeader *chunkHead_; //!< Head of the chunk linked-list. Only the head chunk serves allocation. + size_t chunk_capacity_; //!< The minimum capacity of chunk when they are allocated. + void *userBuffer_; //!< User supplied buffer. + BaseAllocator* baseAllocator_; //!< base allocator for allocating memory chunks. + BaseAllocator* ownBaseAllocator_; //!< base allocator created by this object. +}; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/document.h b/contrib/rapidjson/1.1.0/include/rapidjson/document.h new file mode 100644 index 0000000..e3e20df --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/document.h @@ -0,0 +1,2575 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_DOCUMENT_H_ +#define RAPIDJSON_DOCUMENT_H_ + +/*! \file document.h */ + +#include "reader.h" +#include "internal/meta.h" +#include "internal/strfunc.h" +#include "memorystream.h" +#include "encodedstream.h" +#include // placement new +#include + +RAPIDJSON_DIAG_PUSH +#ifdef _MSC_VER +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +RAPIDJSON_DIAG_OFF(4244) // conversion from kXxxFlags to 'uint16_t', possible loss of data +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_OFF(effc++) +#if __GNUC__ >= 6 +RAPIDJSON_DIAG_OFF(terminate) // ignore throwing RAPIDJSON_ASSERT in RAPIDJSON_NOEXCEPT functions +#endif +#endif // __GNUC__ + +#ifndef RAPIDJSON_NOMEMBERITERATORCLASS +#include // std::iterator, std::random_access_iterator_tag +#endif + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS +#include // std::move +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +// Forward declaration. +template +class GenericValue; + +template +class GenericDocument; + +//! Name-value pair in a JSON object value. +/*! + This class was internal to GenericValue. It used to be a inner struct. + But a compiler (IBM XL C/C++ for AIX) have reported to have problem with that so it moved as a namespace scope struct. + https://code.google.com/p/rapidjson/issues/detail?id=64 +*/ +template +struct GenericMember { + GenericValue name; //!< name of member (must be a string) + GenericValue value; //!< value of member. +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericMemberIterator + +#ifndef RAPIDJSON_NOMEMBERITERATORCLASS + +//! (Constant) member iterator for a JSON object value +/*! + \tparam Const Is this a constant iterator? + \tparam Encoding Encoding of the value. 
(Even non-string values need to have the same encoding in a document) + \tparam Allocator Allocator type for allocating memory of object, array and string. + + This class implements a Random Access Iterator for GenericMember elements + of a GenericValue, see ISO/IEC 14882:2003(E) C++ standard, 24.1 [lib.iterator.requirements]. + + \note This iterator implementation is mainly intended to avoid implicit + conversions from iterator values to \c NULL, + e.g. from GenericValue::FindMember. + + \note Define \c RAPIDJSON_NOMEMBERITERATORCLASS to fall back to a + pointer-based implementation, if your platform doesn't provide + the C++ header. + + \see GenericMember, GenericValue::MemberIterator, GenericValue::ConstMemberIterator + */ +template +class GenericMemberIterator + : public std::iterator >::Type> { + + friend class GenericValue; + template friend class GenericMemberIterator; + + typedef GenericMember PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef std::iterator BaseType; + +public: + //! Iterator type itself + typedef GenericMemberIterator Iterator; + //! Constant iterator type + typedef GenericMemberIterator ConstIterator; + //! Non-constant iterator type + typedef GenericMemberIterator NonConstIterator; + + //! Pointer to (const) GenericMember + typedef typename BaseType::pointer Pointer; + //! Reference to (const) GenericMember + typedef typename BaseType::reference Reference; + //! Signed integer type (e.g. \c ptrdiff_t) + typedef typename BaseType::difference_type DifferenceType; + + //! Default constructor (singular value) + /*! Creates an iterator pointing to no element. + \note All operations, except for comparisons, are undefined on such values. + */ + GenericMemberIterator() : ptr_() {} + + //! Iterator conversions to more const + /*! + \param it (Non-const) iterator to copy from + + Allows the creation of an iterator from another GenericMemberIterator + that is "less const". Especially, creating a non-constant iterator + from a constant iterator are disabled: + \li const -> non-const (not ok) + \li const -> const (ok) + \li non-const -> const (ok) + \li non-const -> non-const (ok) + + \note If the \c Const template parameter is already \c false, this + constructor effectively defines a regular copy-constructor. + Otherwise, the copy constructor is implicitly defined. + */ + GenericMemberIterator(const NonConstIterator & it) : ptr_(it.ptr_) {} + Iterator& operator=(const NonConstIterator & it) { ptr_ = it.ptr_; return *this; } + + //! @name stepping + //@{ + Iterator& operator++(){ ++ptr_; return *this; } + Iterator& operator--(){ --ptr_; return *this; } + Iterator operator++(int){ Iterator old(*this); ++ptr_; return old; } + Iterator operator--(int){ Iterator old(*this); --ptr_; return old; } + //@} + + //! @name increment/decrement + //@{ + Iterator operator+(DifferenceType n) const { return Iterator(ptr_+n); } + Iterator operator-(DifferenceType n) const { return Iterator(ptr_-n); } + + Iterator& operator+=(DifferenceType n) { ptr_+=n; return *this; } + Iterator& operator-=(DifferenceType n) { ptr_-=n; return *this; } + //@} + + //! 
@name relations + //@{ + bool operator==(ConstIterator that) const { return ptr_ == that.ptr_; } + bool operator!=(ConstIterator that) const { return ptr_ != that.ptr_; } + bool operator<=(ConstIterator that) const { return ptr_ <= that.ptr_; } + bool operator>=(ConstIterator that) const { return ptr_ >= that.ptr_; } + bool operator< (ConstIterator that) const { return ptr_ < that.ptr_; } + bool operator> (ConstIterator that) const { return ptr_ > that.ptr_; } + //@} + + //! @name dereference + //@{ + Reference operator*() const { return *ptr_; } + Pointer operator->() const { return ptr_; } + Reference operator[](DifferenceType n) const { return ptr_[n]; } + //@} + + //! Distance + DifferenceType operator-(ConstIterator that) const { return ptr_-that.ptr_; } + +private: + //! Internal constructor from plain pointer + explicit GenericMemberIterator(Pointer p) : ptr_(p) {} + + Pointer ptr_; //!< raw pointer +}; + +#else // RAPIDJSON_NOMEMBERITERATORCLASS + +// class-based member iterator implementation disabled, use plain pointers + +template +struct GenericMemberIterator; + +//! non-const GenericMemberIterator +template +struct GenericMemberIterator { + //! use plain pointer as iterator type + typedef GenericMember* Iterator; +}; +//! const GenericMemberIterator +template +struct GenericMemberIterator { + //! use plain const pointer as iterator type + typedef const GenericMember* Iterator; +}; + +#endif // RAPIDJSON_NOMEMBERITERATORCLASS + +/////////////////////////////////////////////////////////////////////////////// +// GenericStringRef + +//! Reference to a constant string (not taking a copy) +/*! + \tparam CharType character type of the string + + This helper class is used to automatically infer constant string + references for string literals, especially from \c const \b (!) + character arrays. + + The main use is for creating JSON string values without copying the + source string via an \ref Allocator. This requires that the referenced + string pointers have a sufficient lifetime, which exceeds the lifetime + of the associated GenericValue. + + \b Example + \code + Value v("foo"); // ok, no need to copy & calculate length + const char foo[] = "foo"; + v.SetString(foo); // ok + + const char* bar = foo; + // Value x(bar); // not ok, can't rely on bar's lifetime + Value x(StringRef(bar)); // lifetime explicitly guaranteed by user + Value y(StringRef(bar, 3)); // ok, explicitly pass length + \endcode + + \see StringRef, GenericValue::SetString +*/ +template +struct GenericStringRef { + typedef CharType Ch; //!< character type of the string + + //! Create string reference from \c const character array +#ifndef __clang__ // -Wdocumentation + /*! + This constructor implicitly creates a constant string reference from + a \c const character array. It has better performance than + \ref StringRef(const CharType*) by inferring the string \ref length + from the array length, and also supports strings containing null + characters. + + \tparam N length of the string, automatically inferred + + \param str Constant character array, lifetime assumed to be longer + than the use of the string in e.g. a GenericValue + + \post \ref s == str + + \note Constant complexity. + \note There is a hidden, private overload to disallow references to + non-const character arrays to be created via this constructor. + By this, e.g. function-scope arrays used to be filled via + \c snprintf are excluded from consideration. + In such cases, the referenced string should be \b copied to the + GenericValue instead. 
+ */ +#endif + template + GenericStringRef(const CharType (&str)[N]) RAPIDJSON_NOEXCEPT + : s(str), length(N-1) {} + + //! Explicitly create string reference from \c const character pointer +#ifndef __clang__ // -Wdocumentation + /*! + This constructor can be used to \b explicitly create a reference to + a constant string pointer. + + \see StringRef(const CharType*) + + \param str Constant character pointer, lifetime assumed to be longer + than the use of the string in e.g. a GenericValue + + \post \ref s == str + + \note There is a hidden, private overload to disallow references to + non-const character arrays to be created via this constructor. + By this, e.g. function-scope arrays used to be filled via + \c snprintf are excluded from consideration. + In such cases, the referenced string should be \b copied to the + GenericValue instead. + */ +#endif + explicit GenericStringRef(const CharType* str) + : s(str), length(internal::StrLen(str)){ RAPIDJSON_ASSERT(s != 0); } + + //! Create constant string reference from pointer and length +#ifndef __clang__ // -Wdocumentation + /*! \param str constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \param len length of the string, excluding the trailing NULL terminator + + \post \ref s == str && \ref length == len + \note Constant complexity. + */ +#endif + GenericStringRef(const CharType* str, SizeType len) + : s(str), length(len) { RAPIDJSON_ASSERT(s != 0); } + + GenericStringRef(const GenericStringRef& rhs) : s(rhs.s), length(rhs.length) {} + + GenericStringRef& operator=(const GenericStringRef& rhs) { s = rhs.s; length = rhs.length; } + + //! implicit conversion to plain CharType pointer + operator const Ch *() const { return s; } + + const Ch* const s; //!< plain CharType pointer + const SizeType length; //!< length of the string (excluding the trailing NULL terminator) + +private: + //! Disallow construction from non-const array + template + GenericStringRef(CharType (&str)[N]) /* = delete */; +}; + +//! Mark a character pointer as constant string +/*! Mark a plain character pointer as a "string literal". This function + can be used to avoid copying a character string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. + \tparam CharType Character type of the string + \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \return GenericStringRef string reference object + \relatesalso GenericStringRef + + \see GenericValue::GenericValue(StringRefType), GenericValue::operator=(StringRefType), GenericValue::SetString(StringRefType), GenericValue::PushBack(StringRefType, Allocator&), GenericValue::AddMember +*/ +template +inline GenericStringRef StringRef(const CharType* str) { + return GenericStringRef(str, internal::StrLen(str)); +} + +//! Mark a character pointer as constant string +/*! Mark a plain character pointer as a "string literal". This function + can be used to avoid copying a character string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. + + This version has better performance with supplied length, and also + supports string containing null characters. + + \tparam CharType character type of the string + \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \param length The length of source string. 
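A short sketch of when to reach for StringRef() versus the copying string constructors; it assumes rapidjson::Value and rapidjson::Document, both defined later in this header, and the helper function name is hypothetical.

#include <rapidjson/document.h>
#include <cstring>

void string_values(rapidjson::Document& d) {
    char scratch[16];
    std::strcpy(scratch, "transient");

    rapidjson::Value copied(scratch, d.GetAllocator());       // copy-string: bytes duplicated, scratch may be reused
    rapidjson::Value prefix(scratch, 5, d.GetAllocator());    // copy only "trans"; embedded NULs are allowed here
    rapidjson::Value literal(rapidjson::StringRef("label"));  // const-string: stores pointer+length, caller owns lifetime
}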
+ \return GenericStringRef string reference object + \relatesalso GenericStringRef +*/ +template +inline GenericStringRef StringRef(const CharType* str, size_t length) { + return GenericStringRef(str, SizeType(length)); +} + +#if RAPIDJSON_HAS_STDSTRING +//! Mark a string object as constant string +/*! Mark a string object (e.g. \c std::string) as a "string literal". + This function can be used to avoid copying a string to be referenced as a + value in a JSON GenericValue object, if the string's lifetime is known + to be valid long enough. + + \tparam CharType character type of the string + \param str Constant string, lifetime assumed to be longer than the use of the string in e.g. a GenericValue + \return GenericStringRef string reference object + \relatesalso GenericStringRef + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. +*/ +template +inline GenericStringRef StringRef(const std::basic_string& str) { + return GenericStringRef(str.data(), SizeType(str.size())); +} +#endif + +/////////////////////////////////////////////////////////////////////////////// +// GenericValue type traits +namespace internal { + +template +struct IsGenericValueImpl : FalseType {}; + +// select candidates according to nested encoding and allocator types +template struct IsGenericValueImpl::Type, typename Void::Type> + : IsBaseOf, T>::Type {}; + +// helper to match arbitrary GenericValue instantiations, including derived classes +template struct IsGenericValue : IsGenericValueImpl::Type {}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// TypeHelper + +namespace internal { + +template +struct TypeHelper {}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsBool(); } + static bool Get(const ValueType& v) { return v.GetBool(); } + static ValueType& Set(ValueType& v, bool data) { return v.SetBool(data); } + static ValueType& Set(ValueType& v, bool data, typename ValueType::AllocatorType&) { return v.SetBool(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt(); } + static int Get(const ValueType& v) { return v.GetInt(); } + static ValueType& Set(ValueType& v, int data) { return v.SetInt(data); } + static ValueType& Set(ValueType& v, int data, typename ValueType::AllocatorType&) { return v.SetInt(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint(); } + static unsigned Get(const ValueType& v) { return v.GetUint(); } + static ValueType& Set(ValueType& v, unsigned data) { return v.SetUint(data); } + static ValueType& Set(ValueType& v, unsigned data, typename ValueType::AllocatorType&) { return v.SetUint(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsInt64(); } + static int64_t Get(const ValueType& v) { return v.GetInt64(); } + static ValueType& Set(ValueType& v, int64_t data) { return v.SetInt64(data); } + static ValueType& Set(ValueType& v, int64_t data, typename ValueType::AllocatorType&) { return v.SetInt64(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsUint64(); } + static uint64_t Get(const ValueType& v) { return v.GetUint64(); } + static ValueType& Set(ValueType& v, uint64_t data) { return v.SetUint64(data); } + static ValueType& Set(ValueType& v, uint64_t data, typename ValueType::AllocatorType&) { return v.SetUint64(data); } +}; + +template +struct TypeHelper { + static bool 
Is(const ValueType& v) { return v.IsDouble(); } + static double Get(const ValueType& v) { return v.GetDouble(); } + static ValueType& Set(ValueType& v, double data) { return v.SetDouble(data); } + static ValueType& Set(ValueType& v, double data, typename ValueType::AllocatorType&) { return v.SetDouble(data); } +}; + +template +struct TypeHelper { + static bool Is(const ValueType& v) { return v.IsFloat(); } + static float Get(const ValueType& v) { return v.GetFloat(); } + static ValueType& Set(ValueType& v, float data) { return v.SetFloat(data); } + static ValueType& Set(ValueType& v, float data, typename ValueType::AllocatorType&) { return v.SetFloat(data); } +}; + +template +struct TypeHelper { + typedef const typename ValueType::Ch* StringType; + static bool Is(const ValueType& v) { return v.IsString(); } + static StringType Get(const ValueType& v) { return v.GetString(); } + static ValueType& Set(ValueType& v, const StringType data) { return v.SetString(typename ValueType::StringRefType(data)); } + static ValueType& Set(ValueType& v, const StringType data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } +}; + +#if RAPIDJSON_HAS_STDSTRING +template +struct TypeHelper > { + typedef std::basic_string StringType; + static bool Is(const ValueType& v) { return v.IsString(); } + static StringType Get(const ValueType& v) { return StringType(v.GetString(), v.GetStringLength()); } + static ValueType& Set(ValueType& v, const StringType& data, typename ValueType::AllocatorType& a) { return v.SetString(data, a); } +}; +#endif + +template +struct TypeHelper { + typedef typename ValueType::Array ArrayType; + static bool Is(const ValueType& v) { return v.IsArray(); } + static ArrayType Get(ValueType& v) { return v.GetArray(); } + static ValueType& Set(ValueType& v, ArrayType data) { return v = data; } + static ValueType& Set(ValueType& v, ArrayType data, typename ValueType::AllocatorType&) { return v = data; } +}; + +template +struct TypeHelper { + typedef typename ValueType::ConstArray ArrayType; + static bool Is(const ValueType& v) { return v.IsArray(); } + static ArrayType Get(const ValueType& v) { return v.GetArray(); } +}; + +template +struct TypeHelper { + typedef typename ValueType::Object ObjectType; + static bool Is(const ValueType& v) { return v.IsObject(); } + static ObjectType Get(ValueType& v) { return v.GetObject(); } + static ValueType& Set(ValueType& v, ObjectType data) { return v = data; } + static ValueType& Set(ValueType& v, ObjectType data, typename ValueType::AllocatorType&) { v = data; } +}; + +template +struct TypeHelper { + typedef typename ValueType::ConstObject ObjectType; + static bool Is(const ValueType& v) { return v.IsObject(); } + static ObjectType Get(const ValueType& v) { return v.GetObject(); } +}; + +} // namespace internal + +// Forward declarations +template class GenericArray; +template class GenericObject; + +/////////////////////////////////////////////////////////////////////////////// +// GenericValue + +//! Represents a JSON value. Use Value for UTF8 encoding and default allocator. +/*! + A JSON value can be one of 7 types. This class is a variant type supporting + these types. + + Use the Value if UTF8 and default allocator + + \tparam Encoding Encoding of the value. (Even non-string values need to have the same encoding in a document) + \tparam Allocator Allocator type for allocating memory of object, array and string. +*/ +template > +class GenericValue { +public: + //! Name-value pair in an object. 
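These TypeHelper specializations back the templated accessors on GenericValue; the sketch below assumes the Is<T>()/Get<T>()/Set<T>() member templates defined further down in document.h and a Document for the allocator.

#include <rapidjson/document.h>
#include <cassert>
#include <string>

void typed_access(rapidjson::Document& d) {    // hypothetical helper
    rapidjson::Value v(3.25);
    assert(v.Is<double>() && v.Get<double>() == 3.25);

    v.Set<int>(42, d.GetAllocator());          // routed through TypeHelper<ValueType, int>::Set
    assert(v.Is<int>() && v.Get<int>() == 42);

#if RAPIDJSON_HAS_STDSTRING
    v.Set(std::string("text"), d.GetAllocator());   // std::string helper copies via the allocator
    assert(v.Get<std::string>() == "text");
#endif
}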
+ typedef GenericMember Member; + typedef Encoding EncodingType; //!< Encoding type from template parameter. + typedef Allocator AllocatorType; //!< Allocator type from template parameter. + typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. + typedef GenericStringRef StringRefType; //!< Reference to a constant string + typedef typename GenericMemberIterator::Iterator MemberIterator; //!< Member iterator for iterating in object. + typedef typename GenericMemberIterator::Iterator ConstMemberIterator; //!< Constant member iterator for iterating in object. + typedef GenericValue* ValueIterator; //!< Value iterator for iterating in array. + typedef const GenericValue* ConstValueIterator; //!< Constant value iterator for iterating in array. + typedef GenericValue ValueType; //!< Value type of itself. + typedef GenericArray Array; + typedef GenericArray ConstArray; + typedef GenericObject Object; + typedef GenericObject ConstObject; + + //!@name Constructors and destructor. + //@{ + + //! Default constructor creates a null value. + GenericValue() RAPIDJSON_NOEXCEPT : data_() { data_.f.flags = kNullFlag; } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move constructor in C++11 + GenericValue(GenericValue&& rhs) RAPIDJSON_NOEXCEPT : data_(rhs.data_) { + rhs.data_.f.flags = kNullFlag; // give up contents + } +#endif + +private: + //! Copy constructor is not permitted. + GenericValue(const GenericValue& rhs); + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Moving from a GenericDocument is not permitted. + template + GenericValue(GenericDocument&& rhs); + + //! Move assignment from a GenericDocument is not permitted. + template + GenericValue& operator=(GenericDocument&& rhs); +#endif + +public: + + //! Constructor with JSON value type. + /*! This creates a Value of specified type with default content. + \param type Type of the value. + \note Default content for number is zero. + */ + explicit GenericValue(Type type) RAPIDJSON_NOEXCEPT : data_() { + static const uint16_t defaultFlags[7] = { + kNullFlag, kFalseFlag, kTrueFlag, kObjectFlag, kArrayFlag, kShortStringFlag, + kNumberAnyFlag + }; + RAPIDJSON_ASSERT(type <= kNumberType); + data_.f.flags = defaultFlags[type]; + + // Use ShortString to store empty string. + if (type == kStringType) + data_.ss.SetLength(0); + } + + //! Explicit copy constructor (with allocator) + /*! Creates a copy of a Value by using the given Allocator + \tparam SourceAllocator allocator of \c rhs + \param rhs Value to copy from (read-only) + \param allocator Allocator for allocating copied elements and buffers. Commonly use GenericDocument::GetAllocator(). + \see CopyFrom() + */ + template< typename SourceAllocator > + GenericValue(const GenericValue& rhs, Allocator & allocator); + + //! Constructor for boolean value. + /*! \param b Boolean value + \note This constructor is limited to \em real boolean values and rejects + implicitly converted types like arbitrary pointers. Use an explicit cast + to \c bool, if you want to construct a boolean JSON value in such cases. + */ +#ifndef RAPIDJSON_DOXYGEN_RUNNING // hide SFINAE from Doxygen + template + explicit GenericValue(T b, RAPIDJSON_ENABLEIF((internal::IsSame))) RAPIDJSON_NOEXCEPT // See #472 +#else + explicit GenericValue(bool b) RAPIDJSON_NOEXCEPT +#endif + : data_() { + // safe-guard against failing SFINAE + RAPIDJSON_STATIC_ASSERT((internal::IsSame::Value)); + data_.f.flags = b ? kTrueFlag : kFalseFlag; + } + + //! Constructor for int value. 
+ explicit GenericValue(int i) RAPIDJSON_NOEXCEPT : data_() { + data_.n.i64 = i; + data_.f.flags = (i >= 0) ? (kNumberIntFlag | kUintFlag | kUint64Flag) : kNumberIntFlag; + } + + //! Constructor for unsigned value. + explicit GenericValue(unsigned u) RAPIDJSON_NOEXCEPT : data_() { + data_.n.u64 = u; + data_.f.flags = (u & 0x80000000) ? kNumberUintFlag : (kNumberUintFlag | kIntFlag | kInt64Flag); + } + + //! Constructor for int64_t value. + explicit GenericValue(int64_t i64) RAPIDJSON_NOEXCEPT : data_() { + data_.n.i64 = i64; + data_.f.flags = kNumberInt64Flag; + if (i64 >= 0) { + data_.f.flags |= kNumberUint64Flag; + if (!(static_cast(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) + data_.f.flags |= kUintFlag; + if (!(static_cast(i64) & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + else if (i64 >= static_cast(RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + + //! Constructor for uint64_t value. + explicit GenericValue(uint64_t u64) RAPIDJSON_NOEXCEPT : data_() { + data_.n.u64 = u64; + data_.f.flags = kNumberUint64Flag; + if (!(u64 & RAPIDJSON_UINT64_C2(0x80000000, 0x00000000))) + data_.f.flags |= kInt64Flag; + if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x00000000))) + data_.f.flags |= kUintFlag; + if (!(u64 & RAPIDJSON_UINT64_C2(0xFFFFFFFF, 0x80000000))) + data_.f.flags |= kIntFlag; + } + + //! Constructor for double value. + explicit GenericValue(double d) RAPIDJSON_NOEXCEPT : data_() { data_.n.d = d; data_.f.flags = kNumberDoubleFlag; } + + //! Constructor for constant string (i.e. do not make a copy of string) + GenericValue(const Ch* s, SizeType length) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(StringRef(s, length)); } + + //! Constructor for constant string (i.e. do not make a copy of string) + explicit GenericValue(StringRefType s) RAPIDJSON_NOEXCEPT : data_() { SetStringRaw(s); } + + //! Constructor for copy-string (i.e. do make a copy of string) + GenericValue(const Ch* s, SizeType length, Allocator& allocator) : data_() { SetStringRaw(StringRef(s, length), allocator); } + + //! Constructor for copy-string (i.e. do make a copy of string) + GenericValue(const Ch*s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } + +#if RAPIDJSON_HAS_STDSTRING + //! Constructor for copy-string from a string object (i.e. do make a copy of string) + /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + GenericValue(const std::basic_string& s, Allocator& allocator) : data_() { SetStringRaw(StringRef(s), allocator); } +#endif + + //! Constructor for Array. + /*! + \param a An array obtained by \c GetArray(). + \note \c Array is always pass-by-value. + \note the source array is moved into this value and the sourec array becomes empty. + */ + GenericValue(Array a) RAPIDJSON_NOEXCEPT : data_(a.value_.data_) { + a.value_.data_ = Data(); + a.value_.data_.f.flags = kArrayFlag; + } + + //! Constructor for Object. + /*! + \param o An object obtained by \c GetObject(). + \note \c Object is always pass-by-value. + \note the source object is moved into this value and the sourec object becomes empty. + */ + GenericValue(Object o) RAPIDJSON_NOEXCEPT : data_(o.value_.data_) { + o.value_.data_ = Data(); + o.value_.data_.f.flags = kObjectFlag; + } + + //! Destructor. + /*! Need to destruct elements of array, members of object, or copy-string. 
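A quick sketch of the constructors above (it assumes rapidjson::Document, defined later in this header, only to supply an allocator).

#include <rapidjson/document.h>
#include <cstdint>

void constructors(rapidjson::Document& d) {    // hypothetical helper
    using rapidjson::Value;
    using rapidjson::StringRef;

    Value empty;                            // default constructor: null
    Value arr(rapidjson::kArrayType);       // empty value of the given type
    Value num(-7);                          // int constructor; non-negative values also get the uint/uint64 flags
    Value big(uint64_t(1) << 40);           // uint64_t constructor
    Value ref(StringRef("constant"));       // const-string: no copy, caller guarantees lifetime
    Value own("copied", d.GetAllocator());  // copy-string: bytes duplicated through the allocator
}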
+ */ + ~GenericValue() { + if (Allocator::kNeedFree) { // Shortcut by Allocator's trait + switch(data_.f.flags) { + case kArrayFlag: + { + GenericValue* e = GetElementsPointer(); + for (GenericValue* v = e; v != e + data_.a.size; ++v) + v->~GenericValue(); + Allocator::Free(e); + } + break; + + case kObjectFlag: + for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) + m->~Member(); + Allocator::Free(GetMembersPointer()); + break; + + case kCopyStringFlag: + Allocator::Free(const_cast(GetStringPointer())); + break; + + default: + break; // Do nothing for other types. + } + } + } + + //@} + + //!@name Assignment operators + //@{ + + //! Assignment with move semantics. + /*! \param rhs Source of the assignment. It will become a null value after assignment. + */ + GenericValue& operator=(GenericValue& rhs) RAPIDJSON_NOEXCEPT { + RAPIDJSON_ASSERT(this != &rhs); + this->~GenericValue(); + RawAssign(rhs); + return *this; + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move assignment in C++11 + GenericValue& operator=(GenericValue&& rhs) RAPIDJSON_NOEXCEPT { + return *this = rhs.Move(); + } +#endif + + //! Assignment of constant string reference (no copy) + /*! \param str Constant string reference to be assigned + \note This overload is needed to avoid clashes with the generic primitive type assignment overload below. + \see GenericStringRef, operator=(T) + */ + GenericValue& operator=(StringRefType str) RAPIDJSON_NOEXCEPT { + GenericValue s(str); + return *this = s; + } + + //! Assignment with primitive types. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param value The value to be assigned. + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref SetString(const Ch*, Allocator&) (for copying) or + \ref StringRef() (to explicitly mark the pointer as constant) instead. + All other pointer types would implicitly convert to \c bool, + use \ref SetBool() instead. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::IsPointer), (GenericValue&)) + operator=(T value) { + GenericValue v(value); + return *this = v; + } + + //! Deep-copy assignment from Value + /*! Assigns a \b copy of the Value to the current Value object + \tparam SourceAllocator Allocator type of \c rhs + \param rhs Value to copy from (read-only) + \param allocator Allocator to use for copying + */ + template + GenericValue& CopyFrom(const GenericValue& rhs, Allocator& allocator) { + RAPIDJSON_ASSERT(static_cast(this) != static_cast(&rhs)); + this->~GenericValue(); + new (this) GenericValue(rhs, allocator); + return *this; + } + + //! Exchange the contents of this value with those of other. + /*! + \param other Another value. + \note Constant complexity. + */ + GenericValue& Swap(GenericValue& other) RAPIDJSON_NOEXCEPT { + GenericValue temp; + temp.RawAssign(*this); + RawAssign(other); + other.RawAssign(temp); + return *this; + } + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.value, b.value); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericValue& a, GenericValue& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + + //! Prepare Value for move semantics + /*! 
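A sketch of the ownership rules above: plain assignment moves, CopyFrom() deep-copies, Swap() is constant time (assumes rapidjson::Document for the allocator).

#include <rapidjson/document.h>
#include <cassert>

void ownership(rapidjson::Document& d) {    // hypothetical helper
    rapidjson::Value a(rapidjson::kObjectType);
    rapidjson::Value b;

    b = a;                              // move semantics: b takes the object, a becomes null
    assert(a.IsNull() && b.IsObject());

    rapidjson::Value c;
    c.CopyFrom(b, d.GetAllocator());    // explicit deep copy; b is left untouched
    assert(b.IsObject() && c.IsObject());

    a.Swap(c);                          // constant-time exchange of contents
    assert(a.IsObject() && c.IsNull());
}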
\return *this */ + GenericValue& Move() RAPIDJSON_NOEXCEPT { return *this; } + //@} + + //!@name Equal-to and not-equal-to operators + //@{ + //! Equal-to operator + /*! + \note If an object contains duplicated named member, comparing equality with any object is always \c false. + \note Linear time complexity (number of all values in the subtree and total lengths of all strings). + */ + template + bool operator==(const GenericValue& rhs) const { + typedef GenericValue RhsType; + if (GetType() != rhs.GetType()) + return false; + + switch (GetType()) { + case kObjectType: // Warning: O(n^2) inner-loop + if (data_.o.size != rhs.data_.o.size) + return false; + for (ConstMemberIterator lhsMemberItr = MemberBegin(); lhsMemberItr != MemberEnd(); ++lhsMemberItr) { + typename RhsType::ConstMemberIterator rhsMemberItr = rhs.FindMember(lhsMemberItr->name); + if (rhsMemberItr == rhs.MemberEnd() || lhsMemberItr->value != rhsMemberItr->value) + return false; + } + return true; + + case kArrayType: + if (data_.a.size != rhs.data_.a.size) + return false; + for (SizeType i = 0; i < data_.a.size; i++) + if ((*this)[i] != rhs[i]) + return false; + return true; + + case kStringType: + return StringEqual(rhs); + + case kNumberType: + if (IsDouble() || rhs.IsDouble()) { + double a = GetDouble(); // May convert from integer to double. + double b = rhs.GetDouble(); // Ditto + return a >= b && a <= b; // Prevent -Wfloat-equal + } + else + return data_.n.u64 == rhs.data_.n.u64; + + default: + return true; + } + } + + //! Equal-to operator with const C-string pointer + bool operator==(const Ch* rhs) const { return *this == GenericValue(StringRef(rhs)); } + +#if RAPIDJSON_HAS_STDSTRING + //! Equal-to operator with string object + /*! \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + bool operator==(const std::basic_string& rhs) const { return *this == GenericValue(StringRef(rhs)); } +#endif + + //! Equal-to operator with primitive types + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c true, \c false + */ + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr,internal::IsGenericValue >), (bool)) operator==(const T& rhs) const { return *this == GenericValue(rhs); } + + //! Not-equal-to operator + /*! \return !(*this == rhs) + */ + template + bool operator!=(const GenericValue& rhs) const { return !(*this == rhs); } + + //! Not-equal-to operator with const C-string pointer + bool operator!=(const Ch* rhs) const { return !(*this == rhs); } + + //! Not-equal-to operator with arbitrary types + /*! \return !(*this == rhs) + */ + template RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator!=(const T& rhs) const { return !(*this == rhs); } + + //! Equal-to operator with arbitrary types (symmetric version) + /*! \return (rhs == lhs) + */ + template friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator==(const T& lhs, const GenericValue& rhs) { return rhs == lhs; } + + //! Not-Equal-to operator with arbitrary types (symmetric version) + /*! 
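A sketch of the deep, order-insensitive comparison described above; it assumes rapidjson::Document and its Parse(), both defined later in this header.

#include <rapidjson/document.h>
#include <cassert>

void compare() {    // hypothetical helper
    rapidjson::Document a, b;
    a.Parse("{\"x\":1,\"y\":[1,2]}");
    b.Parse("{\"y\":[1,2],\"x\":1.0}");

    assert(a == b);            // members are matched by name; 1 and 1.0 compare equal as numbers
    assert(a["x"] == 1);       // comparison against primitives and C strings also works
    assert(a != "some string");
}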
\return !(rhs == lhs) + */ + template friend RAPIDJSON_DISABLEIF_RETURN((internal::IsGenericValue), (bool)) operator!=(const T& lhs, const GenericValue& rhs) { return !(rhs == lhs); } + //@} + + //!@name Type + //@{ + + Type GetType() const { return static_cast(data_.f.flags & kTypeMask); } + bool IsNull() const { return data_.f.flags == kNullFlag; } + bool IsFalse() const { return data_.f.flags == kFalseFlag; } + bool IsTrue() const { return data_.f.flags == kTrueFlag; } + bool IsBool() const { return (data_.f.flags & kBoolFlag) != 0; } + bool IsObject() const { return data_.f.flags == kObjectFlag; } + bool IsArray() const { return data_.f.flags == kArrayFlag; } + bool IsNumber() const { return (data_.f.flags & kNumberFlag) != 0; } + bool IsInt() const { return (data_.f.flags & kIntFlag) != 0; } + bool IsUint() const { return (data_.f.flags & kUintFlag) != 0; } + bool IsInt64() const { return (data_.f.flags & kInt64Flag) != 0; } + bool IsUint64() const { return (data_.f.flags & kUint64Flag) != 0; } + bool IsDouble() const { return (data_.f.flags & kDoubleFlag) != 0; } + bool IsString() const { return (data_.f.flags & kStringFlag) != 0; } + + // Checks whether a number can be losslessly converted to a double. + bool IsLosslessDouble() const { + if (!IsNumber()) return false; + if (IsUint64()) { + uint64_t u = GetUint64(); + volatile double d = static_cast(u); + return (d >= 0.0) + && (d < static_cast(std::numeric_limits::max())) + && (u == static_cast(d)); + } + if (IsInt64()) { + int64_t i = GetInt64(); + volatile double d = static_cast(i); + return (d >= static_cast(std::numeric_limits::min())) + && (d < static_cast(std::numeric_limits::max())) + && (i == static_cast(d)); + } + return true; // double, int, uint are always lossless + } + + // Checks whether a number is a float (possible lossy). + bool IsFloat() const { + if ((data_.f.flags & kDoubleFlag) == 0) + return false; + double d = GetDouble(); + return d >= -3.4028234e38 && d <= 3.4028234e38; + } + // Checks whether a number can be losslessly converted to a float. + bool IsLosslessFloat() const { + if (!IsNumber()) return false; + double a = GetDouble(); + if (a < static_cast(-std::numeric_limits::max()) + || a > static_cast(std::numeric_limits::max())) + return false; + double b = static_cast(static_cast(a)); + return a >= b && a <= b; // Prevent -Wfloat-equal + } + + //@} + + //!@name Null + //@{ + + GenericValue& SetNull() { this->~GenericValue(); new (this) GenericValue(); return *this; } + + //@} + + //!@name Bool + //@{ + + bool GetBool() const { RAPIDJSON_ASSERT(IsBool()); return data_.f.flags == kTrueFlag; } + //!< Set boolean value + /*! \post IsBool() == true */ + GenericValue& SetBool(bool b) { this->~GenericValue(); new (this) GenericValue(b); return *this; } + + //@} + + //!@name Object + //@{ + + //! Set this value as an empty object. + /*! \post IsObject() == true */ + GenericValue& SetObject() { this->~GenericValue(); new (this) GenericValue(kObjectType); return *this; } + + //! Get the number of members in the object. + SizeType MemberCount() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size; } + + //! Check whether the object is empty. + bool ObjectEmpty() const { RAPIDJSON_ASSERT(IsObject()); return data_.o.size == 0; } + + //! Get a value from an object associated with the name. + /*! \pre IsObject() == true + \tparam T Either \c Ch or \c const \c Ch (template used for disambiguation with \ref operator[](SizeType)) + \note In version 0.1x, if the member is not found, this function returns a null value. 
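The numeric flags above are not mutually exclusive, so several Is*() checks can hold for the same value; a small sketch (assumes the contrib rapidjson headers on the include path).

#include <rapidjson/document.h>
#include <cassert>
#include <cstdint>

void numeric_flags() {    // hypothetical helper
    rapidjson::Value v(1);                  // small non-negative int
    assert(v.IsInt() && v.IsUint() && v.IsInt64() && v.IsUint64());
    assert(!v.IsDouble() && v.IsLosslessDouble());

    rapidjson::Value big((uint64_t(1) << 53) + 1);   // 2^53 + 1 has no exact double representation
    assert(big.IsUint64() && big.IsInt64() && !big.IsInt());
    assert(!big.IsLosslessDouble());
}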
This makes issue 7. + Since 0.2, if the name is not correct, it will assert. + If user is unsure whether a member exists, user should use HasMember() first. + A better approach is to use FindMember(). + \note Linear time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >),(GenericValue&)) operator[](T* name) { + GenericValue n(StringRef(name)); + return (*this)[n]; + } + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >),(const GenericValue&)) operator[](T* name) const { return const_cast(*this)[name]; } + + //! Get a value from an object associated with the name. + /*! \pre IsObject() == true + \tparam SourceAllocator Allocator of the \c name value + + \note Compared to \ref operator[](T*), this version is faster because it does not need a StrLen(). + And it can also handle strings with embedded null characters. + + \note Linear time complexity. + */ + template + GenericValue& operator[](const GenericValue& name) { + MemberIterator member = FindMember(name); + if (member != MemberEnd()) + return member->value; + else { + RAPIDJSON_ASSERT(false); // see above note + + // This will generate -Wexit-time-destructors in clang + // static GenericValue NullValue; + // return NullValue; + + // Use static buffer and placement-new to prevent destruction + static char buffer[sizeof(GenericValue)]; + return *new (buffer) GenericValue(); + } + } + template + const GenericValue& operator[](const GenericValue& name) const { return const_cast(*this)[name]; } + +#if RAPIDJSON_HAS_STDSTRING + //! Get a value from an object associated with name (string object). + GenericValue& operator[](const std::basic_string& name) { return (*this)[GenericValue(StringRef(name))]; } + const GenericValue& operator[](const std::basic_string& name) const { return (*this)[GenericValue(StringRef(name))]; } +#endif + + //! Const member iterator + /*! \pre IsObject() == true */ + ConstMemberIterator MemberBegin() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer()); } + //! Const \em past-the-end member iterator + /*! \pre IsObject() == true */ + ConstMemberIterator MemberEnd() const { RAPIDJSON_ASSERT(IsObject()); return ConstMemberIterator(GetMembersPointer() + data_.o.size); } + //! Member iterator + /*! \pre IsObject() == true */ + MemberIterator MemberBegin() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer()); } + //! \em Past-the-end member iterator + /*! \pre IsObject() == true */ + MemberIterator MemberEnd() { RAPIDJSON_ASSERT(IsObject()); return MemberIterator(GetMembersPointer() + data_.o.size); } + + //! Check whether a member exists in the object. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + bool HasMember(const Ch* name) const { return FindMember(name) != MemberEnd(); } + +#if RAPIDJSON_HAS_STDSTRING + //! Check whether a member exists in the object with string object. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + bool HasMember(const std::basic_string& name) const { return FindMember(name) != MemberEnd(); } +#endif + + //! Check whether a member exists in the object with GenericValue name. 
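A sketch of membership tests and member iteration on an object value (assumes a parsed rapidjson::Document such as {"name":"x","size":3}; GetString() is defined later in this header).

#include <rapidjson/document.h>
#include <cstdio>

void list_members(const rapidjson::Document& d) {    // hypothetical helper
    if (d.HasMember("name"))
        std::printf("name = %s\n", d["name"].GetString());

    // operator[] asserts when the member is missing, so iterate or use FindMember() when unsure.
    for (rapidjson::Value::ConstMemberIterator it = d.MemberBegin(); it != d.MemberEnd(); ++it)
        std::printf("%s has type %d\n", it->name.GetString(), it->value.GetType());
}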
+ /*! + This version is faster because it does not need a StrLen(). It can also handle string with null character. + \param name Member name to be searched. + \pre IsObject() == true + \return Whether a member with that name exists. + \note It is better to use FindMember() directly if you need the obtain the value as well. + \note Linear time complexity. + */ + template + bool HasMember(const GenericValue& name) const { return FindMember(name) != MemberEnd(); } + + //! Find member by name. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + + \note Earlier versions of Rapidjson returned a \c NULL pointer, in case + the requested member doesn't exist. For consistency with e.g. + \c std::map, this has been changed to MemberEnd() now. + \note Linear time complexity. + */ + MemberIterator FindMember(const Ch* name) { + GenericValue n(StringRef(name)); + return FindMember(n); + } + + ConstMemberIterator FindMember(const Ch* name) const { return const_cast(*this).FindMember(name); } + + //! Find member by name. + /*! + This version is faster because it does not need a StrLen(). It can also handle string with null character. + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + + \note Earlier versions of Rapidjson returned a \c NULL pointer, in case + the requested member doesn't exist. For consistency with e.g. + \c std::map, this has been changed to MemberEnd() now. + \note Linear time complexity. + */ + template + MemberIterator FindMember(const GenericValue& name) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(name.IsString()); + MemberIterator member = MemberBegin(); + for ( ; member != MemberEnd(); ++member) + if (name.StringEqual(member->name)) + break; + return member; + } + template ConstMemberIterator FindMember(const GenericValue& name) const { return const_cast(*this).FindMember(name); } + +#if RAPIDJSON_HAS_STDSTRING + //! Find member by string object name. + /*! + \param name Member name to be searched. + \pre IsObject() == true + \return Iterator to member, if it exists. + Otherwise returns \ref MemberEnd(). + */ + MemberIterator FindMember(const std::basic_string& name) { return FindMember(GenericValue(StringRef(name))); } + ConstMemberIterator FindMember(const std::basic_string& name) const { return FindMember(GenericValue(StringRef(name))); } +#endif + + //! Add a member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value Value of any type. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note The ownership of \c name and \c value will be transferred to this object on success. + \pre IsObject() && name.IsString() + \post name.IsNull() && value.IsNull() + \note Amortized Constant time complexity. 
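A sketch of the FindMember() pattern described above, which avoids the assertion in operator[] for missing members (GetInt() is defined later in this header; the function name is hypothetical).

#include <rapidjson/document.h>

int size_or_default(const rapidjson::Document& d) {
    rapidjson::Value::ConstMemberIterator it = d.FindMember("size");
    if (it != d.MemberEnd() && it->value.IsInt())
        return it->value.GetInt();
    return -1;   // fall back instead of asserting via operator[]
}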
+ */ + GenericValue& AddMember(GenericValue& name, GenericValue& value, Allocator& allocator) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(name.IsString()); + + ObjectData& o = data_.o; + if (o.size >= o.capacity) { + if (o.capacity == 0) { + o.capacity = kDefaultObjectCapacity; + SetMembersPointer(reinterpret_cast(allocator.Malloc(o.capacity * sizeof(Member)))); + } + else { + SizeType oldCapacity = o.capacity; + o.capacity += (oldCapacity + 1) / 2; // grow by factor 1.5 + SetMembersPointer(reinterpret_cast(allocator.Realloc(GetMembersPointer(), oldCapacity * sizeof(Member), o.capacity * sizeof(Member)))); + } + } + Member* members = GetMembersPointer(); + members[o.size].name.RawAssign(name); + members[o.size].value.RawAssign(value); + o.size++; + return *this; + } + + //! Add a constant string value as member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, StringRefType value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Add a string object as member (name-value pair) to the object. + /*! \param name A string value as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(GenericValue&,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(GenericValue& name, std::basic_string& value, Allocator& allocator) { + GenericValue v(value, allocator); + return AddMember(name, v, allocator); + } +#endif + + //! Add any primitive value as member (name-value pair) to the object. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param name A string value as name of member. + \param value Value of primitive type \c T as value of member + \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref + AddMember(StringRefType, StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized Constant time complexity. 
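A sketch of building an object with the AddMember() overloads above; every overload needs the document's allocator, and names/values passed as GenericValue are moved into the object.

#include <rapidjson/document.h>

void build_object(rapidjson::Document& d) {    // hypothetical helper
    d.SetObject();
    rapidjson::Document::AllocatorType& a = d.GetAllocator();

    rapidjson::Value key("id", a);    // copied name
    rapidjson::Value val(42);
    d.AddMember(key, val, a);         // ownership transferred: key and val are null afterwards

    d.AddMember("flag", true, a);                               // StringRefType name + primitive value
    d.AddMember("note", rapidjson::StringRef("no copy"), a);    // StringRefType name + const-string value
}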
+ */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + AddMember(GenericValue& name, T value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericValue& AddMember(GenericValue&& name, GenericValue&& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(GenericValue&& name, GenericValue& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(GenericValue& name, GenericValue&& value, Allocator& allocator) { + return AddMember(name, value, allocator); + } + GenericValue& AddMember(StringRefType name, GenericValue&& value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + + + //! Add a member (name-value pair) to the object. + /*! \param name A constant string reference as name of member. + \param value Value of any type. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note The ownership of \c value will be transferred to this object on success. + \pre IsObject() + \post value.IsNull() + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(StringRefType name, GenericValue& value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } + + //! Add a constant string value as member (name-value pair) to the object. + /*! \param name A constant string reference as name of member. + \param value constant string reference as value of member. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + \note This overload is needed to avoid clashes with the generic primitive type AddMember(StringRefType,T,Allocator&) overload below. + \note Amortized Constant time complexity. + */ + GenericValue& AddMember(StringRefType name, StringRefType value, Allocator& allocator) { + GenericValue v(value); + return AddMember(name, v, allocator); + } + + //! Add any primitive value as member (name-value pair) to the object. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param name A constant string reference as name of member. + \param value Value of primitive type \c T as value of member + \param allocator Allocator for reallocating memory. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \pre IsObject() + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref AddMember(StringRefType, GenericValue&, Allocator&) or \ref + AddMember(StringRefType, StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized Constant time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + AddMember(StringRefType name, T value, Allocator& allocator) { + GenericValue n(name); + return AddMember(n, value, allocator); + } + + //! Remove all members in the object. + /*! 
This function do not deallocate memory in the object, i.e. the capacity is unchanged. + \note Linear time complexity. + */ + void RemoveAllMembers() { + RAPIDJSON_ASSERT(IsObject()); + for (MemberIterator m = MemberBegin(); m != MemberEnd(); ++m) + m->~Member(); + data_.o.size = 0; + } + + //! Remove a member in object by its name. + /*! \param name Name of member to be removed. + \return Whether the member existed. + \note This function may reorder the object members. Use \ref + EraseMember(ConstMemberIterator) if you need to preserve the + relative order of the remaining members. + \note Linear time complexity. + */ + bool RemoveMember(const Ch* name) { + GenericValue n(StringRef(name)); + return RemoveMember(n); + } + +#if RAPIDJSON_HAS_STDSTRING + bool RemoveMember(const std::basic_string& name) { return RemoveMember(GenericValue(StringRef(name))); } +#endif + + template + bool RemoveMember(const GenericValue& name) { + MemberIterator m = FindMember(name); + if (m != MemberEnd()) { + RemoveMember(m); + return true; + } + else + return false; + } + + //! Remove a member in object by iterator. + /*! \param m member iterator (obtained by FindMember() or MemberBegin()). + \return the new iterator after removal. + \note This function may reorder the object members. Use \ref + EraseMember(ConstMemberIterator) if you need to preserve the + relative order of the remaining members. + \note Constant time complexity. + */ + MemberIterator RemoveMember(MemberIterator m) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(data_.o.size > 0); + RAPIDJSON_ASSERT(GetMembersPointer() != 0); + RAPIDJSON_ASSERT(m >= MemberBegin() && m < MemberEnd()); + + MemberIterator last(GetMembersPointer() + (data_.o.size - 1)); + if (data_.o.size > 1 && m != last) + *m = *last; // Move the last one to this place + else + m->~Member(); // Only one left, just destroy + --data_.o.size; + return m; + } + + //! Remove a member from an object by iterator. + /*! \param pos iterator to the member to remove + \pre IsObject() == true && \ref MemberBegin() <= \c pos < \ref MemberEnd() + \return Iterator following the removed element. + If the iterator \c pos refers to the last element, the \ref MemberEnd() iterator is returned. + \note This function preserves the relative order of the remaining object + members. If you do not need this, use the more efficient \ref RemoveMember(MemberIterator). + \note Linear time complexity. + */ + MemberIterator EraseMember(ConstMemberIterator pos) { + return EraseMember(pos, pos +1); + } + + //! Remove members in the range [first, last) from an object. + /*! \param first iterator to the first member to remove + \param last iterator following the last member to remove + \pre IsObject() == true && \ref MemberBegin() <= \c first <= \c last <= \ref MemberEnd() + \return Iterator following the last removed element. + \note This function preserves the relative order of the remaining object + members. + \note Linear time complexity. 
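+
+        A small sketch, assuming \c obj is a GenericValue of object type: erase every
+        member after the first one while keeping the order of what remains.
+        \code
+        if (obj.MemberCount() > 1)
+            obj.EraseMember(obj.MemberBegin() + 1, obj.MemberEnd());
+        \endcode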
+ */ + MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) { + RAPIDJSON_ASSERT(IsObject()); + RAPIDJSON_ASSERT(data_.o.size > 0); + RAPIDJSON_ASSERT(GetMembersPointer() != 0); + RAPIDJSON_ASSERT(first >= MemberBegin()); + RAPIDJSON_ASSERT(first <= last); + RAPIDJSON_ASSERT(last <= MemberEnd()); + + MemberIterator pos = MemberBegin() + (first - MemberBegin()); + for (MemberIterator itr = pos; itr != last; ++itr) + itr->~Member(); + std::memmove(&*pos, &*last, static_cast(MemberEnd() - last) * sizeof(Member)); + data_.o.size -= static_cast(last - first); + return pos; + } + + //! Erase a member in object by its name. + /*! \param name Name of member to be removed. + \return Whether the member existed. + \note Linear time complexity. + */ + bool EraseMember(const Ch* name) { + GenericValue n(StringRef(name)); + return EraseMember(n); + } + +#if RAPIDJSON_HAS_STDSTRING + bool EraseMember(const std::basic_string& name) { return EraseMember(GenericValue(StringRef(name))); } +#endif + + template + bool EraseMember(const GenericValue& name) { + MemberIterator m = FindMember(name); + if (m != MemberEnd()) { + EraseMember(m); + return true; + } + else + return false; + } + + Object GetObject() { RAPIDJSON_ASSERT(IsObject()); return Object(*this); } + ConstObject GetObject() const { RAPIDJSON_ASSERT(IsObject()); return ConstObject(*this); } + + //@} + + //!@name Array + //@{ + + //! Set this value as an empty array. + /*! \post IsArray == true */ + GenericValue& SetArray() { this->~GenericValue(); new (this) GenericValue(kArrayType); return *this; } + + //! Get the number of elements in array. + SizeType Size() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size; } + + //! Get the capacity of array. + SizeType Capacity() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.capacity; } + + //! Check whether the array is empty. + bool Empty() const { RAPIDJSON_ASSERT(IsArray()); return data_.a.size == 0; } + + //! Remove all elements in the array. + /*! This function do not deallocate memory in the array, i.e. the capacity is unchanged. + \note Linear time complexity. + */ + void Clear() { + RAPIDJSON_ASSERT(IsArray()); + GenericValue* e = GetElementsPointer(); + for (GenericValue* v = e; v != e + data_.a.size; ++v) + v->~GenericValue(); + data_.a.size = 0; + } + + //! Get an element from array by index. + /*! \pre IsArray() == true + \param index Zero-based index of element. + \see operator[](T*) + */ + GenericValue& operator[](SizeType index) { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(index < data_.a.size); + return GetElementsPointer()[index]; + } + const GenericValue& operator[](SizeType index) const { return const_cast(*this)[index]; } + + //! Element iterator + /*! \pre IsArray() == true */ + ValueIterator Begin() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer(); } + //! \em Past-the-end element iterator + /*! \pre IsArray() == true */ + ValueIterator End() { RAPIDJSON_ASSERT(IsArray()); return GetElementsPointer() + data_.a.size; } + //! Constant element iterator + /*! \pre IsArray() == true */ + ConstValueIterator Begin() const { return const_cast(*this).Begin(); } + //! Constant \em past-the-end element iterator + /*! \pre IsArray() == true */ + ConstValueIterator End() const { return const_cast(*this).End(); } + + //! Request the array to have enough capacity to store elements. + /*! \param newCapacity The capacity that the array at least need to have. + \param allocator Allocator for reallocating memory. 
It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \note Linear time complexity. + */ + GenericValue& Reserve(SizeType newCapacity, Allocator &allocator) { + RAPIDJSON_ASSERT(IsArray()); + if (newCapacity > data_.a.capacity) { + SetElementsPointer(reinterpret_cast(allocator.Realloc(GetElementsPointer(), data_.a.capacity * sizeof(GenericValue), newCapacity * sizeof(GenericValue)))); + data_.a.capacity = newCapacity; + } + return *this; + } + + //! Append a GenericValue at the end of the array. + /*! \param value Value to be appended. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \post value.IsNull() == true + \return The value itself for fluent API. + \note The ownership of \c value will be transferred to this array on success. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + \note Amortized constant time complexity. + */ + GenericValue& PushBack(GenericValue& value, Allocator& allocator) { + RAPIDJSON_ASSERT(IsArray()); + if (data_.a.size >= data_.a.capacity) + Reserve(data_.a.capacity == 0 ? kDefaultArrayCapacity : (data_.a.capacity + (data_.a.capacity + 1) / 2), allocator); + GetElementsPointer()[data_.a.size++].RawAssign(value); + return *this; + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericValue& PushBack(GenericValue&& value, Allocator& allocator) { + return PushBack(value, allocator); + } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + + //! Append a constant string reference at the end of the array. + /*! \param value Constant string reference to be appended. + \param allocator Allocator for reallocating memory. It must be the same one used previously. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \return The value itself for fluent API. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + \note Amortized constant time complexity. + \see GenericStringRef + */ + GenericValue& PushBack(StringRefType value, Allocator& allocator) { + return (*this).template PushBack(value, allocator); + } + + //! Append a primitive value at the end of the array. + /*! \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t + \param value Value of primitive type T to be appended. + \param allocator Allocator for reallocating memory. It must be the same one as used before. Commonly use GenericDocument::GetAllocator(). + \pre IsArray() == true + \return The value itself for fluent API. + \note If the number of elements to be appended is known, calls Reserve() once first may be more efficient. + + \note The source type \c T explicitly disallows all pointer types, + especially (\c const) \ref Ch*. This helps avoiding implicitly + referencing character strings with insufficient lifetime, use + \ref PushBack(GenericValue&, Allocator&) or \ref + PushBack(StringRefType, Allocator&). + All other pointer types would implicitly convert to \c bool, + use an explicit cast instead, if needed. + \note Amortized constant time complexity. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (GenericValue&)) + PushBack(T value, Allocator& allocator) { + GenericValue v(value); + return PushBack(v, allocator); + } + + //! Remove the last element in the array. + /*! + \note Constant time complexity. 
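+
+        A brief sketch in the context of the other array mutators (assumes \c a is an
+        allocator, typically from GenericDocument::GetAllocator()):
+        \code
+        Value arr(kArrayType);
+        arr.PushBack(1, a).PushBack(2, a);  // fluent API
+        arr.PopBack();                      // arr now holds [1]
+        \endcode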
+ */ + GenericValue& PopBack() { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(!Empty()); + GetElementsPointer()[--data_.a.size].~GenericValue(); + return *this; + } + + //! Remove an element of array by iterator. + /*! + \param pos iterator to the element to remove + \pre IsArray() == true && \ref Begin() <= \c pos < \ref End() + \return Iterator following the removed element. If the iterator pos refers to the last element, the End() iterator is returned. + \note Linear time complexity. + */ + ValueIterator Erase(ConstValueIterator pos) { + return Erase(pos, pos + 1); + } + + //! Remove elements in the range [first, last) of the array. + /*! + \param first iterator to the first element to remove + \param last iterator following the last element to remove + \pre IsArray() == true && \ref Begin() <= \c first <= \c last <= \ref End() + \return Iterator following the last removed element. + \note Linear time complexity. + */ + ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) { + RAPIDJSON_ASSERT(IsArray()); + RAPIDJSON_ASSERT(data_.a.size > 0); + RAPIDJSON_ASSERT(GetElementsPointer() != 0); + RAPIDJSON_ASSERT(first >= Begin()); + RAPIDJSON_ASSERT(first <= last); + RAPIDJSON_ASSERT(last <= End()); + ValueIterator pos = Begin() + (first - Begin()); + for (ValueIterator itr = pos; itr != last; ++itr) + itr->~GenericValue(); + std::memmove(pos, last, static_cast(End() - last) * sizeof(GenericValue)); + data_.a.size -= static_cast(last - first); + return pos; + } + + Array GetArray() { RAPIDJSON_ASSERT(IsArray()); return Array(*this); } + ConstArray GetArray() const { RAPIDJSON_ASSERT(IsArray()); return ConstArray(*this); } + + //@} + + //!@name Number + //@{ + + int GetInt() const { RAPIDJSON_ASSERT(data_.f.flags & kIntFlag); return data_.n.i.i; } + unsigned GetUint() const { RAPIDJSON_ASSERT(data_.f.flags & kUintFlag); return data_.n.u.u; } + int64_t GetInt64() const { RAPIDJSON_ASSERT(data_.f.flags & kInt64Flag); return data_.n.i64; } + uint64_t GetUint64() const { RAPIDJSON_ASSERT(data_.f.flags & kUint64Flag); return data_.n.u64; } + + //! Get the value as double type. + /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessDouble() to check whether the converison is lossless. + */ + double GetDouble() const { + RAPIDJSON_ASSERT(IsNumber()); + if ((data_.f.flags & kDoubleFlag) != 0) return data_.n.d; // exact type, no conversion. + if ((data_.f.flags & kIntFlag) != 0) return data_.n.i.i; // int -> double + if ((data_.f.flags & kUintFlag) != 0) return data_.n.u.u; // unsigned -> double + if ((data_.f.flags & kInt64Flag) != 0) return static_cast(data_.n.i64); // int64_t -> double (may lose precision) + RAPIDJSON_ASSERT((data_.f.flags & kUint64Flag) != 0); return static_cast(data_.n.u64); // uint64_t -> double (may lose precision) + } + + //! Get the value as float type. + /*! \note If the value is 64-bit integer type, it may lose precision. Use \c IsLosslessFloat() to check whether the converison is lossless. 
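+
+        A minimal sketch of the narrowing concern noted above:
+        \code
+        Value v(0.25);
+        float f = v.GetFloat();     // 0.25f, converted via GetDouble()
+        Value big(16777217);        // 2^24 + 1: too precise to survive a round-trip through float
+        double exact = big.IsLosslessFloat() ? big.GetFloat() : big.GetDouble();
+        \endcode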
+ */ + float GetFloat() const { + return static_cast(GetDouble()); + } + + GenericValue& SetInt(int i) { this->~GenericValue(); new (this) GenericValue(i); return *this; } + GenericValue& SetUint(unsigned u) { this->~GenericValue(); new (this) GenericValue(u); return *this; } + GenericValue& SetInt64(int64_t i64) { this->~GenericValue(); new (this) GenericValue(i64); return *this; } + GenericValue& SetUint64(uint64_t u64) { this->~GenericValue(); new (this) GenericValue(u64); return *this; } + GenericValue& SetDouble(double d) { this->~GenericValue(); new (this) GenericValue(d); return *this; } + GenericValue& SetFloat(float f) { this->~GenericValue(); new (this) GenericValue(f); return *this; } + + //@} + + //!@name String + //@{ + + const Ch* GetString() const { RAPIDJSON_ASSERT(IsString()); return (data_.f.flags & kInlineStrFlag) ? data_.ss.str : GetStringPointer(); } + + //! Get the length of string. + /*! Since rapidjson permits "\\u0000" in the json string, strlen(v.GetString()) may not equal to v.GetStringLength(). + */ + SizeType GetStringLength() const { RAPIDJSON_ASSERT(IsString()); return ((data_.f.flags & kInlineStrFlag) ? (data_.ss.GetLength()) : data_.s.length); } + + //! Set this value as a string without copying source string. + /*! This version has better performance with supplied length, and also support string containing null character. + \param s source string pointer. + \param length The length of source string, excluding the trailing null terminator. + \return The value itself for fluent API. + \post IsString() == true && GetString() == s && GetStringLength() == length + \see SetString(StringRefType) + */ + GenericValue& SetString(const Ch* s, SizeType length) { return SetString(StringRef(s, length)); } + + //! Set this value as a string without copying source string. + /*! \param s source string reference + \return The value itself for fluent API. + \post IsString() == true && GetString() == s && GetStringLength() == s.length + */ + GenericValue& SetString(StringRefType s) { this->~GenericValue(); SetStringRaw(s); return *this; } + + //! Set this value as a string by copying from source string. + /*! This version has better performance with supplied length, and also support string containing null character. + \param s source string. + \param length The length of source string, excluding the trailing null terminator. + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length + */ + GenericValue& SetString(const Ch* s, SizeType length, Allocator& allocator) { this->~GenericValue(); SetStringRaw(StringRef(s, length), allocator); return *this; } + + //! Set this value as a string by copying from source string. + /*! \param s source string. + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). + \return The value itself for fluent API. + \post IsString() == true && GetString() != s && strcmp(GetString(),s) == 0 && GetStringLength() == length + */ + GenericValue& SetString(const Ch* s, Allocator& allocator) { return SetString(s, internal::StrLen(s), allocator); } + +#if RAPIDJSON_HAS_STDSTRING + //! Set this value as a string by copying from source string. + /*! \param s source string. + \param allocator Allocator for allocating copied buffer. Commonly use GenericDocument::GetAllocator(). 
+ \return The value itself for fluent API. + \post IsString() == true && GetString() != s.data() && strcmp(GetString(),s.data() == 0 && GetStringLength() == s.size() + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + GenericValue& SetString(const std::basic_string& s, Allocator& allocator) { return SetString(s.data(), SizeType(s.size()), allocator); } +#endif + + //@} + + //!@name Array + //@{ + + //! Templated version for checking whether this value is type T. + /*! + \tparam T Either \c bool, \c int, \c unsigned, \c int64_t, \c uint64_t, \c double, \c float, \c const \c char*, \c std::basic_string + */ + template + bool Is() const { return internal::TypeHelper::Is(*this); } + + template + T Get() const { return internal::TypeHelper::Get(*this); } + + template + T Get() { return internal::TypeHelper::Get(*this); } + + template + ValueType& Set(const T& data) { return internal::TypeHelper::Set(*this, data); } + + template + ValueType& Set(const T& data, AllocatorType& allocator) { return internal::TypeHelper::Set(*this, data, allocator); } + + //@} + + //! Generate events of this value to a Handler. + /*! This function adopts the GoF visitor pattern. + Typical usage is to output this JSON value as JSON text via Writer, which is a Handler. + It can also be used to deep clone this value via GenericDocument, which is also a Handler. + \tparam Handler type of handler. + \param handler An object implementing concept Handler. + */ + template + bool Accept(Handler& handler) const { + switch(GetType()) { + case kNullType: return handler.Null(); + case kFalseType: return handler.Bool(false); + case kTrueType: return handler.Bool(true); + + case kObjectType: + if (RAPIDJSON_UNLIKELY(!handler.StartObject())) + return false; + for (ConstMemberIterator m = MemberBegin(); m != MemberEnd(); ++m) { + RAPIDJSON_ASSERT(m->name.IsString()); // User may change the type of name by MemberIterator. + if (RAPIDJSON_UNLIKELY(!handler.Key(m->name.GetString(), m->name.GetStringLength(), (m->name.data_.f.flags & kCopyFlag) != 0))) + return false; + if (RAPIDJSON_UNLIKELY(!m->value.Accept(handler))) + return false; + } + return handler.EndObject(data_.o.size); + + case kArrayType: + if (RAPIDJSON_UNLIKELY(!handler.StartArray())) + return false; + for (const GenericValue* v = Begin(); v != End(); ++v) + if (RAPIDJSON_UNLIKELY(!v->Accept(handler))) + return false; + return handler.EndArray(data_.a.size); + + case kStringType: + return handler.String(GetString(), GetStringLength(), (data_.f.flags & kCopyFlag) != 0); + + default: + RAPIDJSON_ASSERT(GetType() == kNumberType); + if (IsDouble()) return handler.Double(data_.n.d); + else if (IsInt()) return handler.Int(data_.n.i.i); + else if (IsUint()) return handler.Uint(data_.n.u.u); + else if (IsInt64()) return handler.Int64(data_.n.i64); + else return handler.Uint64(data_.n.u64); + } + } + +private: + template friend class GenericValue; + template friend class GenericDocument; + + enum { + kBoolFlag = 0x0008, + kNumberFlag = 0x0010, + kIntFlag = 0x0020, + kUintFlag = 0x0040, + kInt64Flag = 0x0080, + kUint64Flag = 0x0100, + kDoubleFlag = 0x0200, + kStringFlag = 0x0400, + kCopyFlag = 0x0800, + kInlineStrFlag = 0x1000, + + // Initial flags of different types. 
+        kNullFlag = kNullType,
+        kTrueFlag = kTrueType | kBoolFlag,
+        kFalseFlag = kFalseType | kBoolFlag,
+        kNumberIntFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag,
+        kNumberUintFlag = kNumberType | kNumberFlag | kUintFlag | kUint64Flag | kInt64Flag,
+        kNumberInt64Flag = kNumberType | kNumberFlag | kInt64Flag,
+        kNumberUint64Flag = kNumberType | kNumberFlag | kUint64Flag,
+        kNumberDoubleFlag = kNumberType | kNumberFlag | kDoubleFlag,
+        kNumberAnyFlag = kNumberType | kNumberFlag | kIntFlag | kInt64Flag | kUintFlag | kUint64Flag | kDoubleFlag,
+        kConstStringFlag = kStringType | kStringFlag,
+        kCopyStringFlag = kStringType | kStringFlag | kCopyFlag,
+        kShortStringFlag = kStringType | kStringFlag | kCopyFlag | kInlineStrFlag,
+        kObjectFlag = kObjectType,
+        kArrayFlag = kArrayType,
+
+        kTypeMask = 0x07
+    };
+
+    static const SizeType kDefaultArrayCapacity = 16;
+    static const SizeType kDefaultObjectCapacity = 16;
+
+    struct Flag {
+#if RAPIDJSON_48BITPOINTER_OPTIMIZATION
+        char payload[sizeof(SizeType) * 2 + 6];     // 2 x SizeType + lower 48-bit pointer
+#elif RAPIDJSON_64BIT
+        char payload[sizeof(SizeType) * 2 + sizeof(void*) + 6];     // 6 padding bytes
+#else
+        char payload[sizeof(SizeType) * 2 + sizeof(void*) + 2];     // 2 padding bytes
+#endif
+        uint16_t flags;
+    };
+
+    struct String {
+        SizeType length;
+        SizeType hashcode;  //!< reserved
+        const Ch* str;
+    };  // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode
+
+    // implementation detail: ShortString can represent zero-terminated strings up to MaxSize chars
+    // (excluding the terminating zero) and stores a value to determine the length of the contained
+    // string in the last character str[LenPos] by storing "MaxSize - length" there. If the string
+    // to store has the maximal length of MaxSize then str[LenPos] will be 0 and therefore act as
+    // the string terminator as well. For getting the string length back from that value just use
+    // "MaxSize - str[LenPos]".
+    // This allows storing strings of up to 13 chars in 32-bit mode, 21 chars in 64-bit mode,
+    // and 13 chars inline with RAPIDJSON_48BITPOINTER_OPTIMIZATION=1 (for `UTF8`-encoded strings).
+    struct ShortString {
+        enum { MaxChars = sizeof(static_cast<Flag*>(0)->payload) / sizeof(Ch), MaxSize = MaxChars - 1, LenPos = MaxSize };
+        Ch str[MaxChars];
+
+        inline static bool Usable(SizeType len) { return (MaxSize >= len); }
+        inline void SetLength(SizeType len) { str[LenPos] = static_cast<Ch>(MaxSize - len); }
+        inline SizeType GetLength() const { return static_cast<SizeType>(MaxSize - str[LenPos]); }
+    };  // at most as many bytes as "String" above => 12 bytes in 32-bit mode, 16 bytes in 64-bit mode
+
+    // By using proper binary layout, retrieval of different integer types does not need conversions.
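+    // For example, on a little-endian machine the 32-bit "i" member below overlaps the
+    // low-order half of i64, so an integer stored through the int64_t path can be read
+    // back with GetInt() (when the flags say it fits) without any conversion; the padding
+    // members keep that overlap correct on big-endian machines as well.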
+ union Number { +#if RAPIDJSON_ENDIAN == RAPIDJSON_LITTLEENDIAN + struct I { + int i; + char padding[4]; + }i; + struct U { + unsigned u; + char padding2[4]; + }u; +#else + struct I { + char padding[4]; + int i; + }i; + struct U { + char padding2[4]; + unsigned u; + }u; +#endif + int64_t i64; + uint64_t u64; + double d; + }; // 8 bytes + + struct ObjectData { + SizeType size; + SizeType capacity; + Member* members; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + struct ArrayData { + SizeType size; + SizeType capacity; + GenericValue* elements; + }; // 12 bytes in 32-bit mode, 16 bytes in 64-bit mode + + union Data { + String s; + ShortString ss; + Number n; + ObjectData o; + ArrayData a; + Flag f; + }; // 16 bytes in 32-bit mode, 24 bytes in 64-bit mode, 16 bytes in 64-bit with RAPIDJSON_48BITPOINTER_OPTIMIZATION + + RAPIDJSON_FORCEINLINE const Ch* GetStringPointer() const { return RAPIDJSON_GETPOINTER(Ch, data_.s.str); } + RAPIDJSON_FORCEINLINE const Ch* SetStringPointer(const Ch* str) { return RAPIDJSON_SETPOINTER(Ch, data_.s.str, str); } + RAPIDJSON_FORCEINLINE GenericValue* GetElementsPointer() const { return RAPIDJSON_GETPOINTER(GenericValue, data_.a.elements); } + RAPIDJSON_FORCEINLINE GenericValue* SetElementsPointer(GenericValue* elements) { return RAPIDJSON_SETPOINTER(GenericValue, data_.a.elements, elements); } + RAPIDJSON_FORCEINLINE Member* GetMembersPointer() const { return RAPIDJSON_GETPOINTER(Member, data_.o.members); } + RAPIDJSON_FORCEINLINE Member* SetMembersPointer(Member* members) { return RAPIDJSON_SETPOINTER(Member, data_.o.members, members); } + + // Initialize this value as array with initial data, without calling destructor. + void SetArrayRaw(GenericValue* values, SizeType count, Allocator& allocator) { + data_.f.flags = kArrayFlag; + if (count) { + GenericValue* e = static_cast(allocator.Malloc(count * sizeof(GenericValue))); + SetElementsPointer(e); + std::memcpy(e, values, count * sizeof(GenericValue)); + } + else + SetElementsPointer(0); + data_.a.size = data_.a.capacity = count; + } + + //! Initialize this value as object with initial data, without calling destructor. + void SetObjectRaw(Member* members, SizeType count, Allocator& allocator) { + data_.f.flags = kObjectFlag; + if (count) { + Member* m = static_cast(allocator.Malloc(count * sizeof(Member))); + SetMembersPointer(m); + std::memcpy(m, members, count * sizeof(Member)); + } + else + SetMembersPointer(0); + data_.o.size = data_.o.capacity = count; + } + + //! Initialize this value as constant string, without calling destructor. + void SetStringRaw(StringRefType s) RAPIDJSON_NOEXCEPT { + data_.f.flags = kConstStringFlag; + SetStringPointer(s); + data_.s.length = s.length; + } + + //! Initialize this value as copy string with initial data, without calling destructor. + void SetStringRaw(StringRefType s, Allocator& allocator) { + Ch* str = 0; + if (ShortString::Usable(s.length)) { + data_.f.flags = kShortStringFlag; + data_.ss.SetLength(s.length); + str = data_.ss.str; + } else { + data_.f.flags = kCopyStringFlag; + data_.s.length = s.length; + str = static_cast(allocator.Malloc((s.length + 1) * sizeof(Ch))); + SetStringPointer(str); + } + std::memcpy(str, s, s.length * sizeof(Ch)); + str[s.length] = '\0'; + } + + //! 
Assignment without calling destructor + void RawAssign(GenericValue& rhs) RAPIDJSON_NOEXCEPT { + data_ = rhs.data_; + // data_.f.flags = rhs.data_.f.flags; + rhs.data_.f.flags = kNullFlag; + } + + template + bool StringEqual(const GenericValue& rhs) const { + RAPIDJSON_ASSERT(IsString()); + RAPIDJSON_ASSERT(rhs.IsString()); + + const SizeType len1 = GetStringLength(); + const SizeType len2 = rhs.GetStringLength(); + if(len1 != len2) { return false; } + + const Ch* const str1 = GetString(); + const Ch* const str2 = rhs.GetString(); + if(str1 == str2) { return true; } // fast path for constant string + + return (std::memcmp(str1, str2, sizeof(Ch) * len1) == 0); + } + + Data data_; +}; + +//! GenericValue with UTF8 encoding +typedef GenericValue > Value; + +/////////////////////////////////////////////////////////////////////////////// +// GenericDocument + +//! A document for parsing JSON text as DOM. +/*! + \note implements Handler concept + \tparam Encoding Encoding for both parsing and string storage. + \tparam Allocator Allocator for allocating memory for the DOM + \tparam StackAllocator Allocator for allocating memory for stack during parsing. + \warning Although GenericDocument inherits from GenericValue, the API does \b not provide any virtual functions, especially no virtual destructor. To avoid memory leaks, do not \c delete a GenericDocument object via a pointer to a GenericValue. +*/ +template , typename StackAllocator = CrtAllocator> +class GenericDocument : public GenericValue { +public: + typedef typename Encoding::Ch Ch; //!< Character type derived from Encoding. + typedef GenericValue ValueType; //!< Value type of the document. + typedef Allocator AllocatorType; //!< Allocator type from template parameter. + + //! Constructor + /*! Creates an empty document of specified type. + \param type Mandatory type of object to create. + \param allocator Optional allocator for allocating memory. + \param stackCapacity Optional initial capacity of stack in bytes. + \param stackAllocator Optional allocator for allocating memory for stack. + */ + explicit GenericDocument(Type type, Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) : + GenericValue(type), allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + } + + //! Constructor + /*! Creates an empty document which type is Null. + \param allocator Optional allocator for allocating memory. + \param stackCapacity Optional initial capacity of stack in bytes. + \param stackAllocator Optional allocator for allocating memory for stack. + */ + GenericDocument(Allocator* allocator = 0, size_t stackCapacity = kDefaultStackCapacity, StackAllocator* stackAllocator = 0) : + allocator_(allocator), ownAllocator_(0), stack_(stackAllocator, stackCapacity), parseResult_() + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! 
Move constructor in C++11 + GenericDocument(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT + : ValueType(std::forward(rhs)), // explicit cast to avoid prohibited move from Document + allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + stack_(std::move(rhs.stack_)), + parseResult_(rhs.parseResult_) + { + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.parseResult_ = ParseResult(); + } +#endif + + ~GenericDocument() { + Destroy(); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! Move assignment in C++11 + GenericDocument& operator=(GenericDocument&& rhs) RAPIDJSON_NOEXCEPT + { + // The cast to ValueType is necessary here, because otherwise it would + // attempt to call GenericValue's templated assignment operator. + ValueType::operator=(std::forward(rhs)); + + // Calling the destructor here would prematurely call stack_'s destructor + Destroy(); + + allocator_ = rhs.allocator_; + ownAllocator_ = rhs.ownAllocator_; + stack_ = std::move(rhs.stack_); + parseResult_ = rhs.parseResult_; + + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.parseResult_ = ParseResult(); + + return *this; + } +#endif + + //! Exchange the contents of this document with those of another. + /*! + \param rhs Another document. + \note Constant complexity. + \see GenericValue::Swap + */ + GenericDocument& Swap(GenericDocument& rhs) RAPIDJSON_NOEXCEPT { + ValueType::Swap(rhs); + stack_.Swap(rhs.stack_); + internal::Swap(allocator_, rhs.allocator_); + internal::Swap(ownAllocator_, rhs.ownAllocator_); + internal::Swap(parseResult_, rhs.parseResult_); + return *this; + } + + //! free-standing swap function helper + /*! + Helper function to enable support for common swap implementation pattern based on \c std::swap: + \code + void swap(MyClass& a, MyClass& b) { + using std::swap; + swap(a.doc, b.doc); + // ... + } + \endcode + \see Swap() + */ + friend inline void swap(GenericDocument& a, GenericDocument& b) RAPIDJSON_NOEXCEPT { a.Swap(b); } + + //! Populate this document by a generator which produces SAX events. + /*! \tparam Generator A functor with bool f(Handler) prototype. + \param g Generator functor which sends SAX events to the parameter. + \return The document itself for fluent API. + */ + template + GenericDocument& Populate(Generator& g) { + ClearStackOnExit scope(*this); + if (g(*this)) { + RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object + ValueType::operator=(*stack_.template Pop(1));// Move value from stack to document + } + return *this; + } + + //!@name Parse from stream + //!@{ + + //! Parse JSON text from an input stream (with Encoding conversion) + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam SourceEncoding Encoding of input stream + \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + GenericReader reader( + stack_.HasAllocator() ? &stack_.GetAllocator() : 0); + ClearStackOnExit scope(*this); + parseResult_ = reader.template Parse(is, *this); + if (parseResult_) { + RAPIDJSON_ASSERT(stack_.GetSize() == sizeof(ValueType)); // Got one and only one root object + ValueType::operator=(*stack_.template Pop(1));// Move value from stack to document + } + return *this; + } + + //! Parse JSON text from an input stream + /*! \tparam parseFlags Combination of \ref ParseFlag. 
+ \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + return ParseStream(is); + } + + //! Parse JSON text from an input stream (with \ref kParseDefaultFlags) + /*! \tparam InputStream Type of input stream, implementing Stream concept + \param is Input stream to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseStream(InputStream& is) { + return ParseStream(is); + } + //!@} + + //!@name Parse in-place from mutable string + //!@{ + + //! Parse JSON text from a mutable string + /*! \tparam parseFlags Combination of \ref ParseFlag. + \param str Mutable zero-terminated string to be parsed. + \return The document itself for fluent API. + */ + template + GenericDocument& ParseInsitu(Ch* str) { + GenericInsituStringStream s(str); + return ParseStream(s); + } + + //! Parse JSON text from a mutable string (with \ref kParseDefaultFlags) + /*! \param str Mutable zero-terminated string to be parsed. + \return The document itself for fluent API. + */ + GenericDocument& ParseInsitu(Ch* str) { + return ParseInsitu(str); + } + //!@} + + //!@name Parse from read-only string + //!@{ + + //! Parse JSON text from a read-only string (with Encoding conversion) + /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). + \tparam SourceEncoding Transcoding from input Encoding + \param str Read-only zero-terminated string to be parsed. + */ + template + GenericDocument& Parse(const typename SourceEncoding::Ch* str) { + RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); + GenericStringStream s(str); + return ParseStream(s); + } + + //! Parse JSON text from a read-only string + /*! \tparam parseFlags Combination of \ref ParseFlag (must not contain \ref kParseInsituFlag). + \param str Read-only zero-terminated string to be parsed. + */ + template + GenericDocument& Parse(const Ch* str) { + return Parse(str); + } + + //! Parse JSON text from a read-only string (with \ref kParseDefaultFlags) + /*! \param str Read-only zero-terminated string to be parsed. + */ + GenericDocument& Parse(const Ch* str) { + return Parse(str); + } + + template + GenericDocument& Parse(const typename SourceEncoding::Ch* str, size_t length) { + RAPIDJSON_ASSERT(!(parseFlags & kParseInsituFlag)); + MemoryStream ms(static_cast(str), length * sizeof(typename SourceEncoding::Ch)); + EncodedInputStream is(ms); + ParseStream(is); + return *this; + } + + template + GenericDocument& Parse(const Ch* str, size_t length) { + return Parse(str, length); + } + + GenericDocument& Parse(const Ch* str, size_t length) { + return Parse(str, length); + } + +#if RAPIDJSON_HAS_STDSTRING + template + GenericDocument& Parse(const std::basic_string& str) { + // c_str() is constant complexity according to standard. Should be faster than Parse(const char*, size_t) + return Parse(str.c_str()); + } + + template + GenericDocument& Parse(const std::basic_string& str) { + return Parse(str.c_str()); + } + + GenericDocument& Parse(const std::basic_string& str) { + return Parse(str); + } +#endif // RAPIDJSON_HAS_STDSTRING + + //!@} + + //!@name Handling parse errors + //!@{ + + //! Whether a parse error has occured in the last parsing. + bool HasParseError() const { return parseResult_.IsError(); } + + //! Get the \ref ParseErrorCode of last parsing. 
+ ParseErrorCode GetParseError() const { return parseResult_.Code(); } + + //! Get the position of last parsing error in input, 0 otherwise. + size_t GetErrorOffset() const { return parseResult_.Offset(); } + + //! Implicit conversion to get the last parse result +#ifndef __clang // -Wdocumentation + /*! \return \ref ParseResult of the last parse operation + + \code + Document doc; + ParseResult ok = doc.Parse(json); + if (!ok) + printf( "JSON parse error: %s (%u)\n", GetParseError_En(ok.Code()), ok.Offset()); + \endcode + */ +#endif + operator ParseResult() const { return parseResult_; } + //!@} + + //! Get the allocator of this document. + Allocator& GetAllocator() { + RAPIDJSON_ASSERT(allocator_); + return *allocator_; + } + + //! Get the capacity of stack in bytes. + size_t GetStackCapacity() const { return stack_.GetCapacity(); } + +private: + // clear stack on any exit from ParseStream, e.g. due to exception + struct ClearStackOnExit { + explicit ClearStackOnExit(GenericDocument& d) : d_(d) {} + ~ClearStackOnExit() { d_.ClearStack(); } + private: + ClearStackOnExit(const ClearStackOnExit&); + ClearStackOnExit& operator=(const ClearStackOnExit&); + GenericDocument& d_; + }; + + // callers of the following private Handler functions + // template friend class GenericReader; // for parsing + template friend class GenericValue; // for deep copying + +public: + // Implementation of Handler + bool Null() { new (stack_.template Push()) ValueType(); return true; } + bool Bool(bool b) { new (stack_.template Push()) ValueType(b); return true; } + bool Int(int i) { new (stack_.template Push()) ValueType(i); return true; } + bool Uint(unsigned i) { new (stack_.template Push()) ValueType(i); return true; } + bool Int64(int64_t i) { new (stack_.template Push()) ValueType(i); return true; } + bool Uint64(uint64_t i) { new (stack_.template Push()) ValueType(i); return true; } + bool Double(double d) { new (stack_.template Push()) ValueType(d); return true; } + + bool RawNumber(const Ch* str, SizeType length, bool copy) { + if (copy) + new (stack_.template Push()) ValueType(str, length, GetAllocator()); + else + new (stack_.template Push()) ValueType(str, length); + return true; + } + + bool String(const Ch* str, SizeType length, bool copy) { + if (copy) + new (stack_.template Push()) ValueType(str, length, GetAllocator()); + else + new (stack_.template Push()) ValueType(str, length); + return true; + } + + bool StartObject() { new (stack_.template Push()) ValueType(kObjectType); return true; } + + bool Key(const Ch* str, SizeType length, bool copy) { return String(str, length, copy); } + + bool EndObject(SizeType memberCount) { + typename ValueType::Member* members = stack_.template Pop(memberCount); + stack_.template Top()->SetObjectRaw(members, memberCount, GetAllocator()); + return true; + } + + bool StartArray() { new (stack_.template Push()) ValueType(kArrayType); return true; } + + bool EndArray(SizeType elementCount) { + ValueType* elements = stack_.template Pop(elementCount); + stack_.template Top()->SetArrayRaw(elements, elementCount, GetAllocator()); + return true; + } + +private: + //! Prohibit copying + GenericDocument(const GenericDocument&); + //! 
Prohibit assignment + GenericDocument& operator=(const GenericDocument&); + + void ClearStack() { + if (Allocator::kNeedFree) + while (stack_.GetSize() > 0) // Here assumes all elements in stack array are GenericValue (Member is actually 2 GenericValue objects) + (stack_.template Pop(1))->~ValueType(); + else + stack_.Clear(); + stack_.ShrinkToFit(); + } + + void Destroy() { + RAPIDJSON_DELETE(ownAllocator_); + } + + static const size_t kDefaultStackCapacity = 1024; + Allocator* allocator_; + Allocator* ownAllocator_; + internal::Stack stack_; + ParseResult parseResult_; +}; + +//! GenericDocument with UTF8 encoding +typedef GenericDocument > Document; + +// defined here due to the dependency on GenericDocument +template +template +inline +GenericValue::GenericValue(const GenericValue& rhs, Allocator& allocator) +{ + switch (rhs.GetType()) { + case kObjectType: + case kArrayType: { // perform deep copy via SAX Handler + GenericDocument d(&allocator); + rhs.Accept(d); + RawAssign(*d.stack_.template Pop(1)); + } + break; + case kStringType: + if (rhs.data_.f.flags == kConstStringFlag) { + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + } else { + SetStringRaw(StringRef(rhs.GetString(), rhs.GetStringLength()), allocator); + } + break; + default: + data_.f.flags = rhs.data_.f.flags; + data_ = *reinterpret_cast(&rhs.data_); + break; + } +} + +//! Helper class for accessing Value of array type. +/*! + Instance of this helper class is obtained by \c GenericValue::GetArray(). + In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. +*/ +template +class GenericArray { +public: + typedef GenericArray ConstArray; + typedef GenericArray Array; + typedef ValueT PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef ValueType* ValueIterator; // This may be const or non-const iterator + typedef const ValueT* ConstValueIterator; + typedef typename ValueType::AllocatorType AllocatorType; + typedef typename ValueType::StringRefType StringRefType; + + template + friend class GenericValue; + + GenericArray(const GenericArray& rhs) : value_(rhs.value_) {} + GenericArray& operator=(const GenericArray& rhs) { value_ = rhs.value_; return *this; } + ~GenericArray() {} + + SizeType Size() const { return value_.Size(); } + SizeType Capacity() const { return value_.Capacity(); } + bool Empty() const { return value_.Empty(); } + void Clear() const { value_.Clear(); } + ValueType& operator[](SizeType index) const { return value_[index]; } + ValueIterator Begin() const { return value_.Begin(); } + ValueIterator End() const { return value_.End(); } + GenericArray Reserve(SizeType newCapacity, AllocatorType &allocator) const { value_.Reserve(newCapacity, allocator); return *this; } + GenericArray PushBack(ValueType& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericArray PushBack(ValueType&& value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericArray PushBack(StringRefType value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (const GenericArray&)) PushBack(T value, AllocatorType& allocator) const { value_.PushBack(value, allocator); return *this; } + GenericArray PopBack() const { value_.PopBack(); return *this; } 
+ ValueIterator Erase(ConstValueIterator pos) const { return value_.Erase(pos); } + ValueIterator Erase(ConstValueIterator first, ConstValueIterator last) const { return value_.Erase(first, last); } + +#if RAPIDJSON_HAS_CXX11_RANGE_FOR + ValueIterator begin() const { return value_.Begin(); } + ValueIterator end() const { return value_.End(); } +#endif + +private: + GenericArray(); + GenericArray(ValueType& value) : value_(value) {} + ValueType& value_; +}; + +//! Helper class for accessing Value of object type. +/*! + Instance of this helper class is obtained by \c GenericValue::GetObject(). + In addition to all APIs for array type, it provides range-based for loop if \c RAPIDJSON_HAS_CXX11_RANGE_FOR=1. +*/ +template +class GenericObject { +public: + typedef GenericObject ConstObject; + typedef GenericObject Object; + typedef ValueT PlainType; + typedef typename internal::MaybeAddConst::Type ValueType; + typedef GenericMemberIterator MemberIterator; // This may be const or non-const iterator + typedef GenericMemberIterator ConstMemberIterator; + typedef typename ValueType::AllocatorType AllocatorType; + typedef typename ValueType::StringRefType StringRefType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename ValueType::Ch Ch; + + template + friend class GenericValue; + + GenericObject(const GenericObject& rhs) : value_(rhs.value_) {} + GenericObject& operator=(const GenericObject& rhs) { value_ = rhs.value_; return *this; } + ~GenericObject() {} + + SizeType MemberCount() const { return value_.MemberCount(); } + bool ObjectEmpty() const { return value_.ObjectEmpty(); } + template ValueType& operator[](T* name) const { return value_[name]; } + template ValueType& operator[](const GenericValue& name) const { return value_[name]; } +#if RAPIDJSON_HAS_STDSTRING + ValueType& operator[](const std::basic_string& name) const { return value_[name]; } +#endif + MemberIterator MemberBegin() const { return value_.MemberBegin(); } + MemberIterator MemberEnd() const { return value_.MemberEnd(); } + bool HasMember(const Ch* name) const { return value_.HasMember(name); } +#if RAPIDJSON_HAS_STDSTRING + bool HasMember(const std::basic_string& name) const { return value_.HasMember(name); } +#endif + template bool HasMember(const GenericValue& name) const { return value_.HasMember(name); } + MemberIterator FindMember(const Ch* name) const { return value_.FindMember(name); } + template MemberIterator FindMember(const GenericValue& name) const { return value_.FindMember(name); } +#if RAPIDJSON_HAS_STDSTRING + MemberIterator FindMember(const std::basic_string& name) const { return value_.FindMember(name); } +#endif + GenericObject AddMember(ValueType& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } + GenericObject AddMember(ValueType& name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#if RAPIDJSON_HAS_STDSTRING + GenericObject AddMember(ValueType& name, std::basic_string& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#endif + template RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) AddMember(ValueType& name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; } +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericObject AddMember(ValueType&& name, ValueType&& value, AllocatorType& allocator) const { 
value_.AddMember(name, value, allocator); return *this; }
+    GenericObject AddMember(ValueType&& name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+    GenericObject AddMember(ValueType& name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+    GenericObject AddMember(StringRefType name, ValueType&& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS
+    GenericObject AddMember(StringRefType name, ValueType& value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+    GenericObject AddMember(StringRefType name, StringRefType value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+    template <typename T> RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr<internal::IsPointer<T>, internal::IsGenericValue<T> >), (GenericObject)) AddMember(StringRefType name, T value, AllocatorType& allocator) const { value_.AddMember(name, value, allocator); return *this; }
+    void RemoveAllMembers() { return value_.RemoveAllMembers(); }
+    bool RemoveMember(const Ch* name) const { return value_.RemoveMember(name); }
+#if RAPIDJSON_HAS_STDSTRING
+    bool RemoveMember(const std::basic_string<Ch>& name) const { return value_.RemoveMember(name); }
+#endif
+    template <typename SourceAllocator> bool RemoveMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.RemoveMember(name); }
+    MemberIterator RemoveMember(MemberIterator m) const { return value_.RemoveMember(m); }
+    MemberIterator EraseMember(ConstMemberIterator pos) const { return value_.EraseMember(pos); }
+    MemberIterator EraseMember(ConstMemberIterator first, ConstMemberIterator last) const { return value_.EraseMember(first, last); }
+    bool EraseMember(const Ch* name) const { return value_.EraseMember(name); }
+#if RAPIDJSON_HAS_STDSTRING
+    bool EraseMember(const std::basic_string<Ch>& name) const { return EraseMember(ValueType(StringRef(name))); }
+#endif
+    template <typename SourceAllocator> bool EraseMember(const GenericValue<EncodingType, SourceAllocator>& name) const { return value_.EraseMember(name); }
+
+#if RAPIDJSON_HAS_CXX11_RANGE_FOR
+    MemberIterator begin() const { return value_.MemberBegin(); }
+    MemberIterator end() const { return value_.MemberEnd(); }
+#endif
+
+private:
+    GenericObject();
+    GenericObject(ValueType& value) : value_(value) {}
+    ValueType& value_;
+};
+
+RAPIDJSON_NAMESPACE_END
+RAPIDJSON_DIAG_POP
+
+#endif // RAPIDJSON_DOCUMENT_H_
diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/encodedstream.h b/contrib/rapidjson/1.1.0/include/rapidjson/encodedstream.h
new file mode 100644
index 0000000..1450683
--- /dev/null
+++ b/contrib/rapidjson/1.1.0/include/rapidjson/encodedstream.h
@@ -0,0 +1,299 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+ +#ifndef RAPIDJSON_ENCODEDSTREAM_H_ +#define RAPIDJSON_ENCODEDSTREAM_H_ + +#include "stream.h" +#include "memorystream.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Input byte stream wrapper with a statically bound encoding. +/*! + \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. + \tparam InputByteStream Type of input byte stream. For example, FileReadStream. +*/ +template +class EncodedInputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); +public: + typedef typename Encoding::Ch Ch; + + EncodedInputStream(InputByteStream& is) : is_(is) { + current_ = Encoding::TakeBOM(is_); + } + + Ch Peek() const { return current_; } + Ch Take() { Ch c = current_; current_ = Encoding::Take(is_); return c; } + size_t Tell() const { return is_.Tell(); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + EncodedInputStream(const EncodedInputStream&); + EncodedInputStream& operator=(const EncodedInputStream&); + + InputByteStream& is_; + Ch current_; +}; + +//! Specialized for UTF8 MemoryStream. +template <> +class EncodedInputStream, MemoryStream> { +public: + typedef UTF8<>::Ch Ch; + + EncodedInputStream(MemoryStream& is) : is_(is) { + if (static_cast(is_.Peek()) == 0xEFu) is_.Take(); + if (static_cast(is_.Peek()) == 0xBBu) is_.Take(); + if (static_cast(is_.Peek()) == 0xBFu) is_.Take(); + } + Ch Peek() const { return is_.Peek(); } + Ch Take() { return is_.Take(); } + size_t Tell() const { return is_.Tell(); } + + // Not implemented + void Put(Ch) {} + void Flush() {} + Ch* PutBegin() { return 0; } + size_t PutEnd(Ch*) { return 0; } + + MemoryStream& is_; + +private: + EncodedInputStream(const EncodedInputStream&); + EncodedInputStream& operator=(const EncodedInputStream&); +}; + +//! Output byte stream wrapper with statically bound encoding. +/*! + \tparam Encoding The interpretation of encoding of the stream. Either UTF8, UTF16LE, UTF16BE, UTF32LE, UTF32BE. + \tparam OutputByteStream Type of input byte stream. For example, FileWriteStream. +*/ +template +class EncodedOutputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); +public: + typedef typename Encoding::Ch Ch; + + EncodedOutputStream(OutputByteStream& os, bool putBOM = true) : os_(os) { + if (putBOM) + Encoding::PutBOM(os_); + } + + void Put(Ch c) { Encoding::Put(os_, c); } + void Flush() { os_.Flush(); } + + // Not implemented + Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} + Ch Take() { RAPIDJSON_ASSERT(false); return 0;} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + EncodedOutputStream(const EncodedOutputStream&); + EncodedOutputStream& operator=(const EncodedOutputStream&); + + OutputByteStream& os_; +}; + +#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x + +//! Input stream wrapper with dynamically bound encoding and automatic encoding detection. +/*! + \tparam CharType Type of character for reading. + \tparam InputByteStream type of input byte stream to be wrapped. 
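+
+    A usage sketch, assuming a FileReadStream (declared in filereadstream.h, not part of
+    this hunk) wrapping an already opened \c FILE* \c fp:
+    \code
+    char buffer[256];
+    FileReadStream bis(fp, buffer, sizeof(buffer));
+    AutoUTFInputStream<unsigned, FileReadStream> eis(bis);  // encoding detected from BOM or content
+    Document d;
+    d.ParseStream<0, AutoUTF<unsigned> >(eis);              // transcode into the document's UTF-8
+    \endcode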
+*/ +template +class AutoUTFInputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); +public: + typedef CharType Ch; + + //! Constructor. + /*! + \param is input stream to be wrapped. + \param type UTF encoding type if it is not detected from the stream. + */ + AutoUTFInputStream(InputByteStream& is, UTFType type = kUTF8) : is_(&is), type_(type), hasBOM_(false) { + RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); + DetectType(); + static const TakeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Take) }; + takeFunc_ = f[type_]; + current_ = takeFunc_(*is_); + } + + UTFType GetType() const { return type_; } + bool HasBOM() const { return hasBOM_; } + + Ch Peek() const { return current_; } + Ch Take() { Ch c = current_; current_ = takeFunc_(*is_); return c; } + size_t Tell() const { return is_->Tell(); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + AutoUTFInputStream(const AutoUTFInputStream&); + AutoUTFInputStream& operator=(const AutoUTFInputStream&); + + // Detect encoding type with BOM or RFC 4627 + void DetectType() { + // BOM (Byte Order Mark): + // 00 00 FE FF UTF-32BE + // FF FE 00 00 UTF-32LE + // FE FF UTF-16BE + // FF FE UTF-16LE + // EF BB BF UTF-8 + + const unsigned char* c = reinterpret_cast(is_->Peek4()); + if (!c) + return; + + unsigned bom = static_cast(c[0] | (c[1] << 8) | (c[2] << 16) | (c[3] << 24)); + hasBOM_ = false; + if (bom == 0xFFFE0000) { type_ = kUTF32BE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } + else if (bom == 0x0000FEFF) { type_ = kUTF32LE; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); is_->Take(); } + else if ((bom & 0xFFFF) == 0xFFFE) { type_ = kUTF16BE; hasBOM_ = true; is_->Take(); is_->Take(); } + else if ((bom & 0xFFFF) == 0xFEFF) { type_ = kUTF16LE; hasBOM_ = true; is_->Take(); is_->Take(); } + else if ((bom & 0xFFFFFF) == 0xBFBBEF) { type_ = kUTF8; hasBOM_ = true; is_->Take(); is_->Take(); is_->Take(); } + + // RFC 4627: Section 3 + // "Since the first two characters of a JSON text will always be ASCII + // characters [RFC0020], it is possible to determine whether an octet + // stream is UTF-8, UTF-16 (BE or LE), or UTF-32 (BE or LE) by looking + // at the pattern of nulls in the first four octets." + // 00 00 00 xx UTF-32BE + // 00 xx 00 xx UTF-16BE + // xx 00 00 00 UTF-32LE + // xx 00 xx 00 UTF-16LE + // xx xx xx xx UTF-8 + + if (!hasBOM_) { + unsigned pattern = (c[0] ? 1 : 0) | (c[1] ? 2 : 0) | (c[2] ? 4 : 0) | (c[3] ? 8 : 0); + switch (pattern) { + case 0x08: type_ = kUTF32BE; break; + case 0x0A: type_ = kUTF16BE; break; + case 0x01: type_ = kUTF32LE; break; + case 0x05: type_ = kUTF16LE; break; + case 0x0F: type_ = kUTF8; break; + default: break; // Use type defined by user. + } + } + + // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. + if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); + if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); + } + + typedef Ch (*TakeFunc)(InputByteStream& is); + InputByteStream* is_; + UTFType type_; + Ch current_; + TakeFunc takeFunc_; + bool hasBOM_; +}; + +//! Output stream wrapper with dynamically bound encoding and automatic encoding detection. +/*! + \tparam CharType Type of character for writing. 
+ \tparam OutputByteStream type of output byte stream to be wrapped. +*/ +template +class AutoUTFOutputStream { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); +public: + typedef CharType Ch; + + //! Constructor. + /*! + \param os output stream to be wrapped. + \param type UTF encoding type. + \param putBOM Whether to write BOM at the beginning of the stream. + */ + AutoUTFOutputStream(OutputByteStream& os, UTFType type, bool putBOM) : os_(&os), type_(type) { + RAPIDJSON_ASSERT(type >= kUTF8 && type <= kUTF32BE); + + // Runtime check whether the size of character type is sufficient. It only perform checks with assertion. + if (type_ == kUTF16LE || type_ == kUTF16BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 2); + if (type_ == kUTF32LE || type_ == kUTF32BE) RAPIDJSON_ASSERT(sizeof(Ch) >= 4); + + static const PutFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Put) }; + putFunc_ = f[type_]; + + if (putBOM) + PutBOM(); + } + + UTFType GetType() const { return type_; } + + void Put(Ch c) { putFunc_(*os_, c); } + void Flush() { os_->Flush(); } + + // Not implemented + Ch Peek() const { RAPIDJSON_ASSERT(false); return 0;} + Ch Take() { RAPIDJSON_ASSERT(false); return 0;} + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + AutoUTFOutputStream(const AutoUTFOutputStream&); + AutoUTFOutputStream& operator=(const AutoUTFOutputStream&); + + void PutBOM() { + typedef void (*PutBOMFunc)(OutputByteStream&); + static const PutBOMFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(PutBOM) }; + f[type_](*os_); + } + + typedef void (*PutFunc)(OutputByteStream&, Ch); + + OutputByteStream* os_; + UTFType type_; + PutFunc putFunc_; +}; + +#undef RAPIDJSON_ENCODINGS_FUNC + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/encodings.h b/contrib/rapidjson/1.1.0/include/rapidjson/encodings.h new file mode 100644 index 0000000..baa7c2b --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/encodings.h @@ -0,0 +1,716 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ENCODINGS_H_ +#define RAPIDJSON_ENCODINGS_H_ + +#include "rapidjson.h" + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4244) // conversion from 'type1' to 'type2', possible loss of data +RAPIDJSON_DIAG_OFF(4702) // unreachable code +#elif defined(__GNUC__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +RAPIDJSON_DIAG_OFF(overflow) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Encoding + +/*! \class rapidjson::Encoding + \brief Concept for encoding of Unicode characters. 
+ +\code +concept Encoding { + typename Ch; //! Type of character. A "character" is actually a code unit in unicode's definition. + + enum { supportUnicode = 1 }; // or 0 if not supporting unicode + + //! \brief Encode a Unicode codepoint to an output stream. + //! \param os Output stream. + //! \param codepoint An unicode codepoint, ranging from 0x0 to 0x10FFFF inclusively. + template + static void Encode(OutputStream& os, unsigned codepoint); + + //! \brief Decode a Unicode codepoint from an input stream. + //! \param is Input stream. + //! \param codepoint Output of the unicode codepoint. + //! \return true if a valid codepoint can be decoded from the stream. + template + static bool Decode(InputStream& is, unsigned* codepoint); + + //! \brief Validate one Unicode codepoint from an encoded stream. + //! \param is Input stream to obtain codepoint. + //! \param os Output for copying one codepoint. + //! \return true if it is valid. + //! \note This function just validating and copying the codepoint without actually decode it. + template + static bool Validate(InputStream& is, OutputStream& os); + + // The following functions are deal with byte streams. + + //! Take a character from input byte stream, skip BOM if exist. + template + static CharType TakeBOM(InputByteStream& is); + + //! Take a character from input byte stream. + template + static Ch Take(InputByteStream& is); + + //! Put BOM to output byte stream. + template + static void PutBOM(OutputByteStream& os); + + //! Put a character to output byte stream. + template + static void Put(OutputByteStream& os, Ch c); +}; +\endcode +*/ + +/////////////////////////////////////////////////////////////////////////////// +// UTF8 + +//! UTF-8 encoding. +/*! http://en.wikipedia.org/wiki/UTF-8 + http://tools.ietf.org/html/rfc3629 + \tparam CharType Code unit for storing 8-bit UTF-8 data. Default is char. 
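+
+    Worked example (editorial note): the code point U+00E9 encodes as the two bytes
+    0xC3 0xA9 (110xxxxx 10xxxxxx), and U+20AC encodes as the three bytes 0xE2 0x82 0xAC
+    (1110xxxx 10xxxxxx 10xxxxxx), matching the branches of Encode() below.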
+ \note implements Encoding concept +*/ +template +struct UTF8 { + typedef CharType Ch; + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + if (codepoint <= 0x7F) + os.Put(static_cast(codepoint & 0xFF)); + else if (codepoint <= 0x7FF) { + os.Put(static_cast(0xC0 | ((codepoint >> 6) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint & 0x3F)))); + } + else if (codepoint <= 0xFFFF) { + os.Put(static_cast(0xE0 | ((codepoint >> 12) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + os.Put(static_cast(0x80 | (codepoint & 0x3F))); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + os.Put(static_cast(0xF0 | ((codepoint >> 18) & 0xFF))); + os.Put(static_cast(0x80 | ((codepoint >> 12) & 0x3F))); + os.Put(static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + os.Put(static_cast(0x80 | (codepoint & 0x3F))); + } + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + if (codepoint <= 0x7F) + PutUnsafe(os, static_cast(codepoint & 0xFF)); + else if (codepoint <= 0x7FF) { + PutUnsafe(os, static_cast(0xC0 | ((codepoint >> 6) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint & 0x3F)))); + } + else if (codepoint <= 0xFFFF) { + PutUnsafe(os, static_cast(0xE0 | ((codepoint >> 12) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | (codepoint & 0x3F))); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + PutUnsafe(os, static_cast(0xF0 | ((codepoint >> 18) & 0xFF))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 12) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | ((codepoint >> 6) & 0x3F))); + PutUnsafe(os, static_cast(0x80 | (codepoint & 0x3F))); + } + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { +#define COPY() c = is.Take(); *codepoint = (*codepoint << 6) | (static_cast(c) & 0x3Fu) +#define TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define TAIL() COPY(); TRANS(0x70) + typename InputStream::Ch c = is.Take(); + if (!(c & 0x80)) { + *codepoint = static_cast(c); + return true; + } + + unsigned char type = GetRange(static_cast(c)); + if (type >= 32) { + *codepoint = 0; + } else { + *codepoint = (0xFF >> type) & static_cast(c); + } + bool result = true; + switch (type) { + case 2: TAIL(); return result; + case 3: TAIL(); TAIL(); return result; + case 4: COPY(); TRANS(0x50); TAIL(); return result; + case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; + case 6: TAIL(); TAIL(); TAIL(); return result; + case 10: COPY(); TRANS(0x20); TAIL(); return result; + case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; + default: return false; + } +#undef COPY +#undef TRANS +#undef TAIL + } + + template + static bool Validate(InputStream& is, OutputStream& os) { +#define COPY() os.Put(c = is.Take()) +#define TRANS(mask) result &= ((GetRange(static_cast(c)) & mask) != 0) +#define TAIL() COPY(); TRANS(0x70) + Ch c; + COPY(); + if (!(c & 0x80)) + return true; + + bool result = true; + switch (GetRange(static_cast(c))) { + case 2: TAIL(); return result; + case 3: TAIL(); TAIL(); return result; + case 4: COPY(); TRANS(0x50); TAIL(); return result; + case 5: COPY(); TRANS(0x10); TAIL(); TAIL(); return result; + case 6: TAIL(); TAIL(); TAIL(); return result; + case 10: COPY(); TRANS(0x20); TAIL(); return result; + case 11: COPY(); TRANS(0x60); TAIL(); TAIL(); return result; + default: return false; + } +#undef COPY +#undef TRANS +#undef TAIL + } + + static unsigned char GetRange(unsigned 
char c) { + // Referring to DFA of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + // With new mapping 1 -> 0x10, 7 -> 0x20, 9 -> 0x40, such that AND operation can test multiple types. + static const unsigned char type[] = { + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10,0x10, + 0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40,0x40, + 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, + 0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20,0x20, + 8,8,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, + 10,3,3,3,3,3,3,3,3,3,3,3,3,4,3,3, 11,6,6,6,5,8,8,8,8,8,8,8,8,8,8,8, + }; + return type[c]; + } + + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + typename InputByteStream::Ch c = Take(is); + if (static_cast(c) != 0xEFu) return c; + c = is.Take(); + if (static_cast(c) != 0xBBu) return c; + c = is.Take(); + if (static_cast(c) != 0xBFu) return c; + c = is.Take(); + return c; + } + + template + static Ch Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + return static_cast(is.Take()); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xEFu)); + os.Put(static_cast(0xBBu)); + os.Put(static_cast(0xBFu)); + } + + template + static void Put(OutputByteStream& os, Ch c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// UTF16 + +//! UTF-16 encoding. +/*! http://en.wikipedia.org/wiki/UTF-16 + http://tools.ietf.org/html/rfc2781 + \tparam CharType Type for storing 16-bit UTF-16 data. Default is wchar_t. C++11 may use char16_t instead. + \note implements Encoding concept + + \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. + For streaming, use UTF16LE and UTF16BE, which handle endianness. 
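+
+    Worked example (editorial note): U+1F600 lies above U+FFFF, so Encode() emits a
+    surrogate pair: v = 0x1F600 - 0x10000 = 0xF600, high = 0xD800 | (v >> 10) = 0xD83D,
+    low = 0xDC00 | (v & 0x3FF) = 0xDE00.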
+*/ +template +struct UTF16 { + typedef CharType Ch; + RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 2); + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + if (codepoint <= 0xFFFF) { + RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair + os.Put(static_cast(codepoint)); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + unsigned v = codepoint - 0x10000; + os.Put(static_cast((v >> 10) | 0xD800)); + os.Put((v & 0x3FF) | 0xDC00); + } + } + + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + if (codepoint <= 0xFFFF) { + RAPIDJSON_ASSERT(codepoint < 0xD800 || codepoint > 0xDFFF); // Code point itself cannot be surrogate pair + PutUnsafe(os, static_cast(codepoint)); + } + else { + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + unsigned v = codepoint - 0x10000; + PutUnsafe(os, static_cast((v >> 10) | 0xD800)); + PutUnsafe(os, (v & 0x3FF) | 0xDC00); + } + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); + typename InputStream::Ch c = is.Take(); + if (c < 0xD800 || c > 0xDFFF) { + *codepoint = static_cast(c); + return true; + } + else if (c <= 0xDBFF) { + *codepoint = (static_cast(c) & 0x3FF) << 10; + c = is.Take(); + *codepoint |= (static_cast(c) & 0x3FF); + *codepoint += 0x10000; + return c >= 0xDC00 && c <= 0xDFFF; + } + return false; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 2); + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 2); + typename InputStream::Ch c; + os.Put(static_cast(c = is.Take())); + if (c < 0xD800 || c > 0xDFFF) + return true; + else if (c <= 0xDBFF) { + os.Put(c = is.Take()); + return c >= 0xDC00 && c <= 0xDFFF; + } + return false; + } +}; + +//! UTF-16 little endian encoding. +template +struct UTF16LE : UTF16 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0xFEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())) << 8; + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFFu)); + os.Put(static_cast(0xFEu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(static_cast(c) & 0xFFu)); + os.Put(static_cast((static_cast(c) >> 8) & 0xFFu)); + } +}; + +//! UTF-16 big endian encoding. +template +struct UTF16BE : UTF16 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0xFEFFu ? 
Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(static_cast(is.Take())) << 8; + c |= static_cast(is.Take()); + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0xFFu)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast((static_cast(c) >> 8) & 0xFFu)); + os.Put(static_cast(static_cast(c) & 0xFFu)); + } +}; + +/////////////////////////////////////////////////////////////////////////////// +// UTF32 + +//! UTF-32 encoding. +/*! http://en.wikipedia.org/wiki/UTF-32 + \tparam CharType Type for storing 32-bit UTF-32 data. Default is unsigned. C++11 may use char32_t instead. + \note implements Encoding concept + + \note For in-memory access, no need to concern endianness. The code units and code points are represented by CPU's endianness. + For streaming, use UTF32LE and UTF32BE, which handle endianness. +*/ +template +struct UTF32 { + typedef CharType Ch; + RAPIDJSON_STATIC_ASSERT(sizeof(Ch) >= 4); + + enum { supportUnicode = 1 }; + + template + static void Encode(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + os.Put(codepoint); + } + + template + static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputStream::Ch) >= 4); + RAPIDJSON_ASSERT(codepoint <= 0x10FFFF); + PutUnsafe(os, codepoint); + } + + template + static bool Decode(InputStream& is, unsigned* codepoint) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); + Ch c = is.Take(); + *codepoint = c; + return c <= 0x10FFFF; + } + + template + static bool Validate(InputStream& is, OutputStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputStream::Ch) >= 4); + Ch c; + os.Put(c = is.Take()); + return c <= 0x10FFFF; + } +}; + +//! UTF-32 little endian enocoding. +template +struct UTF32LE : UTF32 { + template + static CharType TakeBOM(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + CharType c = Take(is); + return static_cast(c) == 0x0000FEFFu ? Take(is) : c; + } + + template + static CharType Take(InputByteStream& is) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1); + unsigned c = static_cast(is.Take()); + c |= static_cast(static_cast(is.Take())) << 8; + c |= static_cast(static_cast(is.Take())) << 16; + c |= static_cast(static_cast(is.Take())) << 24; + return static_cast(c); + } + + template + static void PutBOM(OutputByteStream& os) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(0xFFu)); + os.Put(static_cast(0xFEu)); + os.Put(static_cast(0x00u)); + os.Put(static_cast(0x00u)); + } + + template + static void Put(OutputByteStream& os, CharType c) { + RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1); + os.Put(static_cast(c & 0xFFu)); + os.Put(static_cast((c >> 8) & 0xFFu)); + os.Put(static_cast((c >> 16) & 0xFFu)); + os.Put(static_cast((c >> 24) & 0xFFu)); + } +}; + +//! UTF-32 big endian encoding. 
+template<typename CharType = unsigned>
+struct UTF32BE : UTF32<CharType> {
+    template <typename InputByteStream>
+    static CharType TakeBOM(InputByteStream& is) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+        CharType c = Take(is);
+        return static_cast<uint32_t>(c) == 0x0000FEFFu ? Take(is) : c;
+    }
+
+    template <typename InputByteStream>
+    static CharType Take(InputByteStream& is) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+        unsigned c = static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 24;
+        c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 16;
+        c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take())) << 8;
+        c |= static_cast<unsigned>(static_cast<uint8_t>(is.Take()));
+        return static_cast<CharType>(c);
+    }
+
+    template <typename OutputByteStream>
+    static void PutBOM(OutputByteStream& os) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
+        os.Put(static_cast<typename OutputByteStream::Ch>(0x00u));
+        os.Put(static_cast<typename OutputByteStream::Ch>(0xFEu));
+        os.Put(static_cast<typename OutputByteStream::Ch>(0xFFu));
+    }
+
+    template <typename OutputByteStream>
+    static void Put(OutputByteStream& os, CharType c) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+        os.Put(static_cast<typename OutputByteStream::Ch>((c >> 24) & 0xFFu));
+        os.Put(static_cast<typename OutputByteStream::Ch>((c >> 16) & 0xFFu));
+        os.Put(static_cast<typename OutputByteStream::Ch>((c >> 8) & 0xFFu));
+        os.Put(static_cast<typename OutputByteStream::Ch>(c & 0xFFu));
+    }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ASCII
+
+//! ASCII encoding.
+/*! http://en.wikipedia.org/wiki/ASCII
+    \tparam CharType Code unit for storing 7-bit ASCII data. Default is char.
+    \note implements Encoding concept
+*/
+template<typename CharType = char>
+struct ASCII {
+    typedef CharType Ch;
+
+    enum { supportUnicode = 0 };
+
+    template<typename OutputStream>
+    static void Encode(OutputStream& os, unsigned codepoint) {
+        RAPIDJSON_ASSERT(codepoint <= 0x7F);
+        os.Put(static_cast<Ch>(codepoint & 0xFF));
+    }
+
+    template<typename OutputStream>
+    static void EncodeUnsafe(OutputStream& os, unsigned codepoint) {
+        RAPIDJSON_ASSERT(codepoint <= 0x7F);
+        PutUnsafe(os, static_cast<Ch>(codepoint & 0xFF));
+    }
+
+    template <typename InputStream>
+    static bool Decode(InputStream& is, unsigned* codepoint) {
+        uint8_t c = static_cast<uint8_t>(is.Take());
+        *codepoint = c;
+        return c <= 0X7F;
+    }
+
+    template <typename InputStream, typename OutputStream>
+    static bool Validate(InputStream& is, OutputStream& os) {
+        uint8_t c = static_cast<uint8_t>(is.Take());
+        os.Put(static_cast<typename OutputStream::Ch>(c));
+        return c <= 0x7F;
+    }
+
+    template <typename InputByteStream>
+    static CharType TakeBOM(InputByteStream& is) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+        uint8_t c = static_cast<uint8_t>(Take(is));
+        return static_cast<Ch>(c);
+    }
+
+    template <typename InputByteStream>
+    static Ch Take(InputByteStream& is) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename InputByteStream::Ch) == 1);
+        return static_cast<Ch>(is.Take());
+    }
+
+    template <typename OutputByteStream>
+    static void PutBOM(OutputByteStream& os) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+        (void)os;
+    }
+
+    template <typename OutputByteStream>
+    static void Put(OutputByteStream& os, Ch c) {
+        RAPIDJSON_STATIC_ASSERT(sizeof(typename OutputByteStream::Ch) == 1);
+        os.Put(static_cast<typename OutputByteStream::Ch>(c));
+    }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// AutoUTF
+
+//! Runtime-specified UTF encoding type of a stream.
+enum UTFType {
+    kUTF8 = 0,      //!< UTF-8.
+    kUTF16LE = 1,   //!< UTF-16 little endian.
+    kUTF16BE = 2,   //!< UTF-16 big endian.
+    kUTF32LE = 3,   //!< UTF-32 little endian.
+    kUTF32BE = 4    //!< UTF-32 big endian.
+};
+
+//! Dynamically select encoding according to stream's runtime-specified UTF encoding type.
+/*! \note This class can be used with AutoUTFInputStream and AutoUTFOutputStream, which provide GetType().
+*/ +template +struct AutoUTF { + typedef CharType Ch; + + enum { supportUnicode = 1 }; + +#define RAPIDJSON_ENCODINGS_FUNC(x) UTF8::x, UTF16LE::x, UTF16BE::x, UTF32LE::x, UTF32BE::x + + template + RAPIDJSON_FORCEINLINE static void Encode(OutputStream& os, unsigned codepoint) { + typedef void (*EncodeFunc)(OutputStream&, unsigned); + static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Encode) }; + (*f[os.GetType()])(os, codepoint); + } + + template + RAPIDJSON_FORCEINLINE static void EncodeUnsafe(OutputStream& os, unsigned codepoint) { + typedef void (*EncodeFunc)(OutputStream&, unsigned); + static const EncodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(EncodeUnsafe) }; + (*f[os.GetType()])(os, codepoint); + } + + template + RAPIDJSON_FORCEINLINE static bool Decode(InputStream& is, unsigned* codepoint) { + typedef bool (*DecodeFunc)(InputStream&, unsigned*); + static const DecodeFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Decode) }; + return (*f[is.GetType()])(is, codepoint); + } + + template + RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + typedef bool (*ValidateFunc)(InputStream&, OutputStream&); + static const ValidateFunc f[] = { RAPIDJSON_ENCODINGS_FUNC(Validate) }; + return (*f[is.GetType()])(is, os); + } + +#undef RAPIDJSON_ENCODINGS_FUNC +}; + +/////////////////////////////////////////////////////////////////////////////// +// Transcoder + +//! Encoding conversion. +template +struct Transcoder { + //! Take one Unicode codepoint from source encoding, convert it to target encoding and put it to the output stream. + template + RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { + unsigned codepoint; + if (!SourceEncoding::Decode(is, &codepoint)) + return false; + TargetEncoding::Encode(os, codepoint); + return true; + } + + template + RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + unsigned codepoint; + if (!SourceEncoding::Decode(is, &codepoint)) + return false; + TargetEncoding::EncodeUnsafe(os, codepoint); + return true; + } + + //! Validate one Unicode codepoint from an encoded stream. + template + RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + return Transcode(is, os); // Since source/target encoding is different, must transcode. + } +}; + +// Forward declaration. +template +inline void PutUnsafe(Stream& stream, typename Stream::Ch c); + +//! Specialization of Transcoder with same source and target encoding. +template +struct Transcoder { + template + RAPIDJSON_FORCEINLINE static bool Transcode(InputStream& is, OutputStream& os) { + os.Put(is.Take()); // Just copy one code unit. This semantic is different from primary template class. + return true; + } + + template + RAPIDJSON_FORCEINLINE static bool TranscodeUnsafe(InputStream& is, OutputStream& os) { + PutUnsafe(os, is.Take()); // Just copy one code unit. This semantic is different from primary template class. 
+ return true; + } + + template + RAPIDJSON_FORCEINLINE static bool Validate(InputStream& is, OutputStream& os) { + return Encoding::Validate(is, os); // source/target encoding are the same + } +}; + +RAPIDJSON_NAMESPACE_END + +#if defined(__GNUC__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ENCODINGS_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/error/en.h b/contrib/rapidjson/1.1.0/include/rapidjson/error/en.h new file mode 100644 index 0000000..2db838b --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/error/en.h @@ -0,0 +1,74 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ERROR_EN_H_ +#define RAPIDJSON_ERROR_EN_H_ + +#include "error.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(covered-switch-default) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Maps error code of parsing into error message. +/*! + \ingroup RAPIDJSON_ERRORS + \param parseErrorCode Error code obtained in parsing. + \return the error message. + \note User can make a copy of this function for localization. + Using switch-case is safer for future modification of error codes. 
+*/
+inline const RAPIDJSON_ERROR_CHARTYPE* GetParseError_En(ParseErrorCode parseErrorCode) {
+    switch (parseErrorCode) {
+        case kParseErrorNone:                          return RAPIDJSON_ERROR_STRING("No error.");
+
+        case kParseErrorDocumentEmpty:                 return RAPIDJSON_ERROR_STRING("The document is empty.");
+        case kParseErrorDocumentRootNotSingular:       return RAPIDJSON_ERROR_STRING("The document root must not be followed by other values.");
+
+        case kParseErrorValueInvalid:                  return RAPIDJSON_ERROR_STRING("Invalid value.");
+
+        case kParseErrorObjectMissName:                return RAPIDJSON_ERROR_STRING("Missing a name for object member.");
+        case kParseErrorObjectMissColon:               return RAPIDJSON_ERROR_STRING("Missing a colon after a name of object member.");
+        case kParseErrorObjectMissCommaOrCurlyBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or '}' after an object member.");
+
+        case kParseErrorArrayMissCommaOrSquareBracket: return RAPIDJSON_ERROR_STRING("Missing a comma or ']' after an array element.");
+
+        case kParseErrorStringUnicodeEscapeInvalidHex: return RAPIDJSON_ERROR_STRING("Incorrect hex digit after \\u escape in string.");
+        case kParseErrorStringUnicodeSurrogateInvalid: return RAPIDJSON_ERROR_STRING("The surrogate pair in string is invalid.");
+        case kParseErrorStringEscapeInvalid:           return RAPIDJSON_ERROR_STRING("Invalid escape character in string.");
+        case kParseErrorStringMissQuotationMark:       return RAPIDJSON_ERROR_STRING("Missing a closing quotation mark in string.");
+        case kParseErrorStringInvalidEncoding:         return RAPIDJSON_ERROR_STRING("Invalid encoding in string.");
+
+        case kParseErrorNumberTooBig:                  return RAPIDJSON_ERROR_STRING("Number too big to be stored in double.");
+        case kParseErrorNumberMissFraction:            return RAPIDJSON_ERROR_STRING("Miss fraction part in number.");
+        case kParseErrorNumberMissExponent:            return RAPIDJSON_ERROR_STRING("Miss exponent in number.");
+
+        case kParseErrorTermination:                   return RAPIDJSON_ERROR_STRING("Terminate parsing due to Handler error.");
+        case kParseErrorUnspecificSyntaxError:         return RAPIDJSON_ERROR_STRING("Unspecific syntax error.");
+
+        default: return RAPIDJSON_ERROR_STRING("Unknown error.");
+    }
+}
+
+RAPIDJSON_NAMESPACE_END
+
+#ifdef __clang__
+RAPIDJSON_DIAG_POP
+#endif
+
+#endif // RAPIDJSON_ERROR_EN_H_
diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/error/error.h b/contrib/rapidjson/1.1.0/include/rapidjson/error/error.h
new file mode 100644
index 0000000..95cb31a
--- /dev/null
+++ b/contrib/rapidjson/1.1.0/include/rapidjson/error/error.h
@@ -0,0 +1,155 @@
+// Tencent is pleased to support the open source community by making RapidJSON available.
+//
+// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved.
+//
+// Licensed under the MIT License (the "License"); you may not use this file except
+// in compliance with the License. You may obtain a copy of the License at
+//
+// http://opensource.org/licenses/MIT
+//
+// Unless required by applicable law or agreed to in writing, software distributed
+// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+// CONDITIONS OF ANY KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations under the License.
+
+#ifndef RAPIDJSON_ERROR_ERROR_H_
+#define RAPIDJSON_ERROR_ERROR_H_
+
+#include "../rapidjson.h"
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(padded)
+#endif
+
+/*! \file error.h */
+
+/*!
\defgroup RAPIDJSON_ERRORS RapidJSON error handling */ + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ERROR_CHARTYPE + +//! Character type of error messages. +/*! \ingroup RAPIDJSON_ERRORS + The default character type is \c char. + On Windows, user can define this macro as \c TCHAR for supporting both + unicode/non-unicode settings. +*/ +#ifndef RAPIDJSON_ERROR_CHARTYPE +#define RAPIDJSON_ERROR_CHARTYPE char +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ERROR_STRING + +//! Macro for converting string literial to \ref RAPIDJSON_ERROR_CHARTYPE[]. +/*! \ingroup RAPIDJSON_ERRORS + By default this conversion macro does nothing. + On Windows, user can define this macro as \c _T(x) for supporting both + unicode/non-unicode settings. +*/ +#ifndef RAPIDJSON_ERROR_STRING +#define RAPIDJSON_ERROR_STRING(x) x +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// ParseErrorCode + +//! Error code of parsing. +/*! \ingroup RAPIDJSON_ERRORS + \see GenericReader::Parse, GenericReader::GetParseErrorCode +*/ +enum ParseErrorCode { + kParseErrorNone = 0, //!< No error. + + kParseErrorDocumentEmpty, //!< The document is empty. + kParseErrorDocumentRootNotSingular, //!< The document root must not follow by other values. + + kParseErrorValueInvalid, //!< Invalid value. + + kParseErrorObjectMissName, //!< Missing a name for object member. + kParseErrorObjectMissColon, //!< Missing a colon after a name of object member. + kParseErrorObjectMissCommaOrCurlyBracket, //!< Missing a comma or '}' after an object member. + + kParseErrorArrayMissCommaOrSquareBracket, //!< Missing a comma or ']' after an array element. + + kParseErrorStringUnicodeEscapeInvalidHex, //!< Incorrect hex digit after \\u escape in string. + kParseErrorStringUnicodeSurrogateInvalid, //!< The surrogate pair in string is invalid. + kParseErrorStringEscapeInvalid, //!< Invalid escape character in string. + kParseErrorStringMissQuotationMark, //!< Missing a closing quotation mark in string. + kParseErrorStringInvalidEncoding, //!< Invalid encoding in string. + + kParseErrorNumberTooBig, //!< Number too big to be stored in double. + kParseErrorNumberMissFraction, //!< Miss fraction part in number. + kParseErrorNumberMissExponent, //!< Miss exponent in number. + + kParseErrorTermination, //!< Parsing was terminated. + kParseErrorUnspecificSyntaxError //!< Unspecific syntax error. +}; + +//! Result of parsing (wraps ParseErrorCode) +/*! + \ingroup RAPIDJSON_ERRORS + \code + Document doc; + ParseResult ok = doc.Parse("[42]"); + if (!ok) { + fprintf(stderr, "JSON parse error: %s (%u)", + GetParseError_En(ok.Code()), ok.Offset()); + exit(EXIT_FAILURE); + } + \endcode + \see GenericReader::Parse, GenericDocument::Parse +*/ +struct ParseResult { +public: + //! Default constructor, no error. + ParseResult() : code_(kParseErrorNone), offset_(0) {} + //! Constructor to set an error. + ParseResult(ParseErrorCode code, size_t offset) : code_(code), offset_(offset) {} + + //! Get the error code. + ParseErrorCode Code() const { return code_; } + //! Get the error offset, if \ref IsError(), 0 otherwise. + size_t Offset() const { return offset_; } + + //! Conversion to \c bool, returns \c true, iff !\ref IsError(). + operator bool() const { return !IsError(); } + //! Whether the result is an error. 
+ bool IsError() const { return code_ != kParseErrorNone; } + + bool operator==(const ParseResult& that) const { return code_ == that.code_; } + bool operator==(ParseErrorCode code) const { return code_ == code; } + friend bool operator==(ParseErrorCode code, const ParseResult & err) { return code == err.code_; } + + //! Reset error code. + void Clear() { Set(kParseErrorNone); } + //! Update error code and offset. + void Set(ParseErrorCode code, size_t offset = 0) { code_ = code; offset_ = offset; } + +private: + ParseErrorCode code_; + size_t offset_; +}; + +//! Function pointer type of GetParseError(). +/*! \ingroup RAPIDJSON_ERRORS + + This is the prototype for \c GetParseError_X(), where \c X is a locale. + User can dynamically change locale in runtime, e.g.: +\code + GetParseErrorFunc GetParseError = GetParseError_En; // or whatever + const RAPIDJSON_ERROR_CHARTYPE* s = GetParseError(document.GetParseErrorCode()); +\endcode +*/ +typedef const RAPIDJSON_ERROR_CHARTYPE* (*GetParseErrorFunc)(ParseErrorCode); + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_ERROR_ERROR_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/filereadstream.h b/contrib/rapidjson/1.1.0/include/rapidjson/filereadstream.h new file mode 100644 index 0000000..b56ea13 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/filereadstream.h @@ -0,0 +1,99 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FILEREADSTREAM_H_ +#define RAPIDJSON_FILEREADSTREAM_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(missing-noreturn) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! File byte stream for input using fread(). +/*! + \note implements Stream concept +*/ +class FileReadStream { +public: + typedef char Ch; //!< Character type (byte). + + //! Constructor. + /*! + \param fp File pointer opened for read. + \param buffer user-supplied buffer. + \param bufferSize size of buffer in bytes. Must >=4 bytes. + */ + FileReadStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferSize_(bufferSize), bufferLast_(0), current_(buffer_), readCount_(0), count_(0), eof_(false) { + RAPIDJSON_ASSERT(fp_ != 0); + RAPIDJSON_ASSERT(bufferSize >= 4); + Read(); + } + + Ch Peek() const { return *current_; } + Ch Take() { Ch c = *current_; Read(); return c; } + size_t Tell() const { return count_ + static_cast(current_ - buffer_); } + + // Not implemented + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + return (current_ + 4 <= bufferLast_) ? 
current_ : 0; + } + +private: + void Read() { + if (current_ < bufferLast_) + ++current_; + else if (!eof_) { + count_ += readCount_; + readCount_ = fread(buffer_, 1, bufferSize_, fp_); + bufferLast_ = buffer_ + readCount_ - 1; + current_ = buffer_; + + if (readCount_ < bufferSize_) { + buffer_[readCount_] = '\0'; + ++bufferLast_; + eof_ = true; + } + } + } + + std::FILE* fp_; + Ch *buffer_; + size_t bufferSize_; + Ch *bufferLast_; + Ch *current_; + size_t readCount_; + size_t count_; //!< Number of characters read + bool eof_; +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/filewritestream.h b/contrib/rapidjson/1.1.0/include/rapidjson/filewritestream.h new file mode 100644 index 0000000..6378dd6 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/filewritestream.h @@ -0,0 +1,104 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FILEWRITESTREAM_H_ +#define RAPIDJSON_FILEWRITESTREAM_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(unreachable-code) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of C file stream for input using fread(). +/*! + \note implements Stream concept +*/ +class FileWriteStream { +public: + typedef char Ch; //!< Character type. Only support char. + + FileWriteStream(std::FILE* fp, char* buffer, size_t bufferSize) : fp_(fp), buffer_(buffer), bufferEnd_(buffer + bufferSize), current_(buffer_) { + RAPIDJSON_ASSERT(fp_ != 0); + } + + void Put(char c) { + if (current_ >= bufferEnd_) + Flush(); + + *current_++ = c; + } + + void PutN(char c, size_t n) { + size_t avail = static_cast(bufferEnd_ - current_); + while (n > avail) { + std::memset(current_, c, avail); + current_ += avail; + Flush(); + n -= avail; + avail = static_cast(bufferEnd_ - current_); + } + + if (n > 0) { + std::memset(current_, c, n); + current_ += n; + } + } + + void Flush() { + if (current_ != buffer_) { + size_t result = fwrite(buffer_, 1, static_cast(current_ - buffer_), fp_); + if (result < static_cast(current_ - buffer_)) { + // failure deliberately ignored at this time + // added to avoid warn_unused_result build errors + } + current_ = buffer_; + } + } + + // Not implemented + char Peek() const { RAPIDJSON_ASSERT(false); return 0; } + char Take() { RAPIDJSON_ASSERT(false); return 0; } + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + // Prohibit copy constructor & assignment operator. + FileWriteStream(const FileWriteStream&); + FileWriteStream& operator=(const FileWriteStream&); + + std::FILE* fp_; + char *buffer_; + char *bufferEnd_; + char *current_; +}; + +//! 
Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(FileWriteStream& stream, char c, size_t n) { + stream.PutN(c, n); +} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_FILESTREAM_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/fwd.h b/contrib/rapidjson/1.1.0/include/rapidjson/fwd.h new file mode 100644 index 0000000..e8104e8 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/fwd.h @@ -0,0 +1,151 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_FWD_H_ +#define RAPIDJSON_FWD_H_ + +#include "rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN + +// encodings.h + +template struct UTF8; +template struct UTF16; +template struct UTF16BE; +template struct UTF16LE; +template struct UTF32; +template struct UTF32BE; +template struct UTF32LE; +template struct ASCII; +template struct AutoUTF; + +template +struct Transcoder; + +// allocators.h + +class CrtAllocator; + +template +class MemoryPoolAllocator; + +// stream.h + +template +struct GenericStringStream; + +typedef GenericStringStream > StringStream; + +template +struct GenericInsituStringStream; + +typedef GenericInsituStringStream > InsituStringStream; + +// stringbuffer.h + +template +class GenericStringBuffer; + +typedef GenericStringBuffer, CrtAllocator> StringBuffer; + +// filereadstream.h + +class FileReadStream; + +// filewritestream.h + +class FileWriteStream; + +// memorybuffer.h + +template +struct GenericMemoryBuffer; + +typedef GenericMemoryBuffer MemoryBuffer; + +// memorystream.h + +struct MemoryStream; + +// reader.h + +template +struct BaseReaderHandler; + +template +class GenericReader; + +typedef GenericReader, UTF8, CrtAllocator> Reader; + +// writer.h + +template +class Writer; + +// prettywriter.h + +template +class PrettyWriter; + +// document.h + +template +struct GenericMember; + +template +class GenericMemberIterator; + +template +struct GenericStringRef; + +template +class GenericValue; + +typedef GenericValue, MemoryPoolAllocator > Value; + +template +class GenericDocument; + +typedef GenericDocument, MemoryPoolAllocator, CrtAllocator> Document; + +// pointer.h + +template +class GenericPointer; + +typedef GenericPointer Pointer; + +// schema.h + +template +class IGenericRemoteSchemaDocumentProvider; + +template +class GenericSchemaDocument; + +typedef GenericSchemaDocument SchemaDocument; +typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProvider; + +template < + typename SchemaDocumentType, + typename OutputHandler, + typename StateAllocator> +class GenericSchemaValidator; + +typedef GenericSchemaValidator, void>, CrtAllocator> SchemaValidator; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_RAPIDJSONFWD_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/biginteger.h 
b/contrib/rapidjson/1.1.0/include/rapidjson/internal/biginteger.h new file mode 100644 index 0000000..9d3e88c --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/biginteger.h @@ -0,0 +1,290 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_BIGINTEGER_H_ +#define RAPIDJSON_BIGINTEGER_H_ + +#include "../rapidjson.h" + +#if defined(_MSC_VER) && defined(_M_AMD64) +#include // for _umul128 +#pragma intrinsic(_umul128) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +class BigInteger { +public: + typedef uint64_t Type; + + BigInteger(const BigInteger& rhs) : count_(rhs.count_) { + std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); + } + + explicit BigInteger(uint64_t u) : count_(1) { + digits_[0] = u; + } + + BigInteger(const char* decimals, size_t length) : count_(1) { + RAPIDJSON_ASSERT(length > 0); + digits_[0] = 0; + size_t i = 0; + const size_t kMaxDigitPerIteration = 19; // 2^64 = 18446744073709551616 > 10^19 + while (length >= kMaxDigitPerIteration) { + AppendDecimal64(decimals + i, decimals + i + kMaxDigitPerIteration); + length -= kMaxDigitPerIteration; + i += kMaxDigitPerIteration; + } + + if (length > 0) + AppendDecimal64(decimals + i, decimals + i + length); + } + + BigInteger& operator=(const BigInteger &rhs) + { + if (this != &rhs) { + count_ = rhs.count_; + std::memcpy(digits_, rhs.digits_, count_ * sizeof(Type)); + } + return *this; + } + + BigInteger& operator=(uint64_t u) { + digits_[0] = u; + count_ = 1; + return *this; + } + + BigInteger& operator+=(uint64_t u) { + Type backup = digits_[0]; + digits_[0] += u; + for (size_t i = 0; i < count_ - 1; i++) { + if (digits_[i] >= backup) + return *this; // no carry + backup = digits_[i + 1]; + digits_[i + 1] += 1; + } + + // Last carry + if (digits_[count_ - 1] < backup) + PushBack(1); + + return *this; + } + + BigInteger& operator*=(uint64_t u) { + if (u == 0) return *this = 0; + if (u == 1) return *this; + if (*this == 1) return *this = u; + + uint64_t k = 0; + for (size_t i = 0; i < count_; i++) { + uint64_t hi; + digits_[i] = MulAdd64(digits_[i], u, k, &hi); + k = hi; + } + + if (k > 0) + PushBack(k); + + return *this; + } + + BigInteger& operator*=(uint32_t u) { + if (u == 0) return *this = 0; + if (u == 1) return *this; + if (*this == 1) return *this = u; + + uint64_t k = 0; + for (size_t i = 0; i < count_; i++) { + const uint64_t c = digits_[i] >> 32; + const uint64_t d = digits_[i] & 0xFFFFFFFF; + const uint64_t uc = u * c; + const uint64_t ud = u * d; + const uint64_t p0 = ud + k; + const uint64_t p1 = uc + (p0 >> 32); + digits_[i] = (p0 & 0xFFFFFFFF) | (p1 << 32); + k = p1 >> 32; + } + + if (k > 0) + PushBack(k); + + return *this; + } + + BigInteger& operator<<=(size_t shift) { + if (IsZero() || shift == 0) return *this; + + size_t offset = shift / kTypeBit; + size_t interShift = shift % kTypeBit; + 
RAPIDJSON_ASSERT(count_ + offset <= kCapacity); + + if (interShift == 0) { + std::memmove(&digits_[count_ - 1 + offset], &digits_[count_ - 1], count_ * sizeof(Type)); + count_ += offset; + } + else { + digits_[count_] = 0; + for (size_t i = count_; i > 0; i--) + digits_[i + offset] = (digits_[i] << interShift) | (digits_[i - 1] >> (kTypeBit - interShift)); + digits_[offset] = digits_[0] << interShift; + count_ += offset; + if (digits_[count_]) + count_++; + } + + std::memset(digits_, 0, offset * sizeof(Type)); + + return *this; + } + + bool operator==(const BigInteger& rhs) const { + return count_ == rhs.count_ && std::memcmp(digits_, rhs.digits_, count_ * sizeof(Type)) == 0; + } + + bool operator==(const Type rhs) const { + return count_ == 1 && digits_[0] == rhs; + } + + BigInteger& MultiplyPow5(unsigned exp) { + static const uint32_t kPow5[12] = { + 5, + 5 * 5, + 5 * 5 * 5, + 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5, + 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 * 5 + }; + if (exp == 0) return *this; + for (; exp >= 27; exp -= 27) *this *= RAPIDJSON_UINT64_C2(0X6765C793, 0XFA10079D); // 5^27 + for (; exp >= 13; exp -= 13) *this *= static_cast(1220703125u); // 5^13 + if (exp > 0) *this *= kPow5[exp - 1]; + return *this; + } + + // Compute absolute difference of this and rhs. + // Assume this != rhs + bool Difference(const BigInteger& rhs, BigInteger* out) const { + int cmp = Compare(rhs); + RAPIDJSON_ASSERT(cmp != 0); + const BigInteger *a, *b; // Makes a > b + bool ret; + if (cmp < 0) { a = &rhs; b = this; ret = true; } + else { a = this; b = &rhs; ret = false; } + + Type borrow = 0; + for (size_t i = 0; i < a->count_; i++) { + Type d = a->digits_[i] - borrow; + if (i < b->count_) + d -= b->digits_[i]; + borrow = (d > a->digits_[i]) ? 1 : 0; + out->digits_[i] = d; + if (d != 0) + out->count_ = i + 1; + } + + return ret; + } + + int Compare(const BigInteger& rhs) const { + if (count_ != rhs.count_) + return count_ < rhs.count_ ? -1 : 1; + + for (size_t i = count_; i-- > 0;) + if (digits_[i] != rhs.digits_[i]) + return digits_[i] < rhs.digits_[i] ? 
-1 : 1; + + return 0; + } + + size_t GetCount() const { return count_; } + Type GetDigit(size_t index) const { RAPIDJSON_ASSERT(index < count_); return digits_[index]; } + bool IsZero() const { return count_ == 1 && digits_[0] == 0; } + +private: + void AppendDecimal64(const char* begin, const char* end) { + uint64_t u = ParseUint64(begin, end); + if (IsZero()) + *this = u; + else { + unsigned exp = static_cast(end - begin); + (MultiplyPow5(exp) <<= exp) += u; // *this = *this * 10^exp + u + } + } + + void PushBack(Type digit) { + RAPIDJSON_ASSERT(count_ < kCapacity); + digits_[count_++] = digit; + } + + static uint64_t ParseUint64(const char* begin, const char* end) { + uint64_t r = 0; + for (const char* p = begin; p != end; ++p) { + RAPIDJSON_ASSERT(*p >= '0' && *p <= '9'); + r = r * 10u + static_cast(*p - '0'); + } + return r; + } + + // Assume a * b + k < 2^128 + static uint64_t MulAdd64(uint64_t a, uint64_t b, uint64_t k, uint64_t* outHigh) { +#if defined(_MSC_VER) && defined(_M_AMD64) + uint64_t low = _umul128(a, b, outHigh) + k; + if (low < k) + (*outHigh)++; + return low; +#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) + __extension__ typedef unsigned __int128 uint128; + uint128 p = static_cast(a) * static_cast(b); + p += k; + *outHigh = static_cast(p >> 64); + return static_cast(p); +#else + const uint64_t a0 = a & 0xFFFFFFFF, a1 = a >> 32, b0 = b & 0xFFFFFFFF, b1 = b >> 32; + uint64_t x0 = a0 * b0, x1 = a0 * b1, x2 = a1 * b0, x3 = a1 * b1; + x1 += (x0 >> 32); // can't give carry + x1 += x2; + if (x1 < x2) + x3 += (static_cast(1) << 32); + uint64_t lo = (x1 << 32) + (x0 & 0xFFFFFFFF); + uint64_t hi = x3 + (x1 >> 32); + + lo += k; + if (lo < k) + hi++; + *outHigh = hi; + return lo; +#endif + } + + static const size_t kBitCount = 3328; // 64bit * 54 > 10^1000 + static const size_t kCapacity = kBitCount / sizeof(Type); + static const size_t kTypeBit = sizeof(Type) * 8; + + Type digits_[kCapacity]; + size_t count_; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_BIGINTEGER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/diyfp.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/diyfp.h new file mode 100644 index 0000000..c9fefdc --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/diyfp.h @@ -0,0 +1,258 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +// This is a C++ header-only implementation of Grisu2 algorithm from the publication: +// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with +// integers." ACM Sigplan Notices 45.6 (2010): 233-243. 
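+//
+// Editorial note (illustrative, not taken from the publication): DiyFp below represents a
+// value as f * 2^e with a full 64-bit significand f and a signed binary exponent e. For
+// example, the double 1.0 has IEEE-754 bits 0x3FF0000000000000: biased exponent 0x3FF and
+// stored significand 0, so the DiyFp(double) constructor yields f = 2^52 (hidden bit added)
+// and e = 0x3FF - kDpExponentBias = 1023 - 1075 = -52, and indeed 2^52 * 2^-52 = 1.0.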
+ +#ifndef RAPIDJSON_DIYFP_H_ +#define RAPIDJSON_DIYFP_H_ + +#include "../rapidjson.h" + +#if defined(_MSC_VER) && defined(_M_AMD64) +#include +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_umul128) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +struct DiyFp { + DiyFp() : f(), e() {} + + DiyFp(uint64_t fp, int exp) : f(fp), e(exp) {} + + explicit DiyFp(double d) { + union { + double d; + uint64_t u64; + } u = { d }; + + int biased_e = static_cast((u.u64 & kDpExponentMask) >> kDpSignificandSize); + uint64_t significand = (u.u64 & kDpSignificandMask); + if (biased_e != 0) { + f = significand + kDpHiddenBit; + e = biased_e - kDpExponentBias; + } + else { + f = significand; + e = kDpMinExponent + 1; + } + } + + DiyFp operator-(const DiyFp& rhs) const { + return DiyFp(f - rhs.f, e); + } + + DiyFp operator*(const DiyFp& rhs) const { +#if defined(_MSC_VER) && defined(_M_AMD64) + uint64_t h; + uint64_t l = _umul128(f, rhs.f, &h); + if (l & (uint64_t(1) << 63)) // rounding + h++; + return DiyFp(h, e + rhs.e + 64); +#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) && defined(__x86_64__) + __extension__ typedef unsigned __int128 uint128; + uint128 p = static_cast(f) * static_cast(rhs.f); + uint64_t h = static_cast(p >> 64); + uint64_t l = static_cast(p); + if (l & (uint64_t(1) << 63)) // rounding + h++; + return DiyFp(h, e + rhs.e + 64); +#else + const uint64_t M32 = 0xFFFFFFFF; + const uint64_t a = f >> 32; + const uint64_t b = f & M32; + const uint64_t c = rhs.f >> 32; + const uint64_t d = rhs.f & M32; + const uint64_t ac = a * c; + const uint64_t bc = b * c; + const uint64_t ad = a * d; + const uint64_t bd = b * d; + uint64_t tmp = (bd >> 32) + (ad & M32) + (bc & M32); + tmp += 1U << 31; /// mult_round + return DiyFp(ac + (ad >> 32) + (bc >> 32) + (tmp >> 32), e + rhs.e + 64); +#endif + } + + DiyFp Normalize() const { +#if defined(_MSC_VER) && defined(_M_AMD64) + unsigned long index; + _BitScanReverse64(&index, f); + return DiyFp(f << (63 - index), e - (63 - index)); +#elif defined(__GNUC__) && __GNUC__ >= 4 + int s = __builtin_clzll(f); + return DiyFp(f << s, e - s); +#else + DiyFp res = *this; + while (!(res.f & (static_cast(1) << 63))) { + res.f <<= 1; + res.e--; + } + return res; +#endif + } + + DiyFp NormalizeBoundary() const { + DiyFp res = *this; + while (!(res.f & (kDpHiddenBit << 1))) { + res.f <<= 1; + res.e--; + } + res.f <<= (kDiySignificandSize - kDpSignificandSize - 2); + res.e = res.e - (kDiySignificandSize - kDpSignificandSize - 2); + return res; + } + + void NormalizedBoundaries(DiyFp* minus, DiyFp* plus) const { + DiyFp pl = DiyFp((f << 1) + 1, e - 1).NormalizeBoundary(); + DiyFp mi = (f == kDpHiddenBit) ? DiyFp((f << 2) - 1, e - 2) : DiyFp((f << 1) - 1, e - 1); + mi.f <<= mi.e - pl.e; + mi.e = pl.e; + *plus = pl; + *minus = mi; + } + + double ToDouble() const { + union { + double d; + uint64_t u64; + }u; + const uint64_t be = (e == kDpDenormalExponent && (f & kDpHiddenBit) == 0) ? 
0 : + static_cast(e + kDpExponentBias); + u.u64 = (f & kDpSignificandMask) | (be << kDpSignificandSize); + return u.d; + } + + static const int kDiySignificandSize = 64; + static const int kDpSignificandSize = 52; + static const int kDpExponentBias = 0x3FF + kDpSignificandSize; + static const int kDpMaxExponent = 0x7FF - kDpExponentBias; + static const int kDpMinExponent = -kDpExponentBias; + static const int kDpDenormalExponent = -kDpExponentBias + 1; + static const uint64_t kDpExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); + static const uint64_t kDpSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); + static const uint64_t kDpHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); + + uint64_t f; + int e; +}; + +inline DiyFp GetCachedPowerByIndex(size_t index) { + // 10^-348, 10^-340, ..., 10^340 + static const uint64_t kCachedPowers_F[] = { + RAPIDJSON_UINT64_C2(0xfa8fd5a0, 0x081c0288), RAPIDJSON_UINT64_C2(0xbaaee17f, 0xa23ebf76), + RAPIDJSON_UINT64_C2(0x8b16fb20, 0x3055ac76), RAPIDJSON_UINT64_C2(0xcf42894a, 0x5dce35ea), + RAPIDJSON_UINT64_C2(0x9a6bb0aa, 0x55653b2d), RAPIDJSON_UINT64_C2(0xe61acf03, 0x3d1a45df), + RAPIDJSON_UINT64_C2(0xab70fe17, 0xc79ac6ca), RAPIDJSON_UINT64_C2(0xff77b1fc, 0xbebcdc4f), + RAPIDJSON_UINT64_C2(0xbe5691ef, 0x416bd60c), RAPIDJSON_UINT64_C2(0x8dd01fad, 0x907ffc3c), + RAPIDJSON_UINT64_C2(0xd3515c28, 0x31559a83), RAPIDJSON_UINT64_C2(0x9d71ac8f, 0xada6c9b5), + RAPIDJSON_UINT64_C2(0xea9c2277, 0x23ee8bcb), RAPIDJSON_UINT64_C2(0xaecc4991, 0x4078536d), + RAPIDJSON_UINT64_C2(0x823c1279, 0x5db6ce57), RAPIDJSON_UINT64_C2(0xc2109436, 0x4dfb5637), + RAPIDJSON_UINT64_C2(0x9096ea6f, 0x3848984f), RAPIDJSON_UINT64_C2(0xd77485cb, 0x25823ac7), + RAPIDJSON_UINT64_C2(0xa086cfcd, 0x97bf97f4), RAPIDJSON_UINT64_C2(0xef340a98, 0x172aace5), + RAPIDJSON_UINT64_C2(0xb23867fb, 0x2a35b28e), RAPIDJSON_UINT64_C2(0x84c8d4df, 0xd2c63f3b), + RAPIDJSON_UINT64_C2(0xc5dd4427, 0x1ad3cdba), RAPIDJSON_UINT64_C2(0x936b9fce, 0xbb25c996), + RAPIDJSON_UINT64_C2(0xdbac6c24, 0x7d62a584), RAPIDJSON_UINT64_C2(0xa3ab6658, 0x0d5fdaf6), + RAPIDJSON_UINT64_C2(0xf3e2f893, 0xdec3f126), RAPIDJSON_UINT64_C2(0xb5b5ada8, 0xaaff80b8), + RAPIDJSON_UINT64_C2(0x87625f05, 0x6c7c4a8b), RAPIDJSON_UINT64_C2(0xc9bcff60, 0x34c13053), + RAPIDJSON_UINT64_C2(0x964e858c, 0x91ba2655), RAPIDJSON_UINT64_C2(0xdff97724, 0x70297ebd), + RAPIDJSON_UINT64_C2(0xa6dfbd9f, 0xb8e5b88f), RAPIDJSON_UINT64_C2(0xf8a95fcf, 0x88747d94), + RAPIDJSON_UINT64_C2(0xb9447093, 0x8fa89bcf), RAPIDJSON_UINT64_C2(0x8a08f0f8, 0xbf0f156b), + RAPIDJSON_UINT64_C2(0xcdb02555, 0x653131b6), RAPIDJSON_UINT64_C2(0x993fe2c6, 0xd07b7fac), + RAPIDJSON_UINT64_C2(0xe45c10c4, 0x2a2b3b06), RAPIDJSON_UINT64_C2(0xaa242499, 0x697392d3), + RAPIDJSON_UINT64_C2(0xfd87b5f2, 0x8300ca0e), RAPIDJSON_UINT64_C2(0xbce50864, 0x92111aeb), + RAPIDJSON_UINT64_C2(0x8cbccc09, 0x6f5088cc), RAPIDJSON_UINT64_C2(0xd1b71758, 0xe219652c), + RAPIDJSON_UINT64_C2(0x9c400000, 0x00000000), RAPIDJSON_UINT64_C2(0xe8d4a510, 0x00000000), + RAPIDJSON_UINT64_C2(0xad78ebc5, 0xac620000), RAPIDJSON_UINT64_C2(0x813f3978, 0xf8940984), + RAPIDJSON_UINT64_C2(0xc097ce7b, 0xc90715b3), RAPIDJSON_UINT64_C2(0x8f7e32ce, 0x7bea5c70), + RAPIDJSON_UINT64_C2(0xd5d238a4, 0xabe98068), RAPIDJSON_UINT64_C2(0x9f4f2726, 0x179a2245), + RAPIDJSON_UINT64_C2(0xed63a231, 0xd4c4fb27), RAPIDJSON_UINT64_C2(0xb0de6538, 0x8cc8ada8), + RAPIDJSON_UINT64_C2(0x83c7088e, 0x1aab65db), RAPIDJSON_UINT64_C2(0xc45d1df9, 0x42711d9a), + RAPIDJSON_UINT64_C2(0x924d692c, 0xa61be758), RAPIDJSON_UINT64_C2(0xda01ee64, 0x1a708dea), + 
RAPIDJSON_UINT64_C2(0xa26da399, 0x9aef774a), RAPIDJSON_UINT64_C2(0xf209787b, 0xb47d6b85), + RAPIDJSON_UINT64_C2(0xb454e4a1, 0x79dd1877), RAPIDJSON_UINT64_C2(0x865b8692, 0x5b9bc5c2), + RAPIDJSON_UINT64_C2(0xc83553c5, 0xc8965d3d), RAPIDJSON_UINT64_C2(0x952ab45c, 0xfa97a0b3), + RAPIDJSON_UINT64_C2(0xde469fbd, 0x99a05fe3), RAPIDJSON_UINT64_C2(0xa59bc234, 0xdb398c25), + RAPIDJSON_UINT64_C2(0xf6c69a72, 0xa3989f5c), RAPIDJSON_UINT64_C2(0xb7dcbf53, 0x54e9bece), + RAPIDJSON_UINT64_C2(0x88fcf317, 0xf22241e2), RAPIDJSON_UINT64_C2(0xcc20ce9b, 0xd35c78a5), + RAPIDJSON_UINT64_C2(0x98165af3, 0x7b2153df), RAPIDJSON_UINT64_C2(0xe2a0b5dc, 0x971f303a), + RAPIDJSON_UINT64_C2(0xa8d9d153, 0x5ce3b396), RAPIDJSON_UINT64_C2(0xfb9b7cd9, 0xa4a7443c), + RAPIDJSON_UINT64_C2(0xbb764c4c, 0xa7a44410), RAPIDJSON_UINT64_C2(0x8bab8eef, 0xb6409c1a), + RAPIDJSON_UINT64_C2(0xd01fef10, 0xa657842c), RAPIDJSON_UINT64_C2(0x9b10a4e5, 0xe9913129), + RAPIDJSON_UINT64_C2(0xe7109bfb, 0xa19c0c9d), RAPIDJSON_UINT64_C2(0xac2820d9, 0x623bf429), + RAPIDJSON_UINT64_C2(0x80444b5e, 0x7aa7cf85), RAPIDJSON_UINT64_C2(0xbf21e440, 0x03acdd2d), + RAPIDJSON_UINT64_C2(0x8e679c2f, 0x5e44ff8f), RAPIDJSON_UINT64_C2(0xd433179d, 0x9c8cb841), + RAPIDJSON_UINT64_C2(0x9e19db92, 0xb4e31ba9), RAPIDJSON_UINT64_C2(0xeb96bf6e, 0xbadf77d9), + RAPIDJSON_UINT64_C2(0xaf87023b, 0x9bf0ee6b) + }; + static const int16_t kCachedPowers_E[] = { + -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, + -954, -927, -901, -874, -847, -821, -794, -768, -741, -715, + -688, -661, -635, -608, -582, -555, -529, -502, -475, -449, + -422, -396, -369, -343, -316, -289, -263, -236, -210, -183, + -157, -130, -103, -77, -50, -24, 3, 30, 56, 83, + 109, 136, 162, 189, 216, 242, 269, 295, 322, 348, + 375, 402, 428, 455, 481, 508, 534, 561, 588, 614, + 641, 667, 694, 720, 747, 774, 800, 827, 853, 880, + 907, 933, 960, 986, 1013, 1039, 1066 + }; + return DiyFp(kCachedPowers_F[index], kCachedPowers_E[index]); +} + +inline DiyFp GetCachedPower(int e, int* K) { + + //int k = static_cast(ceil((-61 - e) * 0.30102999566398114)) + 374; + double dk = (-61 - e) * 0.30102999566398114 + 347; // dk must be positive, so can do ceiling in positive + int k = static_cast(dk); + if (dk - k > 0.0) + k++; + + unsigned index = static_cast((k >> 3) + 1); + *K = -(-348 + static_cast(index << 3)); // decimal exponent no need lookup table + + return GetCachedPowerByIndex(index); +} + +inline DiyFp GetCachedPower10(int exp, int *outExp) { + unsigned index = (static_cast(exp) + 348u) / 8u; + *outExp = -348 + static_cast(index) * 8; + return GetCachedPowerByIndex(index); + } + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +RAPIDJSON_DIAG_OFF(padded) +#endif + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_DIYFP_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/dtoa.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/dtoa.h new file mode 100644 index 0000000..8d6350e --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/dtoa.h @@ -0,0 +1,245 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +// This is a C++ header-only implementation of Grisu2 algorithm from the publication: +// Loitsch, Florian. "Printing floating-point numbers quickly and accurately with +// integers." ACM Sigplan Notices 45.6 (2010): 233-243. + +#ifndef RAPIDJSON_DTOA_ +#define RAPIDJSON_DTOA_ + +#include "itoa.h" // GetDigitsLut() +#include "diyfp.h" +#include "ieee754.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +RAPIDJSON_DIAG_OFF(array-bounds) // some gcc versions generate wrong warnings https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59124 +#endif + +inline void GrisuRound(char* buffer, int len, uint64_t delta, uint64_t rest, uint64_t ten_kappa, uint64_t wp_w) { + while (rest < wp_w && delta - rest >= ten_kappa && + (rest + ten_kappa < wp_w || /// closer + wp_w - rest > rest + ten_kappa - wp_w)) { + buffer[len - 1]--; + rest += ten_kappa; + } +} + +inline unsigned CountDecimalDigit32(uint32_t n) { + // Simple pure C++ implementation was faster than __builtin_clz version in this situation. + if (n < 10) return 1; + if (n < 100) return 2; + if (n < 1000) return 3; + if (n < 10000) return 4; + if (n < 100000) return 5; + if (n < 1000000) return 6; + if (n < 10000000) return 7; + if (n < 100000000) return 8; + // Will not reach 10 digits in DigitGen() + //if (n < 1000000000) return 9; + //return 10; + return 9; +} + +inline void DigitGen(const DiyFp& W, const DiyFp& Mp, uint64_t delta, char* buffer, int* len, int* K) { + static const uint32_t kPow10[] = { 1, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; + const DiyFp one(uint64_t(1) << -Mp.e, Mp.e); + const DiyFp wp_w = Mp - W; + uint32_t p1 = static_cast(Mp.f >> -one.e); + uint64_t p2 = Mp.f & (one.f - 1); + unsigned kappa = CountDecimalDigit32(p1); // kappa in [0, 9] + *len = 0; + + while (kappa > 0) { + uint32_t d = 0; + switch (kappa) { + case 9: d = p1 / 100000000; p1 %= 100000000; break; + case 8: d = p1 / 10000000; p1 %= 10000000; break; + case 7: d = p1 / 1000000; p1 %= 1000000; break; + case 6: d = p1 / 100000; p1 %= 100000; break; + case 5: d = p1 / 10000; p1 %= 10000; break; + case 4: d = p1 / 1000; p1 %= 1000; break; + case 3: d = p1 / 100; p1 %= 100; break; + case 2: d = p1 / 10; p1 %= 10; break; + case 1: d = p1; p1 = 0; break; + default:; + } + if (d || *len) + buffer[(*len)++] = static_cast('0' + static_cast(d)); + kappa--; + uint64_t tmp = (static_cast(p1) << -one.e) + p2; + if (tmp <= delta) { + *K += kappa; + GrisuRound(buffer, *len, delta, tmp, static_cast(kPow10[kappa]) << -one.e, wp_w.f); + return; + } + } + + // kappa = 0 + for (;;) { + p2 *= 10; + delta *= 10; + char d = static_cast(p2 >> -one.e); + if (d || *len) + buffer[(*len)++] = static_cast('0' + d); + p2 &= one.f - 1; + kappa--; + if (p2 < delta) { + *K += kappa; + int index = -static_cast(kappa); + GrisuRound(buffer, *len, delta, p2, one.f, wp_w.f * (index < 9 ? 
kPow10[-static_cast(kappa)] : 0)); + return; + } + } +} + +inline void Grisu2(double value, char* buffer, int* length, int* K) { + const DiyFp v(value); + DiyFp w_m, w_p; + v.NormalizedBoundaries(&w_m, &w_p); + + const DiyFp c_mk = GetCachedPower(w_p.e, K); + const DiyFp W = v.Normalize() * c_mk; + DiyFp Wp = w_p * c_mk; + DiyFp Wm = w_m * c_mk; + Wm.f++; + Wp.f--; + DigitGen(W, Wp, Wp.f - Wm.f, buffer, length, K); +} + +inline char* WriteExponent(int K, char* buffer) { + if (K < 0) { + *buffer++ = '-'; + K = -K; + } + + if (K >= 100) { + *buffer++ = static_cast('0' + static_cast(K / 100)); + K %= 100; + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else if (K >= 10) { + const char* d = GetDigitsLut() + K * 2; + *buffer++ = d[0]; + *buffer++ = d[1]; + } + else + *buffer++ = static_cast('0' + static_cast(K)); + + return buffer; +} + +inline char* Prettify(char* buffer, int length, int k, int maxDecimalPlaces) { + const int kk = length + k; // 10^(kk-1) <= v < 10^kk + + if (0 <= k && kk <= 21) { + // 1234e7 -> 12340000000 + for (int i = length; i < kk; i++) + buffer[i] = '0'; + buffer[kk] = '.'; + buffer[kk + 1] = '0'; + return &buffer[kk + 2]; + } + else if (0 < kk && kk <= 21) { + // 1234e-2 -> 12.34 + std::memmove(&buffer[kk + 1], &buffer[kk], static_cast(length - kk)); + buffer[kk] = '.'; + if (0 > k + maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 1.2345 -> 1.23, 1.102 -> 1.1 + // Remove extra trailing zeros (at least one) after truncation. + for (int i = kk + maxDecimalPlaces; i > kk + 1; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[kk + 2]; // Reserve one zero + } + else + return &buffer[length + 1]; + } + else if (-6 < kk && kk <= 0) { + // 1234e-6 -> 0.001234 + const int offset = 2 - kk; + std::memmove(&buffer[offset], &buffer[0], static_cast(length)); + buffer[0] = '0'; + buffer[1] = '.'; + for (int i = 2; i < offset; i++) + buffer[i] = '0'; + if (length - kk > maxDecimalPlaces) { + // When maxDecimalPlaces = 2, 0.123 -> 0.12, 0.102 -> 0.1 + // Remove extra trailing zeros (at least one) after truncation. 
+ for (int i = maxDecimalPlaces + 1; i > 2; i--) + if (buffer[i] != '0') + return &buffer[i + 1]; + return &buffer[3]; // Reserve one zero + } + else + return &buffer[length + offset]; + } + else if (kk < -maxDecimalPlaces) { + // Truncate to zero + buffer[0] = '0'; + buffer[1] = '.'; + buffer[2] = '0'; + return &buffer[3]; + } + else if (length == 1) { + // 1e30 + buffer[1] = 'e'; + return WriteExponent(kk - 1, &buffer[2]); + } + else { + // 1234e30 -> 1.234e33 + std::memmove(&buffer[2], &buffer[1], static_cast(length - 1)); + buffer[1] = '.'; + buffer[length + 1] = 'e'; + return WriteExponent(kk - 1, &buffer[0 + length + 2]); + } +} + +inline char* dtoa(double value, char* buffer, int maxDecimalPlaces = 324) { + RAPIDJSON_ASSERT(maxDecimalPlaces >= 1); + Double d(value); + if (d.IsZero()) { + if (d.Sign()) + *buffer++ = '-'; // -0.0, Issue #289 + buffer[0] = '0'; + buffer[1] = '.'; + buffer[2] = '0'; + return &buffer[3]; + } + else { + if (value < 0) { + *buffer++ = '-'; + value = -value; + } + int length, K; + Grisu2(value, buffer, &length, &K); + return Prettify(buffer, length, K, maxDecimalPlaces); + } +} + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_DTOA_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/ieee754.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/ieee754.h new file mode 100644 index 0000000..82bb0b9 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/ieee754.h @@ -0,0 +1,78 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_IEEE754_ +#define RAPIDJSON_IEEE754_ + +#include "../rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +class Double { +public: + Double() {} + Double(double d) : d_(d) {} + Double(uint64_t u) : u_(u) {} + + double Value() const { return d_; } + uint64_t Uint64Value() const { return u_; } + + double NextPositiveDouble() const { + RAPIDJSON_ASSERT(!Sign()); + return Double(u_ + 1).Value(); + } + + bool Sign() const { return (u_ & kSignMask) != 0; } + uint64_t Significand() const { return u_ & kSignificandMask; } + int Exponent() const { return static_cast(((u_ & kExponentMask) >> kSignificandSize) - kExponentBias); } + + bool IsNan() const { return (u_ & kExponentMask) == kExponentMask && Significand() != 0; } + bool IsInf() const { return (u_ & kExponentMask) == kExponentMask && Significand() == 0; } + bool IsNanOrInf() const { return (u_ & kExponentMask) == kExponentMask; } + bool IsNormal() const { return (u_ & kExponentMask) != 0 || Significand() == 0; } + bool IsZero() const { return (u_ & (kExponentMask | kSignificandMask)) == 0; } + + uint64_t IntegerSignificand() const { return IsNormal() ? Significand() | kHiddenBit : Significand(); } + int IntegerExponent() const { return (IsNormal() ? 
Exponent() : kDenormalExponent) - kSignificandSize; } + uint64_t ToBias() const { return (u_ & kSignMask) ? ~u_ + 1 : u_ | kSignMask; } + + static unsigned EffectiveSignificandSize(int order) { + if (order >= -1021) + return 53; + else if (order <= -1074) + return 0; + else + return static_cast(order) + 1074; + } + +private: + static const int kSignificandSize = 52; + static const int kExponentBias = 0x3FF; + static const int kDenormalExponent = 1 - kExponentBias; + static const uint64_t kSignMask = RAPIDJSON_UINT64_C2(0x80000000, 0x00000000); + static const uint64_t kExponentMask = RAPIDJSON_UINT64_C2(0x7FF00000, 0x00000000); + static const uint64_t kSignificandMask = RAPIDJSON_UINT64_C2(0x000FFFFF, 0xFFFFFFFF); + static const uint64_t kHiddenBit = RAPIDJSON_UINT64_C2(0x00100000, 0x00000000); + + union { + double d_; + uint64_t u_; + }; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_IEEE754_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/itoa.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/itoa.h new file mode 100644 index 0000000..01a4e7e --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/itoa.h @@ -0,0 +1,304 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
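+
+// Usage sketch (illustrative, not part of the upstream header): the *toa helpers in
+// this file write decimal digits into a caller-supplied buffer and return a pointer
+// one past the last digit written; they do not append a terminating '\0'. A typical
+// call looks like:
+//
+//     char buf[21];                                    // 20 digits of 2^64-1 plus '\0'
+//     char* end = rapidjson::internal::u64toa(18446744073709551615ULL, buf);
+//     *end = '\0';                                     // buf now holds "18446744073709551615"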
+ +#ifndef RAPIDJSON_ITOA_ +#define RAPIDJSON_ITOA_ + +#include "../rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +inline const char* GetDigitsLut() { + static const char cDigitsLut[200] = { + '0','0','0','1','0','2','0','3','0','4','0','5','0','6','0','7','0','8','0','9', + '1','0','1','1','1','2','1','3','1','4','1','5','1','6','1','7','1','8','1','9', + '2','0','2','1','2','2','2','3','2','4','2','5','2','6','2','7','2','8','2','9', + '3','0','3','1','3','2','3','3','3','4','3','5','3','6','3','7','3','8','3','9', + '4','0','4','1','4','2','4','3','4','4','4','5','4','6','4','7','4','8','4','9', + '5','0','5','1','5','2','5','3','5','4','5','5','5','6','5','7','5','8','5','9', + '6','0','6','1','6','2','6','3','6','4','6','5','6','6','6','7','6','8','6','9', + '7','0','7','1','7','2','7','3','7','4','7','5','7','6','7','7','7','8','7','9', + '8','0','8','1','8','2','8','3','8','4','8','5','8','6','8','7','8','8','8','9', + '9','0','9','1','9','2','9','3','9','4','9','5','9','6','9','7','9','8','9','9' + }; + return cDigitsLut; +} + +inline char* u32toa(uint32_t value, char* buffer) { + const char* cDigitsLut = GetDigitsLut(); + + if (value < 10000) { + const uint32_t d1 = (value / 100) << 1; + const uint32_t d2 = (value % 100) << 1; + + if (value >= 1000) + *buffer++ = cDigitsLut[d1]; + if (value >= 100) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 10) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + } + else if (value < 100000000) { + // value = bbbbcccc + const uint32_t b = value / 10000; + const uint32_t c = value % 10000; + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buffer++ = cDigitsLut[d1]; + if (value >= 1000000) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 100000) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + else { + // value = aabbbbcccc in decimal + + const uint32_t a = value / 100000000; // 1 to 42 + value %= 100000000; + + if (a >= 10) { + const unsigned i = a << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else + *buffer++ = static_cast('0' + static_cast(a)); + + const uint32_t b = value / 10000; // 0 to 9999 + const uint32_t c = value % 10000; // 0 to 9999 + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + *buffer++ = cDigitsLut[d1]; + *buffer++ = cDigitsLut[d1 + 1]; + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + return buffer; +} + +inline char* i32toa(int32_t value, char* buffer) { + uint32_t u = static_cast(value); + if (value < 0) { + *buffer++ = '-'; + u = ~u + 1; + } + + return u32toa(u, buffer); +} + +inline char* u64toa(uint64_t value, char* buffer) { + const char* cDigitsLut = GetDigitsLut(); + const uint64_t kTen8 = 100000000; + const uint64_t kTen9 = kTen8 * 10; + const uint64_t kTen10 = kTen8 * 100; + const uint64_t kTen11 = kTen8 * 1000; + const uint64_t kTen12 = kTen8 * 10000; + const uint64_t kTen13 = kTen8 * 100000; + const uint64_t kTen14 = kTen8 * 1000000; + const uint64_t kTen15 = kTen8 * 10000000; + const uint64_t kTen16 = kTen8 * kTen8; 
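+    // Note: the code below splits the value into 8-digit groups using the kTen*
+    // thresholds above and emits each group two digits at a time through the shared
+    // 00..99 lookup table. As an illustration, 123456789 lies in [kTen8, kTen16), so
+    // v0 = value / kTen8 = 1 and v1 = value % kTen8 = 23456789; the value >= kTen9 ..
+    // kTen15 checks all fail, so the leading zeros of the high group are skipped and
+    // only "1" is emitted, followed by the full eight digits of v1.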
+ + if (value < kTen8) { + uint32_t v = static_cast(value); + if (v < 10000) { + const uint32_t d1 = (v / 100) << 1; + const uint32_t d2 = (v % 100) << 1; + + if (v >= 1000) + *buffer++ = cDigitsLut[d1]; + if (v >= 100) + *buffer++ = cDigitsLut[d1 + 1]; + if (v >= 10) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + } + else { + // value = bbbbcccc + const uint32_t b = v / 10000; + const uint32_t c = v % 10000; + + const uint32_t d1 = (b / 100) << 1; + const uint32_t d2 = (b % 100) << 1; + + const uint32_t d3 = (c / 100) << 1; + const uint32_t d4 = (c % 100) << 1; + + if (value >= 10000000) + *buffer++ = cDigitsLut[d1]; + if (value >= 1000000) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= 100000) + *buffer++ = cDigitsLut[d2]; + *buffer++ = cDigitsLut[d2 + 1]; + + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + } + } + else if (value < kTen16) { + const uint32_t v0 = static_cast(value / kTen8); + const uint32_t v1 = static_cast(value % kTen8); + + const uint32_t b0 = v0 / 10000; + const uint32_t c0 = v0 % 10000; + + const uint32_t d1 = (b0 / 100) << 1; + const uint32_t d2 = (b0 % 100) << 1; + + const uint32_t d3 = (c0 / 100) << 1; + const uint32_t d4 = (c0 % 100) << 1; + + const uint32_t b1 = v1 / 10000; + const uint32_t c1 = v1 % 10000; + + const uint32_t d5 = (b1 / 100) << 1; + const uint32_t d6 = (b1 % 100) << 1; + + const uint32_t d7 = (c1 / 100) << 1; + const uint32_t d8 = (c1 % 100) << 1; + + if (value >= kTen15) + *buffer++ = cDigitsLut[d1]; + if (value >= kTen14) + *buffer++ = cDigitsLut[d1 + 1]; + if (value >= kTen13) + *buffer++ = cDigitsLut[d2]; + if (value >= kTen12) + *buffer++ = cDigitsLut[d2 + 1]; + if (value >= kTen11) + *buffer++ = cDigitsLut[d3]; + if (value >= kTen10) + *buffer++ = cDigitsLut[d3 + 1]; + if (value >= kTen9) + *buffer++ = cDigitsLut[d4]; + if (value >= kTen8) + *buffer++ = cDigitsLut[d4 + 1]; + + *buffer++ = cDigitsLut[d5]; + *buffer++ = cDigitsLut[d5 + 1]; + *buffer++ = cDigitsLut[d6]; + *buffer++ = cDigitsLut[d6 + 1]; + *buffer++ = cDigitsLut[d7]; + *buffer++ = cDigitsLut[d7 + 1]; + *buffer++ = cDigitsLut[d8]; + *buffer++ = cDigitsLut[d8 + 1]; + } + else { + const uint32_t a = static_cast(value / kTen16); // 1 to 1844 + value %= kTen16; + + if (a < 10) + *buffer++ = static_cast('0' + static_cast(a)); + else if (a < 100) { + const uint32_t i = a << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else if (a < 1000) { + *buffer++ = static_cast('0' + static_cast(a / 100)); + + const uint32_t i = (a % 100) << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + } + else { + const uint32_t i = (a / 100) << 1; + const uint32_t j = (a % 100) << 1; + *buffer++ = cDigitsLut[i]; + *buffer++ = cDigitsLut[i + 1]; + *buffer++ = cDigitsLut[j]; + *buffer++ = cDigitsLut[j + 1]; + } + + const uint32_t v0 = static_cast(value / kTen8); + const uint32_t v1 = static_cast(value % kTen8); + + const uint32_t b0 = v0 / 10000; + const uint32_t c0 = v0 % 10000; + + const uint32_t d1 = (b0 / 100) << 1; + const uint32_t d2 = (b0 % 100) << 1; + + const uint32_t d3 = (c0 / 100) << 1; + const uint32_t d4 = (c0 % 100) << 1; + + const uint32_t b1 = v1 / 10000; + const uint32_t c1 = v1 % 10000; + + const uint32_t d5 = (b1 / 100) << 1; + const uint32_t d6 = (b1 % 100) << 1; + + const uint32_t d7 = (c1 / 100) << 1; + const uint32_t d8 = (c1 % 100) << 1; + + *buffer++ = cDigitsLut[d1]; + *buffer++ = cDigitsLut[d1 + 1]; + *buffer++ = cDigitsLut[d2]; + 
*buffer++ = cDigitsLut[d2 + 1]; + *buffer++ = cDigitsLut[d3]; + *buffer++ = cDigitsLut[d3 + 1]; + *buffer++ = cDigitsLut[d4]; + *buffer++ = cDigitsLut[d4 + 1]; + *buffer++ = cDigitsLut[d5]; + *buffer++ = cDigitsLut[d5 + 1]; + *buffer++ = cDigitsLut[d6]; + *buffer++ = cDigitsLut[d6 + 1]; + *buffer++ = cDigitsLut[d7]; + *buffer++ = cDigitsLut[d7 + 1]; + *buffer++ = cDigitsLut[d8]; + *buffer++ = cDigitsLut[d8 + 1]; + } + + return buffer; +} + +inline char* i64toa(int64_t value, char* buffer) { + uint64_t u = static_cast(value); + if (value < 0) { + *buffer++ = '-'; + u = ~u + 1; + } + + return u64toa(u, buffer); +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ITOA_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/meta.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/meta.h new file mode 100644 index 0000000..5a9aaa4 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/meta.h @@ -0,0 +1,181 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_META_H_ +#define RAPIDJSON_INTERNAL_META_H_ + +#include "../rapidjson.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif +#if defined(_MSC_VER) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(6334) +#endif + +#if RAPIDJSON_HAS_CXX11_TYPETRAITS +#include +#endif + +//@cond RAPIDJSON_INTERNAL +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +// Helper to wrap/convert arbitrary types to void, useful for arbitrary type matching +template struct Void { typedef void Type; }; + +/////////////////////////////////////////////////////////////////////////////// +// BoolType, TrueType, FalseType +// +template struct BoolType { + static const bool Value = Cond; + typedef BoolType Type; +}; +typedef BoolType TrueType; +typedef BoolType FalseType; + + +/////////////////////////////////////////////////////////////////////////////// +// SelectIf, BoolExpr, NotExpr, AndExpr, OrExpr +// + +template struct SelectIfImpl { template struct Apply { typedef T1 Type; }; }; +template <> struct SelectIfImpl { template struct Apply { typedef T2 Type; }; }; +template struct SelectIfCond : SelectIfImpl::template Apply {}; +template struct SelectIf : SelectIfCond {}; + +template struct AndExprCond : FalseType {}; +template <> struct AndExprCond : TrueType {}; +template struct OrExprCond : TrueType {}; +template <> struct OrExprCond : FalseType {}; + +template struct BoolExpr : SelectIf::Type {}; +template struct NotExpr : SelectIf::Type {}; +template struct AndExpr : AndExprCond::Type {}; +template struct OrExpr : OrExprCond::Type {}; + + +/////////////////////////////////////////////////////////////////////////////// +// AddConst, MaybeAddConst, RemoveConst +template struct AddConst { typedef const T Type; }; +template struct MaybeAddConst : SelectIfCond {}; +template struct RemoveConst { typedef T Type; 
}; +template struct RemoveConst { typedef T Type; }; + + +/////////////////////////////////////////////////////////////////////////////// +// IsSame, IsConst, IsMoreConst, IsPointer +// +template struct IsSame : FalseType {}; +template struct IsSame : TrueType {}; + +template struct IsConst : FalseType {}; +template struct IsConst : TrueType {}; + +template +struct IsMoreConst + : AndExpr::Type, typename RemoveConst::Type>, + BoolType::Value >= IsConst::Value> >::Type {}; + +template struct IsPointer : FalseType {}; +template struct IsPointer : TrueType {}; + +/////////////////////////////////////////////////////////////////////////////// +// IsBaseOf +// +#if RAPIDJSON_HAS_CXX11_TYPETRAITS + +template struct IsBaseOf + : BoolType< ::std::is_base_of::value> {}; + +#else // simplified version adopted from Boost + +template struct IsBaseOfImpl { + RAPIDJSON_STATIC_ASSERT(sizeof(B) != 0); + RAPIDJSON_STATIC_ASSERT(sizeof(D) != 0); + + typedef char (&Yes)[1]; + typedef char (&No) [2]; + + template + static Yes Check(const D*, T); + static No Check(const B*, int); + + struct Host { + operator const B*() const; + operator const D*(); + }; + + enum { Value = (sizeof(Check(Host(), 0)) == sizeof(Yes)) }; +}; + +template struct IsBaseOf + : OrExpr, BoolExpr > >::Type {}; + +#endif // RAPIDJSON_HAS_CXX11_TYPETRAITS + + +////////////////////////////////////////////////////////////////////////// +// EnableIf / DisableIf +// +template struct EnableIfCond { typedef T Type; }; +template struct EnableIfCond { /* empty */ }; + +template struct DisableIfCond { typedef T Type; }; +template struct DisableIfCond { /* empty */ }; + +template +struct EnableIf : EnableIfCond {}; + +template +struct DisableIf : DisableIfCond {}; + +// SFINAE helpers +struct SfinaeTag {}; +template struct RemoveSfinaeTag; +template struct RemoveSfinaeTag { typedef T Type; }; + +#define RAPIDJSON_REMOVEFPTR_(type) \ + typename ::RAPIDJSON_NAMESPACE::internal::RemoveSfinaeTag \ + < ::RAPIDJSON_NAMESPACE::internal::SfinaeTag&(*) type>::Type + +#define RAPIDJSON_ENABLEIF(cond) \ + typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \ + ::Type * = NULL + +#define RAPIDJSON_DISABLEIF(cond) \ + typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \ + ::Type * = NULL + +#define RAPIDJSON_ENABLEIF_RETURN(cond,returntype) \ + typename ::RAPIDJSON_NAMESPACE::internal::EnableIf \ + ::Type + +#define RAPIDJSON_DISABLEIF_RETURN(cond,returntype) \ + typename ::RAPIDJSON_NAMESPACE::internal::DisableIf \ + ::Type + +} // namespace internal +RAPIDJSON_NAMESPACE_END +//@endcond + +#if defined(__GNUC__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_META_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/pow10.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/pow10.h new file mode 100644 index 0000000..02f475d --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/pow10.h @@ -0,0 +1,55 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_POW10_ +#define RAPIDJSON_POW10_ + +#include "../rapidjson.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +//! Computes integer powers of 10 in double (10.0^n). +/*! This function uses lookup table for fast and accurate results. + \param n non-negative exponent. Must <= 308. + \return 10.0^n +*/ +inline double Pow10(int n) { + static const double e[] = { // 1e-0...1e308: 309 * 8 bytes = 2472 bytes + 1e+0, + 1e+1, 1e+2, 1e+3, 1e+4, 1e+5, 1e+6, 1e+7, 1e+8, 1e+9, 1e+10, 1e+11, 1e+12, 1e+13, 1e+14, 1e+15, 1e+16, 1e+17, 1e+18, 1e+19, 1e+20, + 1e+21, 1e+22, 1e+23, 1e+24, 1e+25, 1e+26, 1e+27, 1e+28, 1e+29, 1e+30, 1e+31, 1e+32, 1e+33, 1e+34, 1e+35, 1e+36, 1e+37, 1e+38, 1e+39, 1e+40, + 1e+41, 1e+42, 1e+43, 1e+44, 1e+45, 1e+46, 1e+47, 1e+48, 1e+49, 1e+50, 1e+51, 1e+52, 1e+53, 1e+54, 1e+55, 1e+56, 1e+57, 1e+58, 1e+59, 1e+60, + 1e+61, 1e+62, 1e+63, 1e+64, 1e+65, 1e+66, 1e+67, 1e+68, 1e+69, 1e+70, 1e+71, 1e+72, 1e+73, 1e+74, 1e+75, 1e+76, 1e+77, 1e+78, 1e+79, 1e+80, + 1e+81, 1e+82, 1e+83, 1e+84, 1e+85, 1e+86, 1e+87, 1e+88, 1e+89, 1e+90, 1e+91, 1e+92, 1e+93, 1e+94, 1e+95, 1e+96, 1e+97, 1e+98, 1e+99, 1e+100, + 1e+101,1e+102,1e+103,1e+104,1e+105,1e+106,1e+107,1e+108,1e+109,1e+110,1e+111,1e+112,1e+113,1e+114,1e+115,1e+116,1e+117,1e+118,1e+119,1e+120, + 1e+121,1e+122,1e+123,1e+124,1e+125,1e+126,1e+127,1e+128,1e+129,1e+130,1e+131,1e+132,1e+133,1e+134,1e+135,1e+136,1e+137,1e+138,1e+139,1e+140, + 1e+141,1e+142,1e+143,1e+144,1e+145,1e+146,1e+147,1e+148,1e+149,1e+150,1e+151,1e+152,1e+153,1e+154,1e+155,1e+156,1e+157,1e+158,1e+159,1e+160, + 1e+161,1e+162,1e+163,1e+164,1e+165,1e+166,1e+167,1e+168,1e+169,1e+170,1e+171,1e+172,1e+173,1e+174,1e+175,1e+176,1e+177,1e+178,1e+179,1e+180, + 1e+181,1e+182,1e+183,1e+184,1e+185,1e+186,1e+187,1e+188,1e+189,1e+190,1e+191,1e+192,1e+193,1e+194,1e+195,1e+196,1e+197,1e+198,1e+199,1e+200, + 1e+201,1e+202,1e+203,1e+204,1e+205,1e+206,1e+207,1e+208,1e+209,1e+210,1e+211,1e+212,1e+213,1e+214,1e+215,1e+216,1e+217,1e+218,1e+219,1e+220, + 1e+221,1e+222,1e+223,1e+224,1e+225,1e+226,1e+227,1e+228,1e+229,1e+230,1e+231,1e+232,1e+233,1e+234,1e+235,1e+236,1e+237,1e+238,1e+239,1e+240, + 1e+241,1e+242,1e+243,1e+244,1e+245,1e+246,1e+247,1e+248,1e+249,1e+250,1e+251,1e+252,1e+253,1e+254,1e+255,1e+256,1e+257,1e+258,1e+259,1e+260, + 1e+261,1e+262,1e+263,1e+264,1e+265,1e+266,1e+267,1e+268,1e+269,1e+270,1e+271,1e+272,1e+273,1e+274,1e+275,1e+276,1e+277,1e+278,1e+279,1e+280, + 1e+281,1e+282,1e+283,1e+284,1e+285,1e+286,1e+287,1e+288,1e+289,1e+290,1e+291,1e+292,1e+293,1e+294,1e+295,1e+296,1e+297,1e+298,1e+299,1e+300, + 1e+301,1e+302,1e+303,1e+304,1e+305,1e+306,1e+307,1e+308 + }; + RAPIDJSON_ASSERT(n >= 0 && n <= 308); + return e[n]; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_POW10_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/regex.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/regex.h new file mode 100644 index 0000000..422a524 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/regex.h @@ -0,0 +1,701 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. 
+// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_REGEX_H_ +#define RAPIDJSON_INTERNAL_REGEX_H_ + +#include "../allocators.h" +#include "../stream.h" +#include "stack.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(switch-enum) +RAPIDJSON_DIAG_OFF(implicit-fallthrough) +#endif + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +#ifndef RAPIDJSON_REGEX_VERBOSE +#define RAPIDJSON_REGEX_VERBOSE 0 +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +/////////////////////////////////////////////////////////////////////////////// +// GenericRegex + +static const SizeType kRegexInvalidState = ~SizeType(0); //!< Represents an invalid index in GenericRegex::State::out, out1 +static const SizeType kRegexInvalidRange = ~SizeType(0); + +//! Regular expression engine with subset of ECMAscript grammar. +/*! + Supported regular expression syntax: + - \c ab Concatenation + - \c a|b Alternation + - \c a? Zero or one + - \c a* Zero or more + - \c a+ One or more + - \c a{3} Exactly 3 times + - \c a{3,} At least 3 times + - \c a{3,5} 3 to 5 times + - \c (ab) Grouping + - \c ^a At the beginning + - \c a$ At the end + - \c . Any character + - \c [abc] Character classes + - \c [a-c] Character class range + - \c [a-z0-9_] Character class combination + - \c [^abc] Negated character classes + - \c [^a-c] Negated character class range + - \c [\b] Backspace (U+0008) + - \c \\| \\\\ ... Escape characters + - \c \\f Form feed (U+000C) + - \c \\n Line feed (U+000A) + - \c \\r Carriage return (U+000D) + - \c \\t Tab (U+0009) + - \c \\v Vertical tab (U+000B) + + \note This is a Thompson NFA engine, implemented with reference to + Cox, Russ. 
"Regular Expression Matching Can Be Simple And Fast (but is slow in Java, Perl, PHP, Python, Ruby,...).", + https://swtch.com/~rsc/regexp/regexp1.html +*/ +template +class GenericRegex { +public: + typedef typename Encoding::Ch Ch; + + GenericRegex(const Ch* source, Allocator* allocator = 0) : + states_(allocator, 256), ranges_(allocator, 256), root_(kRegexInvalidState), stateCount_(), rangeCount_(), + stateSet_(), state0_(allocator, 0), state1_(allocator, 0), anchorBegin_(), anchorEnd_() + { + GenericStringStream ss(source); + DecodedStream > ds(ss); + Parse(ds); + } + + ~GenericRegex() { + Allocator::Free(stateSet_); + } + + bool IsValid() const { + return root_ != kRegexInvalidState; + } + + template + bool Match(InputStream& is) const { + return SearchWithAnchoring(is, true, true); + } + + bool Match(const Ch* s) const { + GenericStringStream is(s); + return Match(is); + } + + template + bool Search(InputStream& is) const { + return SearchWithAnchoring(is, anchorBegin_, anchorEnd_); + } + + bool Search(const Ch* s) const { + GenericStringStream is(s); + return Search(is); + } + +private: + enum Operator { + kZeroOrOne, + kZeroOrMore, + kOneOrMore, + kConcatenation, + kAlternation, + kLeftParenthesis + }; + + static const unsigned kAnyCharacterClass = 0xFFFFFFFF; //!< For '.' + static const unsigned kRangeCharacterClass = 0xFFFFFFFE; + static const unsigned kRangeNegationFlag = 0x80000000; + + struct Range { + unsigned start; // + unsigned end; + SizeType next; + }; + + struct State { + SizeType out; //!< Equals to kInvalid for matching state + SizeType out1; //!< Equals to non-kInvalid for split + SizeType rangeStart; + unsigned codepoint; + }; + + struct Frag { + Frag(SizeType s, SizeType o, SizeType m) : start(s), out(o), minIndex(m) {} + SizeType start; + SizeType out; //!< link-list of all output states + SizeType minIndex; + }; + + template + class DecodedStream { + public: + DecodedStream(SourceStream& ss) : ss_(ss), codepoint_() { Decode(); } + unsigned Peek() { return codepoint_; } + unsigned Take() { + unsigned c = codepoint_; + if (c) // No further decoding when '\0' + Decode(); + return c; + } + + private: + void Decode() { + if (!Encoding::Decode(ss_, &codepoint_)) + codepoint_ = 0; + } + + SourceStream& ss_; + unsigned codepoint_; + }; + + State& GetState(SizeType index) { + RAPIDJSON_ASSERT(index < stateCount_); + return states_.template Bottom()[index]; + } + + const State& GetState(SizeType index) const { + RAPIDJSON_ASSERT(index < stateCount_); + return states_.template Bottom()[index]; + } + + Range& GetRange(SizeType index) { + RAPIDJSON_ASSERT(index < rangeCount_); + return ranges_.template Bottom()[index]; + } + + const Range& GetRange(SizeType index) const { + RAPIDJSON_ASSERT(index < rangeCount_); + return ranges_.template Bottom()[index]; + } + + template + void Parse(DecodedStream& ds) { + Allocator allocator; + Stack operandStack(&allocator, 256); // Frag + Stack operatorStack(&allocator, 256); // Operator + Stack atomCountStack(&allocator, 256); // unsigned (Atom per parenthesis) + + *atomCountStack.template Push() = 0; + + unsigned codepoint; + while (ds.Peek() != 0) { + switch (codepoint = ds.Take()) { + case '^': + anchorBegin_ = true; + break; + + case '$': + anchorEnd_ = true; + break; + + case '|': + while (!operatorStack.Empty() && *operatorStack.template Top() < kAlternation) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + *operatorStack.template Push() = kAlternation; + *atomCountStack.template Top() = 0; + break; + + case '(': 
+ *operatorStack.template Push() = kLeftParenthesis; + *atomCountStack.template Push() = 0; + break; + + case ')': + while (!operatorStack.Empty() && *operatorStack.template Top() != kLeftParenthesis) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + if (operatorStack.Empty()) + return; + operatorStack.template Pop(1); + atomCountStack.template Pop(1); + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '?': + if (!Eval(operandStack, kZeroOrOne)) + return; + break; + + case '*': + if (!Eval(operandStack, kZeroOrMore)) + return; + break; + + case '+': + if (!Eval(operandStack, kOneOrMore)) + return; + break; + + case '{': + { + unsigned n, m; + if (!ParseUnsigned(ds, &n)) + return; + + if (ds.Peek() == ',') { + ds.Take(); + if (ds.Peek() == '}') + m = kInfinityQuantifier; + else if (!ParseUnsigned(ds, &m) || m < n) + return; + } + else + m = n; + + if (!EvalQuantifier(operandStack, n, m) || ds.Peek() != '}') + return; + ds.Take(); + } + break; + + case '.': + PushOperand(operandStack, kAnyCharacterClass); + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '[': + { + SizeType range; + if (!ParseRange(ds, &range)) + return; + SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, kRangeCharacterClass); + GetState(s).rangeStart = range; + *operandStack.template Push() = Frag(s, s, s); + } + ImplicitConcatenation(atomCountStack, operatorStack); + break; + + case '\\': // Escape character + if (!CharacterEscape(ds, &codepoint)) + return; // Unsupported escape character + // fall through to default + + default: // Pattern character + PushOperand(operandStack, codepoint); + ImplicitConcatenation(atomCountStack, operatorStack); + } + } + + while (!operatorStack.Empty()) + if (!Eval(operandStack, *operatorStack.template Pop(1))) + return; + + // Link the operand to matching state. 
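+        // That is: if parsing succeeded, exactly one fragment remains on the operand
+        // stack. Its dangling out-list is patched to a freshly created state whose
+        // out == kRegexInvalidState, which is what AddState() later treats as the
+        // accepting (match) state, and root_ becomes the fragment's start state.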
+ if (operandStack.GetSize() == sizeof(Frag)) { + Frag* e = operandStack.template Pop(1); + Patch(e->out, NewState(kRegexInvalidState, kRegexInvalidState, 0)); + root_ = e->start; + +#if RAPIDJSON_REGEX_VERBOSE + printf("root: %d\n", root_); + for (SizeType i = 0; i < stateCount_ ; i++) { + State& s = GetState(i); + printf("[%2d] out: %2d out1: %2d c: '%c'\n", i, s.out, s.out1, (char)s.codepoint); + } + printf("\n"); +#endif + } + + // Preallocate buffer for SearchWithAnchoring() + RAPIDJSON_ASSERT(stateSet_ == 0); + if (stateCount_ > 0) { + stateSet_ = static_cast(states_.GetAllocator().Malloc(GetStateSetSize())); + state0_.template Reserve(stateCount_); + state1_.template Reserve(stateCount_); + } + } + + SizeType NewState(SizeType out, SizeType out1, unsigned codepoint) { + State* s = states_.template Push(); + s->out = out; + s->out1 = out1; + s->codepoint = codepoint; + s->rangeStart = kRegexInvalidRange; + return stateCount_++; + } + + void PushOperand(Stack& operandStack, unsigned codepoint) { + SizeType s = NewState(kRegexInvalidState, kRegexInvalidState, codepoint); + *operandStack.template Push() = Frag(s, s, s); + } + + void ImplicitConcatenation(Stack& atomCountStack, Stack& operatorStack) { + if (*atomCountStack.template Top()) + *operatorStack.template Push() = kConcatenation; + (*atomCountStack.template Top())++; + } + + SizeType Append(SizeType l1, SizeType l2) { + SizeType old = l1; + while (GetState(l1).out != kRegexInvalidState) + l1 = GetState(l1).out; + GetState(l1).out = l2; + return old; + } + + void Patch(SizeType l, SizeType s) { + for (SizeType next; l != kRegexInvalidState; l = next) { + next = GetState(l).out; + GetState(l).out = s; + } + } + + bool Eval(Stack& operandStack, Operator op) { + switch (op) { + case kConcatenation: + RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag) * 2); + { + Frag e2 = *operandStack.template Pop(1); + Frag e1 = *operandStack.template Pop(1); + Patch(e1.out, e2.start); + *operandStack.template Push() = Frag(e1.start, e2.out, Min(e1.minIndex, e2.minIndex)); + } + return true; + + case kAlternation: + if (operandStack.GetSize() >= sizeof(Frag) * 2) { + Frag e2 = *operandStack.template Pop(1); + Frag e1 = *operandStack.template Pop(1); + SizeType s = NewState(e1.start, e2.start, 0); + *operandStack.template Push() = Frag(s, Append(e1.out, e2.out), Min(e1.minIndex, e2.minIndex)); + return true; + } + return false; + + case kZeroOrOne: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + *operandStack.template Push() = Frag(s, Append(e.out, s), e.minIndex); + return true; + } + return false; + + case kZeroOrMore: + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + Patch(e.out, s); + *operandStack.template Push() = Frag(s, s, e.minIndex); + return true; + } + return false; + + default: + RAPIDJSON_ASSERT(op == kOneOrMore); + if (operandStack.GetSize() >= sizeof(Frag)) { + Frag e = *operandStack.template Pop(1); + SizeType s = NewState(kRegexInvalidState, e.start, 0); + Patch(e.out, s); + *operandStack.template Push() = Frag(e.start, s, e.minIndex); + return true; + } + return false; + } + } + + bool EvalQuantifier(Stack& operandStack, unsigned n, unsigned m) { + RAPIDJSON_ASSERT(n <= m); + RAPIDJSON_ASSERT(operandStack.GetSize() >= sizeof(Frag)); + + if (n == 0) { + if (m == 0) // a{0} not support + return false; + else if (m == 
kInfinityQuantifier) + Eval(operandStack, kZeroOrMore); // a{0,} -> a* + else { + Eval(operandStack, kZeroOrOne); // a{0,5} -> a? + for (unsigned i = 0; i < m - 1; i++) + CloneTopOperand(operandStack); // a{0,5} -> a? a? a? a? a? + for (unsigned i = 0; i < m - 1; i++) + Eval(operandStack, kConcatenation); // a{0,5} -> a?a?a?a?a? + } + return true; + } + + for (unsigned i = 0; i < n - 1; i++) // a{3} -> a a a + CloneTopOperand(operandStack); + + if (m == kInfinityQuantifier) + Eval(operandStack, kOneOrMore); // a{3,} -> a a a+ + else if (m > n) { + CloneTopOperand(operandStack); // a{3,5} -> a a a a + Eval(operandStack, kZeroOrOne); // a{3,5} -> a a a a? + for (unsigned i = n; i < m - 1; i++) + CloneTopOperand(operandStack); // a{3,5} -> a a a a? a? + for (unsigned i = n; i < m; i++) + Eval(operandStack, kConcatenation); // a{3,5} -> a a aa?a? + } + + for (unsigned i = 0; i < n - 1; i++) + Eval(operandStack, kConcatenation); // a{3} -> aaa, a{3,} -> aaa+, a{3.5} -> aaaa?a? + + return true; + } + + static SizeType Min(SizeType a, SizeType b) { return a < b ? a : b; } + + void CloneTopOperand(Stack& operandStack) { + const Frag src = *operandStack.template Top(); // Copy constructor to prevent invalidation + SizeType count = stateCount_ - src.minIndex; // Assumes top operand contains states in [src->minIndex, stateCount_) + State* s = states_.template Push(count); + memcpy(s, &GetState(src.minIndex), count * sizeof(State)); + for (SizeType j = 0; j < count; j++) { + if (s[j].out != kRegexInvalidState) + s[j].out += count; + if (s[j].out1 != kRegexInvalidState) + s[j].out1 += count; + } + *operandStack.template Push() = Frag(src.start + count, src.out + count, src.minIndex + count); + stateCount_ += count; + } + + template + bool ParseUnsigned(DecodedStream& ds, unsigned* u) { + unsigned r = 0; + if (ds.Peek() < '0' || ds.Peek() > '9') + return false; + while (ds.Peek() >= '0' && ds.Peek() <= '9') { + if (r >= 429496729 && ds.Peek() > '5') // 2^32 - 1 = 4294967295 + return false; // overflow + r = r * 10 + (ds.Take() - '0'); + } + *u = r; + return true; + } + + template + bool ParseRange(DecodedStream& ds, SizeType* range) { + bool isBegin = true; + bool negate = false; + int step = 0; + SizeType start = kRegexInvalidRange; + SizeType current = kRegexInvalidRange; + unsigned codepoint; + while ((codepoint = ds.Take()) != 0) { + if (isBegin) { + isBegin = false; + if (codepoint == '^') { + negate = true; + continue; + } + } + + switch (codepoint) { + case ']': + if (start == kRegexInvalidRange) + return false; // Error: nothing inside [] + if (step == 2) { // Add trailing '-' + SizeType r = NewRange('-'); + RAPIDJSON_ASSERT(current != kRegexInvalidRange); + GetRange(current).next = r; + } + if (negate) + GetRange(start).start |= kRangeNegationFlag; + *range = start; + return true; + + case '\\': + if (ds.Peek() == 'b') { + ds.Take(); + codepoint = 0x0008; // Escape backspace character + } + else if (!CharacterEscape(ds, &codepoint)) + return false; + // fall through to default + + default: + switch (step) { + case 1: + if (codepoint == '-') { + step++; + break; + } + // fall through to step 0 for other characters + + case 0: + { + SizeType r = NewRange(codepoint); + if (current != kRegexInvalidRange) + GetRange(current).next = r; + if (start == kRegexInvalidRange) + start = r; + current = r; + } + step = 1; + break; + + default: + RAPIDJSON_ASSERT(step == 2); + GetRange(current).end = codepoint; + step = 0; + } + } + } + return false; + } + + SizeType NewRange(unsigned codepoint) { + Range* r = 
ranges_.template Push(); + r->start = r->end = codepoint; + r->next = kRegexInvalidRange; + return rangeCount_++; + } + + template + bool CharacterEscape(DecodedStream& ds, unsigned* escapedCodepoint) { + unsigned codepoint; + switch (codepoint = ds.Take()) { + case '^': + case '$': + case '|': + case '(': + case ')': + case '?': + case '*': + case '+': + case '.': + case '[': + case ']': + case '{': + case '}': + case '\\': + *escapedCodepoint = codepoint; return true; + case 'f': *escapedCodepoint = 0x000C; return true; + case 'n': *escapedCodepoint = 0x000A; return true; + case 'r': *escapedCodepoint = 0x000D; return true; + case 't': *escapedCodepoint = 0x0009; return true; + case 'v': *escapedCodepoint = 0x000B; return true; + default: + return false; // Unsupported escape character + } + } + + template + bool SearchWithAnchoring(InputStream& is, bool anchorBegin, bool anchorEnd) const { + RAPIDJSON_ASSERT(IsValid()); + DecodedStream ds(is); + + state0_.Clear(); + Stack *current = &state0_, *next = &state1_; + const size_t stateSetSize = GetStateSetSize(); + std::memset(stateSet_, 0, stateSetSize); + + bool matched = AddState(*current, root_); + unsigned codepoint; + while (!current->Empty() && (codepoint = ds.Take()) != 0) { + std::memset(stateSet_, 0, stateSetSize); + next->Clear(); + matched = false; + for (const SizeType* s = current->template Bottom(); s != current->template End(); ++s) { + const State& sr = GetState(*s); + if (sr.codepoint == codepoint || + sr.codepoint == kAnyCharacterClass || + (sr.codepoint == kRangeCharacterClass && MatchRange(sr.rangeStart, codepoint))) + { + matched = AddState(*next, sr.out) || matched; + if (!anchorEnd && matched) + return true; + } + if (!anchorBegin) + AddState(*next, root_); + } + internal::Swap(current, next); + } + + return matched; + } + + size_t GetStateSetSize() const { + return (stateCount_ + 31) / 32 * 4; + } + + // Return whether the added states is a match state + bool AddState(Stack& l, SizeType index) const { + RAPIDJSON_ASSERT(index != kRegexInvalidState); + + const State& s = GetState(index); + if (s.out1 != kRegexInvalidState) { // Split + bool matched = AddState(l, s.out); + return AddState(l, s.out1) || matched; + } + else if (!(stateSet_[index >> 5] & (1 << (index & 31)))) { + stateSet_[index >> 5] |= (1 << (index & 31)); + *l.template PushUnsafe() = index; + } + return s.out == kRegexInvalidState; // by using PushUnsafe() above, we can ensure s is not validated due to reallocation. 
+ } + + bool MatchRange(SizeType rangeIndex, unsigned codepoint) const { + bool yes = (GetRange(rangeIndex).start & kRangeNegationFlag) == 0; + while (rangeIndex != kRegexInvalidRange) { + const Range& r = GetRange(rangeIndex); + if (codepoint >= (r.start & ~kRangeNegationFlag) && codepoint <= r.end) + return yes; + rangeIndex = r.next; + } + return !yes; + } + + Stack states_; + Stack ranges_; + SizeType root_; + SizeType stateCount_; + SizeType rangeCount_; + + static const unsigned kInfinityQuantifier = ~0u; + + // For SearchWithAnchoring() + uint32_t* stateSet_; // allocated by states_.GetAllocator() + mutable Stack state0_; + mutable Stack state1_; + bool anchorBegin_; + bool anchorEnd_; +}; + +typedef GenericRegex > Regex; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_REGEX_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/stack.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/stack.h new file mode 100644 index 0000000..022c9aa --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/stack.h @@ -0,0 +1,230 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_STACK_H_ +#define RAPIDJSON_INTERNAL_STACK_H_ + +#include "../allocators.h" +#include "swap.h" + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +/////////////////////////////////////////////////////////////////////////////// +// Stack + +//! A type-unsafe stack for storing different types of data. +/*! \tparam Allocator Allocator for allocating stack memory. +*/ +template +class Stack { +public: + // Optimization note: Do not allocate memory for stack_ in constructor. + // Do it lazily when first Push() -> Expand() -> Resize(). 
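+    // Memory layout note: once a buffer exists, stack_ <= stackTop_ <= stackEnd_ holds;
+    // GetSize() and GetCapacity() are the distances stackTop_ - stack_ and
+    // stackEnd_ - stack_, and Expand() grows the capacity by roughly 1.5x per reallocation.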
+ Stack(Allocator* allocator, size_t stackCapacity) : allocator_(allocator), ownAllocator_(0), stack_(0), stackTop_(0), stackEnd_(0), initialCapacity_(stackCapacity) { + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Stack(Stack&& rhs) + : allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + stack_(rhs.stack_), + stackTop_(rhs.stackTop_), + stackEnd_(rhs.stackEnd_), + initialCapacity_(rhs.initialCapacity_) + { + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.stack_ = 0; + rhs.stackTop_ = 0; + rhs.stackEnd_ = 0; + rhs.initialCapacity_ = 0; + } +#endif + + ~Stack() { + Destroy(); + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + Stack& operator=(Stack&& rhs) { + if (&rhs != this) + { + Destroy(); + + allocator_ = rhs.allocator_; + ownAllocator_ = rhs.ownAllocator_; + stack_ = rhs.stack_; + stackTop_ = rhs.stackTop_; + stackEnd_ = rhs.stackEnd_; + initialCapacity_ = rhs.initialCapacity_; + + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + rhs.stack_ = 0; + rhs.stackTop_ = 0; + rhs.stackEnd_ = 0; + rhs.initialCapacity_ = 0; + } + return *this; + } +#endif + + void Swap(Stack& rhs) RAPIDJSON_NOEXCEPT { + internal::Swap(allocator_, rhs.allocator_); + internal::Swap(ownAllocator_, rhs.ownAllocator_); + internal::Swap(stack_, rhs.stack_); + internal::Swap(stackTop_, rhs.stackTop_); + internal::Swap(stackEnd_, rhs.stackEnd_); + internal::Swap(initialCapacity_, rhs.initialCapacity_); + } + + void Clear() { stackTop_ = stack_; } + + void ShrinkToFit() { + if (Empty()) { + // If the stack is empty, completely deallocate the memory. + Allocator::Free(stack_); + stack_ = 0; + stackTop_ = 0; + stackEnd_ = 0; + } + else + Resize(GetSize()); + } + + // Optimization note: try to minimize the size of this function for force inline. + // Expansion is run very infrequently, so it is moved to another (probably non-inline) function. + template + RAPIDJSON_FORCEINLINE void Reserve(size_t count = 1) { + // Expand the stack if needed + if (RAPIDJSON_UNLIKELY(stackTop_ + sizeof(T) * count > stackEnd_)) + Expand(count); + } + + template + RAPIDJSON_FORCEINLINE T* Push(size_t count = 1) { + Reserve(count); + return PushUnsafe(count); + } + + template + RAPIDJSON_FORCEINLINE T* PushUnsafe(size_t count = 1) { + RAPIDJSON_ASSERT(stackTop_ + sizeof(T) * count <= stackEnd_); + T* ret = reinterpret_cast(stackTop_); + stackTop_ += sizeof(T) * count; + return ret; + } + + template + T* Pop(size_t count) { + RAPIDJSON_ASSERT(GetSize() >= count * sizeof(T)); + stackTop_ -= count * sizeof(T); + return reinterpret_cast(stackTop_); + } + + template + T* Top() { + RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); + return reinterpret_cast(stackTop_ - sizeof(T)); + } + + template + const T* Top() const { + RAPIDJSON_ASSERT(GetSize() >= sizeof(T)); + return reinterpret_cast(stackTop_ - sizeof(T)); + } + + template + T* End() { return reinterpret_cast(stackTop_); } + + template + const T* End() const { return reinterpret_cast(stackTop_); } + + template + T* Bottom() { return reinterpret_cast(stack_); } + + template + const T* Bottom() const { return reinterpret_cast(stack_); } + + bool HasAllocator() const { + return allocator_ != 0; + } + + Allocator& GetAllocator() { + RAPIDJSON_ASSERT(allocator_); + return *allocator_; + } + + bool Empty() const { return stackTop_ == stack_; } + size_t GetSize() const { return static_cast(stackTop_ - stack_); } + size_t GetCapacity() const { return static_cast(stackEnd_ - stack_); } + +private: + template + void Expand(size_t count) { + // Only expand the capacity if the current stack exists. 
Otherwise just create a stack with initial capacity. + size_t newCapacity; + if (stack_ == 0) { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + newCapacity = initialCapacity_; + } else { + newCapacity = GetCapacity(); + newCapacity += (newCapacity + 1) / 2; + } + size_t newSize = GetSize() + sizeof(T) * count; + if (newCapacity < newSize) + newCapacity = newSize; + + Resize(newCapacity); + } + + void Resize(size_t newCapacity) { + const size_t size = GetSize(); // Backup the current size + stack_ = static_cast(allocator_->Realloc(stack_, GetCapacity(), newCapacity)); + stackTop_ = stack_ + size; + stackEnd_ = stack_ + newCapacity; + } + + void Destroy() { + Allocator::Free(stack_); + RAPIDJSON_DELETE(ownAllocator_); // Only delete if it is owned by the stack + } + + // Prohibit copy constructor & assignment operator. + Stack(const Stack&); + Stack& operator=(const Stack&); + + Allocator* allocator_; + Allocator* ownAllocator_; + char *stack_; + char *stackTop_; + char *stackEnd_; + size_t initialCapacity_; +}; + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_STACK_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/strfunc.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/strfunc.h new file mode 100644 index 0000000..2edfae5 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/strfunc.h @@ -0,0 +1,55 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_STRFUNC_H_ +#define RAPIDJSON_INTERNAL_STRFUNC_H_ + +#include "../stream.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +//! Custom strlen() which works on different character types. +/*! \tparam Ch Character type (e.g. char, wchar_t, short) + \param s Null-terminated input string. + \return Number of characters in the string. + \note This has the same semantics as strlen(), the return value is not number of Unicode codepoints. +*/ +template +inline SizeType StrLen(const Ch* s) { + const Ch* p = s; + while (*p) ++p; + return SizeType(p - s); +} + +//! Returns number of code points in a encoded string. 
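+//! \note Returns false as soon as Encoding::Decode() reports an invalid sequence; in
+//! that case *outCount is left unmodified.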
+template +bool CountStringCodePoint(const typename Encoding::Ch* s, SizeType length, SizeType* outCount) { + GenericStringStream is(s); + const typename Encoding::Ch* end = s + length; + SizeType count = 0; + while (is.src_ < end) { + unsigned codepoint; + if (!Encoding::Decode(is, &codepoint)) + return false; + count++; + } + *outCount = count; + return true; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_INTERNAL_STRFUNC_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/strtod.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/strtod.h new file mode 100644 index 0000000..289c413 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/strtod.h @@ -0,0 +1,269 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_STRTOD_ +#define RAPIDJSON_STRTOD_ + +#include "ieee754.h" +#include "biginteger.h" +#include "diyfp.h" +#include "pow10.h" + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +inline double FastPath(double significand, int exp) { + if (exp < -308) + return 0.0; + else if (exp >= 0) + return significand * internal::Pow10(exp); + else + return significand / internal::Pow10(-exp); +} + +inline double StrtodNormalPrecision(double d, int p) { + if (p < -308) { + // Prevent expSum < -308, making Pow10(p) = 0 + d = FastPath(d, -308); + d = FastPath(d, p + 308); + } + else + d = FastPath(d, p); + return d; +} + +template +inline T Min3(T a, T b, T c) { + T m = a; + if (m > b) m = b; + if (m > c) m = c; + return m; +} + +inline int CheckWithinHalfULP(double b, const BigInteger& d, int dExp) { + const Double db(b); + const uint64_t bInt = db.IntegerSignificand(); + const int bExp = db.IntegerExponent(); + const int hExp = bExp - 1; + + int dS_Exp2 = 0, dS_Exp5 = 0, bS_Exp2 = 0, bS_Exp5 = 0, hS_Exp2 = 0, hS_Exp5 = 0; + + // Adjust for decimal exponent + if (dExp >= 0) { + dS_Exp2 += dExp; + dS_Exp5 += dExp; + } + else { + bS_Exp2 -= dExp; + bS_Exp5 -= dExp; + hS_Exp2 -= dExp; + hS_Exp5 -= dExp; + } + + // Adjust for binary exponent + if (bExp >= 0) + bS_Exp2 += bExp; + else { + dS_Exp2 -= bExp; + hS_Exp2 -= bExp; + } + + // Adjust for half ulp exponent + if (hExp >= 0) + hS_Exp2 += hExp; + else { + dS_Exp2 -= hExp; + bS_Exp2 -= hExp; + } + + // Remove common power of two factor from all three scaled values + int common_Exp2 = Min3(dS_Exp2, bS_Exp2, hS_Exp2); + dS_Exp2 -= common_Exp2; + bS_Exp2 -= common_Exp2; + hS_Exp2 -= common_Exp2; + + BigInteger dS = d; + dS.MultiplyPow5(static_cast(dS_Exp5)) <<= static_cast(dS_Exp2); + + BigInteger bS(bInt); + bS.MultiplyPow5(static_cast(bS_Exp5)) <<= static_cast(bS_Exp2); + + BigInteger hS(1); + hS.MultiplyPow5(static_cast(hS_Exp5)) <<= static_cast(hS_Exp2); + + BigInteger delta(0); + dS.Difference(bS, &delta); + + return delta.Compare(hS); +} + +inline bool StrtodFast(double d, int p, double* result) { + 
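// [Editor's sketch, not part of the vendored patch] StrLen() and CountStringCodePoint()
// from strfunc.h above, applied to a UTF-8 string: byte length and code-point count
// differ as soon as non-ASCII characters appear.
#include "rapidjson/internal/strfunc.h"
#include <cassert>

inline void StrFuncSketch() {
    const char* s = "caf\xC3\xA9";                      // "café": 5 bytes, 4 code points
    rapidjson::SizeType bytes = rapidjson::internal::StrLen(s);
    rapidjson::SizeType codePoints = 0;
    bool ok = rapidjson::internal::CountStringCodePoint<rapidjson::UTF8<> >(s, bytes, &codePoints);
    assert(ok && bytes == 5 && codePoints == 4);
}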
// Use fast path for string-to-double conversion if possible + // see http://www.exploringbinary.com/fast-path-decimal-to-floating-point-conversion/ + if (p > 22 && p < 22 + 16) { + // Fast Path Cases In Disguise + d *= internal::Pow10(p - 22); + p = 22; + } + + if (p >= -22 && p <= 22 && d <= 9007199254740991.0) { // 2^53 - 1 + *result = FastPath(d, p); + return true; + } + else + return false; +} + +// Compute an approximation and see if it is within 1/2 ULP +inline bool StrtodDiyFp(const char* decimals, size_t length, size_t decimalPosition, int exp, double* result) { + uint64_t significand = 0; + size_t i = 0; // 2^64 - 1 = 18446744073709551615, 1844674407370955161 = 0x1999999999999999 + for (; i < length; i++) { + if (significand > RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || + (significand == RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) && decimals[i] > '5')) + break; + significand = significand * 10u + static_cast(decimals[i] - '0'); + } + + if (i < length && decimals[i] >= '5') // Rounding + significand++; + + size_t remaining = length - i; + const unsigned kUlpShift = 3; + const unsigned kUlp = 1 << kUlpShift; + int64_t error = (remaining == 0) ? 0 : kUlp / 2; + + DiyFp v(significand, 0); + v = v.Normalize(); + error <<= -v.e; + + const int dExp = static_cast(decimalPosition) - static_cast(i) + exp; + + int actualExp; + DiyFp cachedPower = GetCachedPower10(dExp, &actualExp); + if (actualExp != dExp) { + static const DiyFp kPow10[] = { + DiyFp(RAPIDJSON_UINT64_C2(0xa0000000, 00000000), -60), // 10^1 + DiyFp(RAPIDJSON_UINT64_C2(0xc8000000, 00000000), -57), // 10^2 + DiyFp(RAPIDJSON_UINT64_C2(0xfa000000, 00000000), -54), // 10^3 + DiyFp(RAPIDJSON_UINT64_C2(0x9c400000, 00000000), -50), // 10^4 + DiyFp(RAPIDJSON_UINT64_C2(0xc3500000, 00000000), -47), // 10^5 + DiyFp(RAPIDJSON_UINT64_C2(0xf4240000, 00000000), -44), // 10^6 + DiyFp(RAPIDJSON_UINT64_C2(0x98968000, 00000000), -40) // 10^7 + }; + int adjustment = dExp - actualExp - 1; + RAPIDJSON_ASSERT(adjustment >= 0 && adjustment < 7); + v = v * kPow10[adjustment]; + if (length + static_cast(adjustment)> 19u) // has more digits than decimal digits in 64-bit + error += kUlp / 2; + } + + v = v * cachedPower; + + error += kUlp + (error == 0 ? 
0 : 1); + + const int oldExp = v.e; + v = v.Normalize(); + error <<= oldExp - v.e; + + const unsigned effectiveSignificandSize = Double::EffectiveSignificandSize(64 + v.e); + unsigned precisionSize = 64 - effectiveSignificandSize; + if (precisionSize + kUlpShift >= 64) { + unsigned scaleExp = (precisionSize + kUlpShift) - 63; + v.f >>= scaleExp; + v.e += scaleExp; + error = (error >> scaleExp) + 1 + static_cast(kUlp); + precisionSize -= scaleExp; + } + + DiyFp rounded(v.f >> precisionSize, v.e + static_cast(precisionSize)); + const uint64_t precisionBits = (v.f & ((uint64_t(1) << precisionSize) - 1)) * kUlp; + const uint64_t halfWay = (uint64_t(1) << (precisionSize - 1)) * kUlp; + if (precisionBits >= halfWay + static_cast(error)) { + rounded.f++; + if (rounded.f & (DiyFp::kDpHiddenBit << 1)) { // rounding overflows mantissa (issue #340) + rounded.f >>= 1; + rounded.e++; + } + } + + *result = rounded.ToDouble(); + + return halfWay - static_cast(error) >= precisionBits || precisionBits >= halfWay + static_cast(error); +} + +inline double StrtodBigInteger(double approx, const char* decimals, size_t length, size_t decimalPosition, int exp) { + const BigInteger dInt(decimals, length); + const int dExp = static_cast(decimalPosition) - static_cast(length) + exp; + Double a(approx); + int cmp = CheckWithinHalfULP(a.Value(), dInt, dExp); + if (cmp < 0) + return a.Value(); // within half ULP + else if (cmp == 0) { + // Round towards even + if (a.Significand() & 1) + return a.NextPositiveDouble(); + else + return a.Value(); + } + else // adjustment + return a.NextPositiveDouble(); +} + +inline double StrtodFullPrecision(double d, int p, const char* decimals, size_t length, size_t decimalPosition, int exp) { + RAPIDJSON_ASSERT(d >= 0.0); + RAPIDJSON_ASSERT(length >= 1); + + double result; + if (StrtodFast(d, p, &result)) + return result; + + // Trim leading zeros + while (*decimals == '0' && length > 1) { + length--; + decimals++; + decimalPosition--; + } + + // Trim trailing zeros + while (decimals[length - 1] == '0' && length > 1) { + length--; + decimalPosition--; + exp++; + } + + // Trim right-most digits + const int kMaxDecimalDigit = 780; + if (static_cast(length) > kMaxDecimalDigit) { + int delta = (static_cast(length) - kMaxDecimalDigit); + exp += delta; + decimalPosition -= static_cast(delta); + length = kMaxDecimalDigit; + } + + // If too small, underflow to zero + if (int(length) + exp < -324) + return 0.0; + + if (StrtodDiyFp(decimals, length, decimalPosition, exp, &result)) + return result; + + // Use approximation from StrtodDiyFp and make adjustment with BigInteger comparison + return StrtodBigInteger(result, decimals, length, decimalPosition, exp); +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_STRTOD_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/internal/swap.h b/contrib/rapidjson/1.1.0/include/rapidjson/internal/swap.h new file mode 100644 index 0000000..666e49f --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/internal/swap.h @@ -0,0 +1,46 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. 
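// [Editor's sketch, not part of the vendored patch] Why StrtodFast() above is exact when
// |p| <= 22 and the significand is below 2^53: both operands are exactly representable
// doubles, so a single IEEE multiply/divide rounds the true decimal value correctly.
// Inputs outside that range fall through to StrtodDiyFp()/StrtodBigInteger().
#include <cassert>

inline void FastPathSketch() {
    double significand = 123456789.0;          // digits of "123456789e-5", exact in double
    int exp = -5;                              // decimal exponent
    double viaFastPath = significand / 1e5;    // what FastPath(significand, exp) computes for exp < 0
    assert(viaFastPath == 1234.56789);         // matches the correctly rounded literal
    (void)exp;
}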
You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_INTERNAL_SWAP_H_ +#define RAPIDJSON_INTERNAL_SWAP_H_ + +#include "../rapidjson.h" + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN +namespace internal { + +//! Custom swap() to avoid dependency on C++ header +/*! \tparam T Type of the arguments to swap, should be instantiated with primitive C++ types only. + \note This has the same semantics as std::swap(). +*/ +template +inline void Swap(T& a, T& b) RAPIDJSON_NOEXCEPT { + T tmp = a; + a = b; + b = tmp; +} + +} // namespace internal +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_INTERNAL_SWAP_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/istreamwrapper.h b/contrib/rapidjson/1.1.0/include/rapidjson/istreamwrapper.h new file mode 100644 index 0000000..f5fe289 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/istreamwrapper.h @@ -0,0 +1,115 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_ISTREAMWRAPPER_H_ +#define RAPIDJSON_ISTREAMWRAPPER_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4351) // new behavior: elements of array 'array' will be default initialized +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of \c std::basic_istream into RapidJSON's Stream concept. +/*! + The classes can be wrapped including but not limited to: + + - \c std::istringstream + - \c std::stringstream + - \c std::wistringstream + - \c std::wstringstream + - \c std::ifstream + - \c std::fstream + - \c std::wifstream + - \c std::wfstream + + \tparam StreamType Class derived from \c std::basic_istream. +*/ + +template +class BasicIStreamWrapper { +public: + typedef typename StreamType::char_type Ch; + BasicIStreamWrapper(StreamType& stream) : stream_(stream), count_(), peekBuffer_() {} + + Ch Peek() const { + typename StreamType::int_type c = stream_.peek(); + return RAPIDJSON_LIKELY(c != StreamType::traits_type::eof()) ? static_cast(c) : '\0'; + } + + Ch Take() { + typename StreamType::int_type c = stream_.get(); + if (RAPIDJSON_LIKELY(c != StreamType::traits_type::eof())) { + count_++; + return static_cast(c); + } + else + return '\0'; + } + + // tellg() may return -1 when failed. So we count by ourself. 
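// [Editor's sketch, not part of the vendored patch] internal::Swap() above is a tiny
// std::swap stand-in so these headers need not pull in <algorithm>/<utility>; Stack::Swap()
// and the stream wrappers only ever call it on primitives and raw pointers.
#include "rapidjson/internal/swap.h"
#include <cassert>

inline void SwapSketch() {
    int a = 1, b = 2;
    rapidjson::internal::Swap(a, b);
    assert(a == 2 && b == 1);
}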
+ size_t Tell() const { return count_; } + + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + RAPIDJSON_ASSERT(sizeof(Ch) == 1); // Only usable for byte stream. + int i; + bool hasError = false; + for (i = 0; i < 4; ++i) { + typename StreamType::int_type c = stream_.get(); + if (c == StreamType::traits_type::eof()) { + hasError = true; + stream_.clear(); + break; + } + peekBuffer_[i] = static_cast(c); + } + for (--i; i >= 0; --i) + stream_.putback(peekBuffer_[i]); + return !hasError ? peekBuffer_ : 0; + } + +private: + BasicIStreamWrapper(const BasicIStreamWrapper&); + BasicIStreamWrapper& operator=(const BasicIStreamWrapper&); + + StreamType& stream_; + size_t count_; //!< Number of characters read. Note: + mutable Ch peekBuffer_[4]; +}; + +typedef BasicIStreamWrapper IStreamWrapper; +typedef BasicIStreamWrapper WIStreamWrapper; + +#if defined(__clang__) || defined(_MSC_VER) +RAPIDJSON_DIAG_POP +#endif + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_ISTREAMWRAPPER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/memorybuffer.h b/contrib/rapidjson/1.1.0/include/rapidjson/memorybuffer.h new file mode 100644 index 0000000..39bee1d --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/memorybuffer.h @@ -0,0 +1,70 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_MEMORYBUFFER_H_ +#define RAPIDJSON_MEMORYBUFFER_H_ + +#include "stream.h" +#include "internal/stack.h" + +RAPIDJSON_NAMESPACE_BEGIN + +//! Represents an in-memory output byte stream. +/*! + This class is mainly for being wrapped by EncodedOutputStream or AutoUTFOutputStream. + + It is similar to FileWriteBuffer but the destination is an in-memory buffer instead of a file. + + Differences between MemoryBuffer and StringBuffer: + 1. StringBuffer has Encoding but MemoryBuffer is only a byte buffer. + 2. StringBuffer::GetString() returns a null-terminated string. MemoryBuffer::GetBuffer() returns a buffer without terminator. + + \tparam Allocator type for allocating memory buffer. 
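// [Editor's sketch, not part of the vendored patch] Typical use of BasicIStreamWrapper
// above: adapt any std::istream so a parser can consume it. Document and ParseStream()
// come from RapidJSON's document.h, which is not part of this hunk.
#include "rapidjson/document.h"
#include "rapidjson/istreamwrapper.h"
#include <sstream>

inline bool IStreamWrapperSketch() {
    std::istringstream src("{\"answer\": 42}");
    rapidjson::IStreamWrapper isw(src);     // counts characters itself, since tellg() may fail
    rapidjson::Document d;
    d.ParseStream(isw);
    return !d.HasParseError() && d["answer"].GetInt() == 42;
}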
+ \note implements Stream concept +*/ +template +struct GenericMemoryBuffer { + typedef char Ch; // byte + + GenericMemoryBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} + + void Put(Ch c) { *stack_.template Push() = c; } + void Flush() {} + + void Clear() { stack_.Clear(); } + void ShrinkToFit() { stack_.ShrinkToFit(); } + Ch* Push(size_t count) { return stack_.template Push(count); } + void Pop(size_t count) { stack_.template Pop(count); } + + const Ch* GetBuffer() const { + return stack_.template Bottom(); + } + + size_t GetSize() const { return stack_.GetSize(); } + + static const size_t kDefaultCapacity = 256; + mutable internal::Stack stack_; +}; + +typedef GenericMemoryBuffer<> MemoryBuffer; + +//! Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(MemoryBuffer& memoryBuffer, char c, size_t n) { + std::memset(memoryBuffer.stack_.Push(n), c, n * sizeof(c)); +} + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_MEMORYBUFFER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/memorystream.h b/contrib/rapidjson/1.1.0/include/rapidjson/memorystream.h new file mode 100644 index 0000000..1d71d8a --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/memorystream.h @@ -0,0 +1,71 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_MEMORYSTREAM_H_ +#define RAPIDJSON_MEMORYSTREAM_H_ + +#include "stream.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(unreachable-code) +RAPIDJSON_DIAG_OFF(missing-noreturn) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Represents an in-memory input byte stream. +/*! + This class is mainly for being wrapped by EncodedInputStream or AutoUTFInputStream. + + It is similar to FileReadBuffer but the source is an in-memory buffer instead of a file. + + Differences between MemoryStream and StringStream: + 1. StringStream has encoding but MemoryStream is a byte stream. + 2. MemoryStream needs size of the source buffer and the buffer don't need to be null terminated. StringStream assume null-terminated string as source. + 3. MemoryStream supports Peek4() for encoding detection. StringStream is specified with an encoding so it should not have Peek4(). + \note implements Stream concept +*/ +struct MemoryStream { + typedef char Ch; // byte + + MemoryStream(const Ch *src, size_t size) : src_(src), begin_(src), end_(src + size), size_(size) {} + + Ch Peek() const { return RAPIDJSON_UNLIKELY(src_ == end_) ? '\0' : *src_; } + Ch Take() { return RAPIDJSON_UNLIKELY(src_ == end_) ? 
'\0' : *src_++; } + size_t Tell() const { return static_cast(src_ - begin_); } + + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + // For encoding detection only. + const Ch* Peek4() const { + return Tell() + 4 <= size_ ? src_ : 0; + } + + const Ch* src_; //!< Current read position. + const Ch* begin_; //!< Original head of the string. + const Ch* end_; //!< End of stream. + size_t size_; //!< Size of the stream. +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_MEMORYBUFFER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/inttypes.h b/contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/inttypes.h new file mode 100644 index 0000000..1811128 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/inttypes.h @@ -0,0 +1,316 @@ +// ISO C9x compliant inttypes.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +// The above software in this distribution may have been modified by +// THL A29 Limited ("Tencent Modifications"). +// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" 
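// [Editor's sketch, not part of the vendored patch] MemoryBuffer (output) and MemoryStream
// (input) above are the in-memory counterparts of the file buffers; MemoryStream additionally
// exposes Peek4() so encoding detection can sniff a BOM.
#include "rapidjson/memorybuffer.h"
#include "rapidjson/memorystream.h"
#include <cassert>
#include <cstring>

inline void MemorySketch() {
    rapidjson::MemoryBuffer out;
    rapidjson::PutN(out, 'x', 3);                    // uses the specialized PutN() -> memset
    assert(out.GetSize() == 3 && std::memcmp(out.GetBuffer(), "xxx", 3) == 0);

    const char raw[] = "\xEF\xBB\xBF{}";             // UTF-8 BOM followed by an empty object
    rapidjson::MemoryStream in(raw, sizeof(raw) - 1);
    assert(in.Peek4() != 0);                         // at least 4 bytes available for sniffing
    assert(in.Take() == '\xEF' && in.Tell() == 1);
}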
+#endif // _MSC_VER ] + +#ifndef _MSC_INTTYPES_H_ // [ +#define _MSC_INTTYPES_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +#include "stdint.h" + +// miloyip: VC supports inttypes.h since VC2013 +#if _MSC_VER >= 1800 +#include +#else + +// 7.8 Format conversion of integer types + +typedef struct { + intmax_t quot; + intmax_t rem; +} imaxdiv_t; + +// 7.8.1 Macros for format specifiers + +#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198 + +// The fprintf macros for signed integers are: +#define PRId8 "d" +#define PRIi8 "i" +#define PRIdLEAST8 "d" +#define PRIiLEAST8 "i" +#define PRIdFAST8 "d" +#define PRIiFAST8 "i" + +#define PRId16 "hd" +#define PRIi16 "hi" +#define PRIdLEAST16 "hd" +#define PRIiLEAST16 "hi" +#define PRIdFAST16 "hd" +#define PRIiFAST16 "hi" + +#define PRId32 "I32d" +#define PRIi32 "I32i" +#define PRIdLEAST32 "I32d" +#define PRIiLEAST32 "I32i" +#define PRIdFAST32 "I32d" +#define PRIiFAST32 "I32i" + +#define PRId64 "I64d" +#define PRIi64 "I64i" +#define PRIdLEAST64 "I64d" +#define PRIiLEAST64 "I64i" +#define PRIdFAST64 "I64d" +#define PRIiFAST64 "I64i" + +#define PRIdMAX "I64d" +#define PRIiMAX "I64i" + +#define PRIdPTR "Id" +#define PRIiPTR "Ii" + +// The fprintf macros for unsigned integers are: +#define PRIo8 "o" +#define PRIu8 "u" +#define PRIx8 "x" +#define PRIX8 "X" +#define PRIoLEAST8 "o" +#define PRIuLEAST8 "u" +#define PRIxLEAST8 "x" +#define PRIXLEAST8 "X" +#define PRIoFAST8 "o" +#define PRIuFAST8 "u" +#define PRIxFAST8 "x" +#define PRIXFAST8 "X" + +#define PRIo16 "ho" +#define PRIu16 "hu" +#define PRIx16 "hx" +#define PRIX16 "hX" +#define PRIoLEAST16 "ho" +#define PRIuLEAST16 "hu" +#define PRIxLEAST16 "hx" +#define PRIXLEAST16 "hX" +#define PRIoFAST16 "ho" +#define PRIuFAST16 "hu" +#define PRIxFAST16 "hx" +#define PRIXFAST16 "hX" + +#define PRIo32 "I32o" +#define PRIu32 "I32u" +#define PRIx32 "I32x" +#define PRIX32 "I32X" +#define PRIoLEAST32 "I32o" +#define PRIuLEAST32 "I32u" +#define PRIxLEAST32 "I32x" +#define PRIXLEAST32 "I32X" +#define PRIoFAST32 "I32o" +#define PRIuFAST32 "I32u" +#define PRIxFAST32 "I32x" +#define PRIXFAST32 "I32X" + +#define PRIo64 "I64o" +#define PRIu64 "I64u" +#define PRIx64 "I64x" +#define PRIX64 "I64X" +#define PRIoLEAST64 "I64o" +#define PRIuLEAST64 "I64u" +#define PRIxLEAST64 "I64x" +#define PRIXLEAST64 "I64X" +#define PRIoFAST64 "I64o" +#define PRIuFAST64 "I64u" +#define PRIxFAST64 "I64x" +#define PRIXFAST64 "I64X" + +#define PRIoMAX "I64o" +#define PRIuMAX "I64u" +#define PRIxMAX "I64x" +#define PRIXMAX "I64X" + +#define PRIoPTR "Io" +#define PRIuPTR "Iu" +#define PRIxPTR "Ix" +#define PRIXPTR "IX" + +// The fscanf macros for signed integers are: +#define SCNd8 "d" +#define SCNi8 "i" +#define SCNdLEAST8 "d" +#define SCNiLEAST8 "i" +#define SCNdFAST8 "d" +#define SCNiFAST8 "i" + +#define SCNd16 "hd" +#define SCNi16 "hi" +#define SCNdLEAST16 "hd" +#define SCNiLEAST16 "hi" +#define SCNdFAST16 "hd" +#define SCNiFAST16 "hi" + +#define SCNd32 "ld" +#define SCNi32 "li" +#define SCNdLEAST32 "ld" +#define SCNiLEAST32 "li" +#define SCNdFAST32 "ld" +#define SCNiFAST32 "li" + +#define SCNd64 "I64d" +#define SCNi64 "I64i" +#define SCNdLEAST64 "I64d" +#define SCNiLEAST64 "I64i" +#define SCNdFAST64 "I64d" +#define SCNiFAST64 "I64i" + +#define SCNdMAX "I64d" +#define SCNiMAX "I64i" + +#ifdef _WIN64 // [ +# define SCNdPTR "I64d" +# define SCNiPTR "I64i" +#else // _WIN64 ][ +# define SCNdPTR "ld" +# define SCNiPTR "li" +#endif // _WIN64 ] + +// The fscanf macros for unsigned integers are: +#define SCNo8 
"o" +#define SCNu8 "u" +#define SCNx8 "x" +#define SCNX8 "X" +#define SCNoLEAST8 "o" +#define SCNuLEAST8 "u" +#define SCNxLEAST8 "x" +#define SCNXLEAST8 "X" +#define SCNoFAST8 "o" +#define SCNuFAST8 "u" +#define SCNxFAST8 "x" +#define SCNXFAST8 "X" + +#define SCNo16 "ho" +#define SCNu16 "hu" +#define SCNx16 "hx" +#define SCNX16 "hX" +#define SCNoLEAST16 "ho" +#define SCNuLEAST16 "hu" +#define SCNxLEAST16 "hx" +#define SCNXLEAST16 "hX" +#define SCNoFAST16 "ho" +#define SCNuFAST16 "hu" +#define SCNxFAST16 "hx" +#define SCNXFAST16 "hX" + +#define SCNo32 "lo" +#define SCNu32 "lu" +#define SCNx32 "lx" +#define SCNX32 "lX" +#define SCNoLEAST32 "lo" +#define SCNuLEAST32 "lu" +#define SCNxLEAST32 "lx" +#define SCNXLEAST32 "lX" +#define SCNoFAST32 "lo" +#define SCNuFAST32 "lu" +#define SCNxFAST32 "lx" +#define SCNXFAST32 "lX" + +#define SCNo64 "I64o" +#define SCNu64 "I64u" +#define SCNx64 "I64x" +#define SCNX64 "I64X" +#define SCNoLEAST64 "I64o" +#define SCNuLEAST64 "I64u" +#define SCNxLEAST64 "I64x" +#define SCNXLEAST64 "I64X" +#define SCNoFAST64 "I64o" +#define SCNuFAST64 "I64u" +#define SCNxFAST64 "I64x" +#define SCNXFAST64 "I64X" + +#define SCNoMAX "I64o" +#define SCNuMAX "I64u" +#define SCNxMAX "I64x" +#define SCNXMAX "I64X" + +#ifdef _WIN64 // [ +# define SCNoPTR "I64o" +# define SCNuPTR "I64u" +# define SCNxPTR "I64x" +# define SCNXPTR "I64X" +#else // _WIN64 ][ +# define SCNoPTR "lo" +# define SCNuPTR "lu" +# define SCNxPTR "lx" +# define SCNXPTR "lX" +#endif // _WIN64 ] + +#endif // __STDC_FORMAT_MACROS ] + +// 7.8.2 Functions for greatest-width integer types + +// 7.8.2.1 The imaxabs function +#define imaxabs _abs64 + +// 7.8.2.2 The imaxdiv function + +// This is modified version of div() function from Microsoft's div.c found +// in %MSVC.NET%\crt\src\div.c +#ifdef STATIC_IMAXDIV // [ +static +#else // STATIC_IMAXDIV ][ +_inline +#endif // STATIC_IMAXDIV ] +imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom) +{ + imaxdiv_t result; + + result.quot = numer / denom; + result.rem = numer % denom; + + if (numer < 0 && result.rem > 0) { + // did division wrong; must fix up + ++result.quot; + result.rem -= denom; + } + + return result; +} + +// 7.8.2.3 The strtoimax and strtoumax functions +#define strtoimax _strtoi64 +#define strtoumax _strtoui64 + +// 7.8.2.4 The wcstoimax and wcstoumax functions +#define wcstoimax _wcstoi64 +#define wcstoumax _wcstoui64 + +#endif // _MSC_VER >= 1800 + +#endif // _MSC_INTTYPES_H_ ] diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/stdint.h b/contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/stdint.h new file mode 100644 index 0000000..3d4477b --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/msinttypes/stdint.h @@ -0,0 +1,300 @@ +// ISO C9x compliant stdint.h for Microsoft Visual Studio +// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 +// +// Copyright (c) 2006-2013 Alexander Chemeris +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the product nor the names of its contributors may +// be used to endorse or promote products derived from this software +// without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED +// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +/////////////////////////////////////////////////////////////////////////////// + +// The above software in this distribution may have been modified by +// THL A29 Limited ("Tencent Modifications"). +// All Tencent Modifications are Copyright (C) 2015 THL A29 Limited. + +#ifndef _MSC_VER // [ +#error "Use this header only with Microsoft Visual C++ compilers!" +#endif // _MSC_VER ] + +#ifndef _MSC_STDINT_H_ // [ +#define _MSC_STDINT_H_ + +#if _MSC_VER > 1000 +#pragma once +#endif + +// miloyip: Originally Visual Studio 2010 uses its own stdint.h. However it generates warning with INT64_C(), so change to use this file for vs2010. +#if _MSC_VER >= 1600 // [ +#include + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +#undef INT8_C +#undef INT16_C +#undef INT32_C +#undef INT64_C +#undef UINT8_C +#undef UINT16_C +#undef UINT32_C +#undef UINT64_C + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. +#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#else // ] _MSC_VER >= 1700 [ + +#include + +// For Visual Studio 6 in C++ mode and for many Visual Studio versions when +// compiling for ARM we have to wrap include with 'extern "C++" {}' +// or compiler would give many errors like this: +// error C2733: second C linkage of overloaded function 'wmemchr' not allowed +#if defined(__cplusplus) && !defined(_M_ARM) +extern "C" { +#endif +# include +#if defined(__cplusplus) && !defined(_M_ARM) +} +#endif + +// Define _W64 macros to mark types changing their size, like intptr_t. +#ifndef _W64 +# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 +# define _W64 __w64 +# else +# define _W64 +# endif +#endif + + +// 7.18.1 Integer types + +// 7.18.1.1 Exact-width integer types + +// Visual Studio 6 and Embedded Visual C++ 4 doesn't +// realize that, e.g. char has the same size as __int8 +// so we give up on __intX for them. 
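// [Editor's sketch, not part of the vendored patch] The msinttypes shims above only kick in
// for pre-2013 MSVC; on conforming compilers the same macros come from <cinttypes>/<cstdint>.
// Usage is identical either way:
#include <cinttypes>     // or "rapidjson/msinttypes/inttypes.h" under old MSVC
#include <cstdio>

inline void FormatMacroSketch() {
    int64_t big = INT64_C(9007199254740993);             // 2^53 + 1
    std::printf("big = %" PRId64 "\n", big);             // portable 64-bit format specifier
    std::imaxdiv_t qr = std::imaxdiv(big, INT64_C(1000));
    std::printf("quot=%" PRIdMAX " rem=%" PRIdMAX "\n", qr.quot, qr.rem);
}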
+#if (_MSC_VER < 1300) + typedef signed char int8_t; + typedef signed short int16_t; + typedef signed int int32_t; + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; +#else + typedef signed __int8 int8_t; + typedef signed __int16 int16_t; + typedef signed __int32 int32_t; + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; +#endif +typedef signed __int64 int64_t; +typedef unsigned __int64 uint64_t; + + +// 7.18.1.2 Minimum-width integer types +typedef int8_t int_least8_t; +typedef int16_t int_least16_t; +typedef int32_t int_least32_t; +typedef int64_t int_least64_t; +typedef uint8_t uint_least8_t; +typedef uint16_t uint_least16_t; +typedef uint32_t uint_least32_t; +typedef uint64_t uint_least64_t; + +// 7.18.1.3 Fastest minimum-width integer types +typedef int8_t int_fast8_t; +typedef int16_t int_fast16_t; +typedef int32_t int_fast32_t; +typedef int64_t int_fast64_t; +typedef uint8_t uint_fast8_t; +typedef uint16_t uint_fast16_t; +typedef uint32_t uint_fast32_t; +typedef uint64_t uint_fast64_t; + +// 7.18.1.4 Integer types capable of holding object pointers +#ifdef _WIN64 // [ + typedef signed __int64 intptr_t; + typedef unsigned __int64 uintptr_t; +#else // _WIN64 ][ + typedef _W64 signed int intptr_t; + typedef _W64 unsigned int uintptr_t; +#endif // _WIN64 ] + +// 7.18.1.5 Greatest-width integer types +typedef int64_t intmax_t; +typedef uint64_t uintmax_t; + + +// 7.18.2 Limits of specified-width integer types + +#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 + +// 7.18.2.1 Limits of exact-width integer types +#define INT8_MIN ((int8_t)_I8_MIN) +#define INT8_MAX _I8_MAX +#define INT16_MIN ((int16_t)_I16_MIN) +#define INT16_MAX _I16_MAX +#define INT32_MIN ((int32_t)_I32_MIN) +#define INT32_MAX _I32_MAX +#define INT64_MIN ((int64_t)_I64_MIN) +#define INT64_MAX _I64_MAX +#define UINT8_MAX _UI8_MAX +#define UINT16_MAX _UI16_MAX +#define UINT32_MAX _UI32_MAX +#define UINT64_MAX _UI64_MAX + +// 7.18.2.2 Limits of minimum-width integer types +#define INT_LEAST8_MIN INT8_MIN +#define INT_LEAST8_MAX INT8_MAX +#define INT_LEAST16_MIN INT16_MIN +#define INT_LEAST16_MAX INT16_MAX +#define INT_LEAST32_MIN INT32_MIN +#define INT_LEAST32_MAX INT32_MAX +#define INT_LEAST64_MIN INT64_MIN +#define INT_LEAST64_MAX INT64_MAX +#define UINT_LEAST8_MAX UINT8_MAX +#define UINT_LEAST16_MAX UINT16_MAX +#define UINT_LEAST32_MAX UINT32_MAX +#define UINT_LEAST64_MAX UINT64_MAX + +// 7.18.2.3 Limits of fastest minimum-width integer types +#define INT_FAST8_MIN INT8_MIN +#define INT_FAST8_MAX INT8_MAX +#define INT_FAST16_MIN INT16_MIN +#define INT_FAST16_MAX INT16_MAX +#define INT_FAST32_MIN INT32_MIN +#define INT_FAST32_MAX INT32_MAX +#define INT_FAST64_MIN INT64_MIN +#define INT_FAST64_MAX INT64_MAX +#define UINT_FAST8_MAX UINT8_MAX +#define UINT_FAST16_MAX UINT16_MAX +#define UINT_FAST32_MAX UINT32_MAX +#define UINT_FAST64_MAX UINT64_MAX + +// 7.18.2.4 Limits of integer types capable of holding object pointers +#ifdef _WIN64 // [ +# define INTPTR_MIN INT64_MIN +# define INTPTR_MAX INT64_MAX +# define UINTPTR_MAX UINT64_MAX +#else // _WIN64 ][ +# define INTPTR_MIN INT32_MIN +# define INTPTR_MAX INT32_MAX +# define UINTPTR_MAX UINT32_MAX +#endif // _WIN64 ] + +// 7.18.2.5 Limits of greatest-width integer types +#define INTMAX_MIN INT64_MIN +#define INTMAX_MAX INT64_MAX +#define UINTMAX_MAX UINT64_MAX + +// 7.18.3 Limits of other integer 
types + +#ifdef _WIN64 // [ +# define PTRDIFF_MIN _I64_MIN +# define PTRDIFF_MAX _I64_MAX +#else // _WIN64 ][ +# define PTRDIFF_MIN _I32_MIN +# define PTRDIFF_MAX _I32_MAX +#endif // _WIN64 ] + +#define SIG_ATOMIC_MIN INT_MIN +#define SIG_ATOMIC_MAX INT_MAX + +#ifndef SIZE_MAX // [ +# ifdef _WIN64 // [ +# define SIZE_MAX _UI64_MAX +# else // _WIN64 ][ +# define SIZE_MAX _UI32_MAX +# endif // _WIN64 ] +#endif // SIZE_MAX ] + +// WCHAR_MIN and WCHAR_MAX are also defined in +#ifndef WCHAR_MIN // [ +# define WCHAR_MIN 0 +#endif // WCHAR_MIN ] +#ifndef WCHAR_MAX // [ +# define WCHAR_MAX _UI16_MAX +#endif // WCHAR_MAX ] + +#define WINT_MIN 0 +#define WINT_MAX _UI16_MAX + +#endif // __STDC_LIMIT_MACROS ] + + +// 7.18.4 Limits of other integer types + +#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 + +// 7.18.4.1 Macros for minimum-width integer constants + +#define INT8_C(val) val##i8 +#define INT16_C(val) val##i16 +#define INT32_C(val) val##i32 +#define INT64_C(val) val##i64 + +#define UINT8_C(val) val##ui8 +#define UINT16_C(val) val##ui16 +#define UINT32_C(val) val##ui32 +#define UINT64_C(val) val##ui64 + +// 7.18.4.2 Macros for greatest-width integer constants +// These #ifndef's are needed to prevent collisions with . +// Check out Issue 9 for the details. +#ifndef INTMAX_C // [ +# define INTMAX_C INT64_C +#endif // INTMAX_C ] +#ifndef UINTMAX_C // [ +# define UINTMAX_C UINT64_C +#endif // UINTMAX_C ] + +#endif // __STDC_CONSTANT_MACROS ] + +#endif // _MSC_VER >= 1600 ] + +#endif // _MSC_STDINT_H_ ] diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/ostreamwrapper.h b/contrib/rapidjson/1.1.0/include/rapidjson/ostreamwrapper.h new file mode 100644 index 0000000..6f4667c --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/ostreamwrapper.h @@ -0,0 +1,81 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_OSTREAMWRAPPER_H_ +#define RAPIDJSON_OSTREAMWRAPPER_H_ + +#include "stream.h" +#include + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Wrapper of \c std::basic_ostream into RapidJSON's Stream concept. +/*! + The classes can be wrapped including but not limited to: + + - \c std::ostringstream + - \c std::stringstream + - \c std::wpstringstream + - \c std::wstringstream + - \c std::ifstream + - \c std::fstream + - \c std::wofstream + - \c std::wfstream + + \tparam StreamType Class derived from \c std::basic_ostream. 
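// [Editor's sketch, not part of the vendored patch] BasicOStreamWrapper, defined just below,
// is the output-side twin of BasicIStreamWrapper: it adapts any std::ostream so a Writer can
// serialize straight into it. Writer comes from RapidJSON's writer.h, not from this hunk.
#include "rapidjson/ostreamwrapper.h"
#include "rapidjson/writer.h"
#include <sstream>
#include <string>

inline std::string OStreamWrapperSketch() {
    std::ostringstream sink;
    rapidjson::OStreamWrapper osw(sink);
    rapidjson::Writer<rapidjson::OStreamWrapper> w(osw);
    w.StartObject();
    w.Key("ok");
    w.Bool(true);
    w.EndObject();
    return sink.str();      // {"ok":true}
}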
+*/ + +template +class BasicOStreamWrapper { +public: + typedef typename StreamType::char_type Ch; + BasicOStreamWrapper(StreamType& stream) : stream_(stream) {} + + void Put(Ch c) { + stream_.put(c); + } + + void Flush() { + stream_.flush(); + } + + // Not implemented + char Peek() const { RAPIDJSON_ASSERT(false); return 0; } + char Take() { RAPIDJSON_ASSERT(false); return 0; } + size_t Tell() const { RAPIDJSON_ASSERT(false); return 0; } + char* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + size_t PutEnd(char*) { RAPIDJSON_ASSERT(false); return 0; } + +private: + BasicOStreamWrapper(const BasicOStreamWrapper&); + BasicOStreamWrapper& operator=(const BasicOStreamWrapper&); + + StreamType& stream_; +}; + +typedef BasicOStreamWrapper OStreamWrapper; +typedef BasicOStreamWrapper WOStreamWrapper; + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_OSTREAMWRAPPER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/pointer.h b/contrib/rapidjson/1.1.0/include/rapidjson/pointer.h new file mode 100644 index 0000000..0206ac1 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/pointer.h @@ -0,0 +1,1358 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_POINTER_H_ +#define RAPIDJSON_POINTER_H_ + +#include "document.h" +#include "internal/itoa.h" + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(switch-enum) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +static const SizeType kPointerInvalidIndex = ~SizeType(0); //!< Represents an invalid index in GenericPointer::Token + +//! Error code of parsing. +/*! \ingroup RAPIDJSON_ERRORS + \see GenericPointer::GenericPointer, GenericPointer::GetParseErrorCode +*/ +enum PointerParseErrorCode { + kPointerParseErrorNone = 0, //!< The parse is successful + + kPointerParseErrorTokenMustBeginWithSolidus, //!< A token must begin with a '/' + kPointerParseErrorInvalidEscape, //!< Invalid escape + kPointerParseErrorInvalidPercentEncoding, //!< Invalid percent encoding in URI fragment + kPointerParseErrorCharacterMustPercentEncode //!< A character must percent encoded in URI fragment +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericPointer + +//! Represents a JSON Pointer. Use Pointer for UTF8 encoding and default allocator. +/*! + This class implements RFC 6901 "JavaScript Object Notation (JSON) Pointer" + (https://tools.ietf.org/html/rfc6901). + + A JSON pointer is for identifying a specific value in a JSON document + (GenericDocument). It can simplify coding of DOM tree manipulation, because it + can access multiple-level depth of DOM tree with single API call. + + After it parses a string representation (e.g. 
"/foo/0" or URI fragment + representation (e.g. "#/foo/0") into its internal representation (tokens), + it can be used to resolve a specific value in multiple documents, or sub-tree + of documents. + + Contrary to GenericValue, Pointer can be copy constructed and copy assigned. + Apart from assignment, a Pointer cannot be modified after construction. + + Although Pointer is very convenient, please aware that constructing Pointer + involves parsing and dynamic memory allocation. A special constructor with user- + supplied tokens eliminates these. + + GenericPointer depends on GenericDocument and GenericValue. + + \tparam ValueType The value type of the DOM tree. E.g. GenericValue > + \tparam Allocator The allocator type for allocating memory for internal representation. + + \note GenericPointer uses same encoding of ValueType. + However, Allocator of GenericPointer is independent of Allocator of Value. +*/ +template +class GenericPointer { +public: + typedef typename ValueType::EncodingType EncodingType; //!< Encoding type from Value + typedef typename ValueType::Ch Ch; //!< Character type from Value + + //! A token is the basic units of internal representation. + /*! + A JSON pointer string representation "/foo/123" is parsed to two tokens: + "foo" and 123. 123 will be represented in both numeric form and string form. + They are resolved according to the actual value type (object or array). + + For token that are not numbers, or the numeric value is out of bound + (greater than limits of SizeType), they are only treated as string form + (i.e. the token's index will be equal to kPointerInvalidIndex). + + This struct is public so that user can create a Pointer without parsing and + allocation, using a special constructor. + */ + struct Token { + const Ch* name; //!< Name of the token. It has null character at the end but it can contain null character. + SizeType length; //!< Length of the name. + SizeType index; //!< A valid array index, if it is not equal to kPointerInvalidIndex. + }; + + //!@name Constructors and destructor. + //@{ + + //! Default constructor. + GenericPointer(Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} + + //! Constructor that parses a string or URI fragment representation. + /*! + \param source A null-terminated, string or URI fragment representation of JSON pointer. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + */ + explicit GenericPointer(const Ch* source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source, internal::StrLen(source)); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Constructor that parses a string or URI fragment representation. + /*! + \param source A string or URI fragment representation of JSON pointer. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + \note Requires the definition of the preprocessor symbol \ref RAPIDJSON_HAS_STDSTRING. + */ + explicit GenericPointer(const std::basic_string& source, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source.c_str(), source.size()); + } +#endif + + //! 
Constructor that parses a string or URI fragment representation, with length of the source string. + /*! + \param source A string or URI fragment representation of JSON pointer. + \param length Length of source. + \param allocator User supplied allocator for this pointer. If no allocator is provided, it creates a self-owned one. + \note Slightly faster than the overload without length. + */ + GenericPointer(const Ch* source, size_t length, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + Parse(source, length); + } + + //! Constructor with user-supplied tokens. + /*! + This constructor let user supplies const array of tokens. + This prevents the parsing process and eliminates allocation. + This is preferred for memory constrained environments. + + \param tokens An constant array of tokens representing the JSON pointer. + \param tokenCount Number of tokens. + + \b Example + \code + #define NAME(s) { s, sizeof(s) / sizeof(s[0]) - 1, kPointerInvalidIndex } + #define INDEX(i) { #i, sizeof(#i) - 1, i } + + static const Pointer::Token kTokens[] = { NAME("foo"), INDEX(123) }; + static const Pointer p(kTokens, sizeof(kTokens) / sizeof(kTokens[0])); + // Equivalent to static const Pointer p("/foo/123"); + + #undef NAME + #undef INDEX + \endcode + */ + GenericPointer(const Token* tokens, size_t tokenCount) : allocator_(), ownAllocator_(), nameBuffer_(), tokens_(const_cast(tokens)), tokenCount_(tokenCount), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) {} + + //! Copy constructor. + GenericPointer(const GenericPointer& rhs, Allocator* allocator = 0) : allocator_(allocator), ownAllocator_(), nameBuffer_(), tokens_(), tokenCount_(), parseErrorOffset_(), parseErrorCode_(kPointerParseErrorNone) { + *this = rhs; + } + + //! Destructor. + ~GenericPointer() { + if (nameBuffer_) // If user-supplied tokens constructor is used, nameBuffer_ is nullptr and tokens_ are not deallocated. + Allocator::Free(tokens_); + RAPIDJSON_DELETE(ownAllocator_); + } + + //! Assignment operator. + GenericPointer& operator=(const GenericPointer& rhs) { + if (this != &rhs) { + // Do not delete ownAllcator + if (nameBuffer_) + Allocator::Free(tokens_); + + tokenCount_ = rhs.tokenCount_; + parseErrorOffset_ = rhs.parseErrorOffset_; + parseErrorCode_ = rhs.parseErrorCode_; + + if (rhs.nameBuffer_) + CopyFromRaw(rhs); // Normally parsed tokens. + else { + tokens_ = rhs.tokens_; // User supplied const tokens. + nameBuffer_ = 0; + } + } + return *this; + } + + //@} + + //!@name Append token + //@{ + + //! Append a token and return a new Pointer + /*! + \param token Token to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const Token& token, Allocator* allocator = 0) const { + GenericPointer r; + r.allocator_ = allocator; + Ch *p = r.CopyFromRaw(*this, 1, token.length + 1); + std::memcpy(p, token.name, (token.length + 1) * sizeof(Ch)); + r.tokens_[tokenCount_].name = p; + r.tokens_[tokenCount_].length = token.length; + r.tokens_[tokenCount_].index = token.index; + return r; + } + + //! Append a name token with length, and return a new Pointer + /*! + \param name Name to be appended. + \param length Length of name. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. 
+ */ + GenericPointer Append(const Ch* name, SizeType length, Allocator* allocator = 0) const { + Token token = { name, length, kPointerInvalidIndex }; + return Append(token, allocator); + } + + //! Append a name token without length, and return a new Pointer + /*! + \param name Name (const Ch*) to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::NotExpr::Type, Ch> >), (GenericPointer)) + Append(T* name, Allocator* allocator = 0) const { + return Append(name, StrLen(name), allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Append a name token, and return a new Pointer + /*! + \param name Name to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const std::basic_string& name, Allocator* allocator = 0) const { + return Append(name.c_str(), static_cast(name.size()), allocator); + } +#endif + + //! Append a index token, and return a new Pointer + /*! + \param index Index to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(SizeType index, Allocator* allocator = 0) const { + char buffer[21]; + char* end = sizeof(SizeType) == 4 ? internal::u32toa(index, buffer) : internal::u64toa(index, buffer); + SizeType length = static_cast(end - buffer); + buffer[length] = '\0'; + + if (sizeof(Ch) == 1) { + Token token = { reinterpret_cast(buffer), length, index }; + return Append(token, allocator); + } + else { + Ch name[21]; + for (size_t i = 0; i <= length; i++) + name[i] = buffer[i]; + Token token = { name, length, index }; + return Append(token, allocator); + } + } + + //! Append a token by value, and return a new Pointer + /*! + \param token token to be appended. + \param allocator Allocator for the newly return Pointer. + \return A new Pointer with appended token. + */ + GenericPointer Append(const ValueType& token, Allocator* allocator = 0) const { + if (token.IsString()) + return Append(token.GetString(), token.GetStringLength(), allocator); + else { + RAPIDJSON_ASSERT(token.IsUint64()); + RAPIDJSON_ASSERT(token.GetUint64() <= SizeType(~0)); + return Append(static_cast(token.GetUint64()), allocator); + } + } + + //!@name Handling Parse Error + //@{ + + //! Check whether this is a valid pointer. + bool IsValid() const { return parseErrorCode_ == kPointerParseErrorNone; } + + //! Get the parsing error offset in code unit. + size_t GetParseErrorOffset() const { return parseErrorOffset_; } + + //! Get the parsing error code. + PointerParseErrorCode GetParseErrorCode() const { return parseErrorCode_; } + + //@} + + //! Get the allocator of this pointer. + Allocator& GetAllocator() { return *allocator_; } + + //!@name Tokens + //@{ + + //! Get the token array (const version only). + const Token* GetTokens() const { return tokens_; } + + //! Get the number of tokens. + size_t GetTokenCount() const { return tokenCount_; } + + //@} + + //!@name Equality/inequality operators + //@{ + + //! Equality operator. + /*! + \note When any pointers are invalid, always returns false. 
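// [Editor's sketch, not part of the vendored patch] Building pointers with the Append()
// overloads above: each call returns a new, longer Pointer and leaves the original untouched.
#include "rapidjson/pointer.h"
#include <cassert>

inline void PointerAppendSketch() {
    rapidjson::Pointer base("/foo");
    rapidjson::Pointer elem = base.Append(rapidjson::SizeType(0));   // "/foo/0"
    assert(elem.IsValid() && elem.GetTokenCount() == 2);
    assert(elem.GetTokens()[1].index == 0);                          // numeric token keeps its index
    assert(base.GetTokenCount() == 1);                               // base is unchanged
}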
+ */ + bool operator==(const GenericPointer& rhs) const { + if (!IsValid() || !rhs.IsValid() || tokenCount_ != rhs.tokenCount_) + return false; + + for (size_t i = 0; i < tokenCount_; i++) { + if (tokens_[i].index != rhs.tokens_[i].index || + tokens_[i].length != rhs.tokens_[i].length || + (tokens_[i].length != 0 && std::memcmp(tokens_[i].name, rhs.tokens_[i].name, sizeof(Ch)* tokens_[i].length) != 0)) + { + return false; + } + } + + return true; + } + + //! Inequality operator. + /*! + \note When any pointers are invalid, always returns true. + */ + bool operator!=(const GenericPointer& rhs) const { return !(*this == rhs); } + + //@} + + //!@name Stringify + //@{ + + //! Stringify the pointer into string representation. + /*! + \tparam OutputStream Type of output stream. + \param os The output stream. + */ + template + bool Stringify(OutputStream& os) const { + return Stringify(os); + } + + //! Stringify the pointer into URI fragment representation. + /*! + \tparam OutputStream Type of output stream. + \param os The output stream. + */ + template + bool StringifyUriFragment(OutputStream& os) const { + return Stringify(os); + } + + //@} + + //!@name Create value + //@{ + + //! Create a value in a subtree. + /*! + If the value is not exist, it creates all parent values and a JSON Null value. + So it always succeed and return the newly created or existing value. + + Remind that it may change types of parents according to tokens, so it + potentially removes previously stored values. For example, if a document + was an array, and "/foo" is used to create a value, then the document + will be changed to an object, and all existing array elements are lost. + + \param root Root value of a DOM subtree to be resolved. It can be any value other than document root. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \param alreadyExist If non-null, it stores whether the resolved value is already exist. + \return The resolved newly created (a JSON Null value), or already exists value. + */ + ValueType& Create(ValueType& root, typename ValueType::AllocatorType& allocator, bool* alreadyExist = 0) const { + RAPIDJSON_ASSERT(IsValid()); + ValueType* v = &root; + bool exist = true; + for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + if (v->IsArray() && t->name[0] == '-' && t->length == 1) { + v->PushBack(ValueType().Move(), allocator); + v = &((*v)[v->Size() - 1]); + exist = false; + } + else { + if (t->index == kPointerInvalidIndex) { // must be object name + if (!v->IsObject()) + v->SetObject(); // Change to Object + } + else { // object name or array index + if (!v->IsArray() && !v->IsObject()) + v->SetArray(); // Change to Array + } + + if (v->IsArray()) { + if (t->index >= v->Size()) { + v->Reserve(t->index + 1, allocator); + while (t->index >= v->Size()) + v->PushBack(ValueType().Move(), allocator); + exist = false; + } + v = &((*v)[t->index]); + } + else { + typename ValueType::MemberIterator m = v->FindMember(GenericStringRef(t->name, t->length)); + if (m == v->MemberEnd()) { + v->AddMember(ValueType(t->name, t->length, allocator).Move(), ValueType().Move(), allocator); + v = &(--v->MemberEnd())->value; // Assumes AddMember() appends at the end + exist = false; + } + else + v = &m->value; + } + } + } + + if (alreadyExist) + *alreadyExist = exist; + + return *v; + } + + //! Creates a value in a document. + /*! + \param document A document to be resolved. 
+ \param alreadyExist If non-null, it stores whether the resolved value is already exist. + \return The resolved newly created, or already exists value. + */ + template + ValueType& Create(GenericDocument& document, bool* alreadyExist = 0) const { + return Create(document, document.GetAllocator(), alreadyExist); + } + + //@} + + //!@name Query value + //@{ + + //! Query a value in a subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param unresolvedTokenIndex If the pointer cannot resolve a token in the pointer, this parameter can obtain the index of unresolved token. + \return Pointer to the value if it can be resolved. Otherwise null. + + \note + There are only 3 situations when a value cannot be resolved: + 1. A value in the path is not an array nor object. + 2. An object value does not contain the token. + 3. A token is out of range of an array value. + + Use unresolvedTokenIndex to retrieve the token index. + */ + ValueType* Get(ValueType& root, size_t* unresolvedTokenIndex = 0) const { + RAPIDJSON_ASSERT(IsValid()); + ValueType* v = &root; + for (const Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + switch (v->GetType()) { + case kObjectType: + { + typename ValueType::MemberIterator m = v->FindMember(GenericStringRef(t->name, t->length)); + if (m == v->MemberEnd()) + break; + v = &m->value; + } + continue; + case kArrayType: + if (t->index == kPointerInvalidIndex || t->index >= v->Size()) + break; + v = &((*v)[t->index]); + continue; + default: + break; + } + + // Error: unresolved token + if (unresolvedTokenIndex) + *unresolvedTokenIndex = static_cast(t - tokens_); + return 0; + } + return v; + } + + //! Query a const value in a const subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \return Pointer to the value if it can be resolved. Otherwise null. + */ + const ValueType* Get(const ValueType& root, size_t* unresolvedTokenIndex = 0) const { + return Get(const_cast(root), unresolvedTokenIndex); + } + + //@} + + //!@name Query a value with default + //@{ + + //! Query a value in a subtree with default value. + /*! + Similar to Get(), but if the specified value do not exists, it creates all parents and clone the default value. + So that this function always succeed. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param defaultValue Default value to be cloned if the value was not exists. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& GetWithDefault(ValueType& root, const ValueType& defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + Value& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.CopyFrom(defaultValue, allocator); + } + + //! Query a value in a subtree with default null-terminated string. + ValueType& GetWithDefault(ValueType& root, const Ch* defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + Value& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.SetString(defaultValue, allocator); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Query a value in a subtree with default std::basic_string. 
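// [Editor's sketch, not part of the vendored patch] Create() vs Get() above on a small DOM:
// Create() builds any missing parents (here an object member, then an array element), while
// Get() only resolves what already exists. Document/Value come from document.h.
#include "rapidjson/document.h"
#include "rapidjson/pointer.h"
#include <cassert>

inline void PointerResolveSketch() {
    rapidjson::Document d;
    d.SetObject();
    rapidjson::Pointer("/foo/0").Create(d) = 42;         // makes d = {"foo":[42]}
    rapidjson::Value* hit = rapidjson::Pointer("/foo/0").Get(d);
    assert(hit && hit->GetInt() == 42);
    assert(rapidjson::Pointer("/bar").Get(d) == 0);      // unresolved token -> null pointer
}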
+ ValueType& GetWithDefault(ValueType& root, const std::basic_string& defaultValue, typename ValueType::AllocatorType& allocator) const { + bool alreadyExist; + Value& v = Create(root, allocator, &alreadyExist); + return alreadyExist ? v : v.SetString(defaultValue, allocator); + } +#endif + + //! Query a value in a subtree with default primitive value. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + GetWithDefault(ValueType& root, T defaultValue, typename ValueType::AllocatorType& allocator) const { + return GetWithDefault(root, ValueType(defaultValue).Move(), allocator); + } + + //! Query a value in a document with default value. + template + ValueType& GetWithDefault(GenericDocument& document, const ValueType& defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + + //! Query a value in a document with default null-terminated string. + template + ValueType& GetWithDefault(GenericDocument& document, const Ch* defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Query a value in a document with default std::basic_string. + template + ValueType& GetWithDefault(GenericDocument& document, const std::basic_string& defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } +#endif + + //! Query a value in a document with default primitive value. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + GetWithDefault(GenericDocument& document, T defaultValue) const { + return GetWithDefault(document, defaultValue, document.GetAllocator()); + } + + //@} + + //!@name Set a value + //@{ + + //! Set a value in a subtree, with move semantics. + /*! + It creates all parents if they are not exist or types are different to the tokens. + So this function always succeeds but potentially remove existing values. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param value Value to be set. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& Set(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = value; + } + + //! Set a value in a subtree, with copy semantics. + ValueType& Set(ValueType& root, const ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator).CopyFrom(value, allocator); + } + + //! Set a null-terminated string in a subtree. + ValueType& Set(ValueType& root, const Ch* value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = ValueType(value, allocator).Move(); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Set a std::basic_string in a subtree. + ValueType& Set(ValueType& root, const std::basic_string& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = ValueType(value, allocator).Move(); + } +#endif + + //! Set a primitive value in a subtree. + /*! 
+ \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + Set(ValueType& root, T value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator) = ValueType(value).Move(); + } + + //! Set a value in a document, with move semantics. + template + ValueType& Set(GenericDocument& document, ValueType& value) const { + return Create(document) = value; + } + + //! Set a value in a document, with copy semantics. + template + ValueType& Set(GenericDocument& document, const ValueType& value) const { + return Create(document).CopyFrom(value, document.GetAllocator()); + } + + //! Set a null-terminated string in a document. + template + ValueType& Set(GenericDocument& document, const Ch* value) const { + return Create(document) = ValueType(value, document.GetAllocator()).Move(); + } + +#if RAPIDJSON_HAS_STDSTRING + //! Sets a std::basic_string in a document. + template + ValueType& Set(GenericDocument& document, const std::basic_string& value) const { + return Create(document) = ValueType(value, document.GetAllocator()).Move(); + } +#endif + + //! Set a primitive value in a document. + /*! + \tparam T Either \ref Type, \c int, \c unsigned, \c int64_t, \c uint64_t, \c bool + */ + template + RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (ValueType&)) + Set(GenericDocument& document, T value) const { + return Create(document) = value; + } + + //@} + + //!@name Swap a value + //@{ + + //! Swap a value with a value in a subtree. + /*! + It creates all parents if they are not exist or types are different to the tokens. + So this function always succeeds but potentially remove existing values. + + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \param value Value to be swapped. + \param allocator Allocator for creating the values if the specified value or its parents are not exist. + \see Create() + */ + ValueType& Swap(ValueType& root, ValueType& value, typename ValueType::AllocatorType& allocator) const { + return Create(root, allocator).Swap(value); + } + + //! Swap a value with a value in a document. + template + ValueType& Swap(GenericDocument& document, ValueType& value) const { + return Create(document).Swap(value); + } + + //@} + + //! Erase a value in a subtree. + /*! + \param root Root value of a DOM sub-tree to be resolved. It can be any value other than document root. + \return Whether the resolved value is found and erased. + + \note Erasing with an empty pointer \c Pointer(""), i.e. the root, always fail and return false. 
+ */ + bool Erase(ValueType& root) const { + RAPIDJSON_ASSERT(IsValid()); + if (tokenCount_ == 0) // Cannot erase the root + return false; + + ValueType* v = &root; + const Token* last = tokens_ + (tokenCount_ - 1); + for (const Token *t = tokens_; t != last; ++t) { + switch (v->GetType()) { + case kObjectType: + { + typename ValueType::MemberIterator m = v->FindMember(GenericStringRef(t->name, t->length)); + if (m == v->MemberEnd()) + return false; + v = &m->value; + } + break; + case kArrayType: + if (t->index == kPointerInvalidIndex || t->index >= v->Size()) + return false; + v = &((*v)[t->index]); + break; + default: + return false; + } + } + + switch (v->GetType()) { + case kObjectType: + return v->EraseMember(GenericStringRef(last->name, last->length)); + case kArrayType: + if (last->index == kPointerInvalidIndex || last->index >= v->Size()) + return false; + v->Erase(v->Begin() + last->index); + return true; + default: + return false; + } + } + +private: + //! Clone the content from rhs to this. + /*! + \param rhs Source pointer. + \param extraToken Extra tokens to be allocated. + \param extraNameBufferSize Extra name buffer size (in number of Ch) to be allocated. + \return Start of non-occupied name buffer, for storing extra names. + */ + Ch* CopyFromRaw(const GenericPointer& rhs, size_t extraToken = 0, size_t extraNameBufferSize = 0) { + if (!allocator_) // allocator is independently owned. + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + + size_t nameBufferSize = rhs.tokenCount_; // null terminators for tokens + for (Token *t = rhs.tokens_; t != rhs.tokens_ + rhs.tokenCount_; ++t) + nameBufferSize += t->length; + + tokenCount_ = rhs.tokenCount_ + extraToken; + tokens_ = static_cast(allocator_->Malloc(tokenCount_ * sizeof(Token) + (nameBufferSize + extraNameBufferSize) * sizeof(Ch))); + nameBuffer_ = reinterpret_cast(tokens_ + tokenCount_); + if (rhs.tokenCount_ > 0) { + std::memcpy(tokens_, rhs.tokens_, rhs.tokenCount_ * sizeof(Token)); + } + if (nameBufferSize > 0) { + std::memcpy(nameBuffer_, rhs.nameBuffer_, nameBufferSize * sizeof(Ch)); + } + + // Adjust pointers to name buffer + std::ptrdiff_t diff = nameBuffer_ - rhs.nameBuffer_; + for (Token *t = tokens_; t != tokens_ + rhs.tokenCount_; ++t) + t->name += diff; + + return nameBuffer_ + nameBufferSize; + } + + //! Check whether a character should be percent-encoded. + /*! + According to RFC 3986 2.3 Unreserved Characters. + \param c The character (code unit) to be tested. + */ + bool NeedPercentEncode(Ch c) const { + return !((c >= '0' && c <= '9') || (c >= 'A' && c <='Z') || (c >= 'a' && c <= 'z') || c == '-' || c == '.' || c == '_' || c =='~'); + } + + //! Parse a JSON String or its URI fragment representation into tokens. +#ifndef __clang__ // -Wdocumentation + /*! + \param source Either a JSON Pointer string, or its URI fragment representation. Not need to be null terminated. + \param length Length of the source string. + \note Source cannot be JSON String Representation of JSON Pointer, e.g. In "/\u0000", \u0000 will not be unescaped. + */ +#endif + void Parse(const Ch* source, size_t length) { + RAPIDJSON_ASSERT(source != NULL); + RAPIDJSON_ASSERT(nameBuffer_ == 0); + RAPIDJSON_ASSERT(tokens_ == 0); + + // Create own allocator if user did not supply. 
+ if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + + // Count number of '/' as tokenCount + tokenCount_ = 0; + for (const Ch* s = source; s != source + length; s++) + if (*s == '/') + tokenCount_++; + + Token* token = tokens_ = static_cast(allocator_->Malloc(tokenCount_ * sizeof(Token) + length * sizeof(Ch))); + Ch* name = nameBuffer_ = reinterpret_cast(tokens_ + tokenCount_); + size_t i = 0; + + // Detect if it is a URI fragment + bool uriFragment = false; + if (source[i] == '#') { + uriFragment = true; + i++; + } + + if (i != length && source[i] != '/') { + parseErrorCode_ = kPointerParseErrorTokenMustBeginWithSolidus; + goto error; + } + + while (i < length) { + RAPIDJSON_ASSERT(source[i] == '/'); + i++; // consumes '/' + + token->name = name; + bool isNumber = true; + + while (i < length && source[i] != '/') { + Ch c = source[i]; + if (uriFragment) { + // Decoding percent-encoding for URI fragment + if (c == '%') { + PercentDecodeStream is(&source[i], source + length); + GenericInsituStringStream os(name); + Ch* begin = os.PutBegin(); + if (!Transcoder, EncodingType>().Validate(is, os) || !is.IsValid()) { + parseErrorCode_ = kPointerParseErrorInvalidPercentEncoding; + goto error; + } + size_t len = os.PutEnd(begin); + i += is.Tell() - 1; + if (len == 1) + c = *name; + else { + name += len; + isNumber = false; + i++; + continue; + } + } + else if (NeedPercentEncode(c)) { + parseErrorCode_ = kPointerParseErrorCharacterMustPercentEncode; + goto error; + } + } + + i++; + + // Escaping "~0" -> '~', "~1" -> '/' + if (c == '~') { + if (i < length) { + c = source[i]; + if (c == '0') c = '~'; + else if (c == '1') c = '/'; + else { + parseErrorCode_ = kPointerParseErrorInvalidEscape; + goto error; + } + i++; + } + else { + parseErrorCode_ = kPointerParseErrorInvalidEscape; + goto error; + } + } + + // First check for index: all of characters are digit + if (c < '0' || c > '9') + isNumber = false; + + *name++ = c; + } + token->length = static_cast(name - token->name); + if (token->length == 0) + isNumber = false; + *name++ = '\0'; // Null terminator + + // Second check for index: more than one digit cannot have leading zero + if (isNumber && token->length > 1 && token->name[0] == '0') + isNumber = false; + + // String to SizeType conversion + SizeType n = 0; + if (isNumber) { + for (size_t j = 0; j < token->length; j++) { + SizeType m = n * 10 + static_cast(token->name[j] - '0'); + if (m < n) { // overflow detection + isNumber = false; + break; + } + n = m; + } + } + + token->index = isNumber ? n : kPointerInvalidIndex; + token++; + } + + RAPIDJSON_ASSERT(name <= nameBuffer_ + length); // Should not overflow buffer + parseErrorCode_ = kPointerParseErrorNone; + return; + + error: + Allocator::Free(tokens_); + nameBuffer_ = 0; + tokens_ = 0; + tokenCount_ = 0; + parseErrorOffset_ = i; + return; + } + + //! Stringify to string or URI fragment representation. + /*! + \tparam uriFragment True for stringifying to URI fragment representation. False for string representation. + \tparam OutputStream type of output stream. + \param os The output stream. 
+ */ + template + bool Stringify(OutputStream& os) const { + RAPIDJSON_ASSERT(IsValid()); + + if (uriFragment) + os.Put('#'); + + for (Token *t = tokens_; t != tokens_ + tokenCount_; ++t) { + os.Put('/'); + for (size_t j = 0; j < t->length; j++) { + Ch c = t->name[j]; + if (c == '~') { + os.Put('~'); + os.Put('0'); + } + else if (c == '/') { + os.Put('~'); + os.Put('1'); + } + else if (uriFragment && NeedPercentEncode(c)) { + // Transcode to UTF8 sequence + GenericStringStream source(&t->name[j]); + PercentEncodeStream target(os); + if (!Transcoder >().Validate(source, target)) + return false; + j += source.Tell() - 1; + } + else + os.Put(c); + } + } + return true; + } + + //! A helper stream for decoding a percent-encoded sequence into code unit. + /*! + This stream decodes %XY triplet into code unit (0-255). + If it encounters invalid characters, it sets output code unit as 0 and + mark invalid, and to be checked by IsValid(). + */ + class PercentDecodeStream { + public: + typedef typename ValueType::Ch Ch; + + //! Constructor + /*! + \param source Start of the stream + \param end Past-the-end of the stream. + */ + PercentDecodeStream(const Ch* source, const Ch* end) : src_(source), head_(source), end_(end), valid_(true) {} + + Ch Take() { + if (*src_ != '%' || src_ + 3 > end_) { // %XY triplet + valid_ = false; + return 0; + } + src_++; + Ch c = 0; + for (int j = 0; j < 2; j++) { + c = static_cast(c << 4); + Ch h = *src_; + if (h >= '0' && h <= '9') c = static_cast(c + h - '0'); + else if (h >= 'A' && h <= 'F') c = static_cast(c + h - 'A' + 10); + else if (h >= 'a' && h <= 'f') c = static_cast(c + h - 'a' + 10); + else { + valid_ = false; + return 0; + } + src_++; + } + return c; + } + + size_t Tell() const { return static_cast(src_ - head_); } + bool IsValid() const { return valid_; } + + private: + const Ch* src_; //!< Current read position. + const Ch* head_; //!< Original head of the string. + const Ch* end_; //!< Past-the-end position. + bool valid_; //!< Whether the parsing is valid. + }; + + //! A helper stream to encode character (UTF-8 code unit) into percent-encoded sequence. + template + class PercentEncodeStream { + public: + PercentEncodeStream(OutputStream& os) : os_(os) {} + void Put(char c) { // UTF-8 must be byte + unsigned char u = static_cast(c); + static const char hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + os_.Put('%'); + os_.Put(hexDigits[u >> 4]); + os_.Put(hexDigits[u & 15]); + } + private: + OutputStream& os_; + }; + + Allocator* allocator_; //!< The current allocator. It is either user-supplied or equal to ownAllocator_. + Allocator* ownAllocator_; //!< Allocator owned by this Pointer. + Ch* nameBuffer_; //!< A buffer containing all names in tokens. + Token* tokens_; //!< A list of tokens. + size_t tokenCount_; //!< Number of tokens in tokens_. + size_t parseErrorOffset_; //!< Offset in code unit when parsing fail. + PointerParseErrorCode parseErrorCode_; //!< Parsing error code. +}; + +//! GenericPointer for Value (UTF-8, default allocator). 
+typedef GenericPointer Pointer; + +//!@name Helper functions for GenericPointer +//@{ + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& CreateValueByPointer(T& root, const GenericPointer& pointer, typename T::AllocatorType& a) { + return pointer.Create(root, a); +} + +template +typename T::ValueType& CreateValueByPointer(T& root, const CharType(&source)[N], typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Create(root, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const GenericPointer& pointer) { + return pointer.Create(document); +} + +template +typename DocumentType::ValueType& CreateValueByPointer(DocumentType& document, const CharType(&source)[N]) { + return GenericPointer(source, N - 1).Create(document); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType* GetValueByPointer(T& root, const GenericPointer& pointer, size_t* unresolvedTokenIndex = 0) { + return pointer.Get(root, unresolvedTokenIndex); +} + +template +const typename T::ValueType* GetValueByPointer(const T& root, const GenericPointer& pointer, size_t* unresolvedTokenIndex = 0) { + return pointer.Get(root, unresolvedTokenIndex); +} + +template +typename T::ValueType* GetValueByPointer(T& root, const CharType (&source)[N], size_t* unresolvedTokenIndex = 0) { + return GenericPointer(source, N - 1).Get(root, unresolvedTokenIndex); +} + +template +const typename T::ValueType* GetValueByPointer(const T& root, const CharType(&source)[N], size_t* unresolvedTokenIndex = 0) { + return GenericPointer(source, N - 1).Get(root, unresolvedTokenIndex); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const typename T::Ch* defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, const std::basic_string& defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +GetValueByPointerWithDefault(T& root, const GenericPointer& pointer, T2 defaultValue, typename T::AllocatorType& a) { + return pointer.GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::ValueType& defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const typename T::Ch* defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& GetValueByPointerWithDefault(T& root, const CharType(&source)[N], const std::basic_string& 
defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +GetValueByPointerWithDefault(T& root, const CharType(&source)[N], T2 defaultValue, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).GetWithDefault(root, defaultValue, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::ValueType& defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::Ch* defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, const std::basic_string& defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +GetValueByPointerWithDefault(DocumentType& document, const GenericPointer& pointer, T2 defaultValue) { + return pointer.GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], const std::basic_string& defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +GetValueByPointerWithDefault(DocumentType& document, const CharType(&source)[N], T2 defaultValue) { + return GenericPointer(source, N - 1).GetWithDefault(document, defaultValue); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const typename T::Ch* value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& SetValueByPointer(T& root, const GenericPointer& pointer, const std::basic_string& value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} +#endif 
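The helpers above are thin wrappers that build a GenericPointer from a string literal (or accept one ready-made) and forward to the pointer's resolution methods, with or without an explicit allocator. A rough usage sketch follows; it is not part of the vendored header, it assumes the Document header shipped in this same RapidJSON 1.1.0 import, and the function name and input JSON are made up.

    #include "rapidjson/document.h"
    #include "rapidjson/pointer.h"

    using namespace rapidjson;

    static void PointerDemo() {
        Document d;
        d.Parse("{\"project\":\"rapidjson\",\"stars\":10}");   // made-up input

        // Get(): returns 0 when a token cannot be resolved.
        if (Value* stars = GetValueByPointer(d, "/stars"))
            stars->SetInt(stars->GetInt() + 1);

        // Set(): creates missing parents, so "/a/b" yields {"a":{"b":42}}.
        SetValueByPointer(d, "/a/b", 42);

        // GetWithDefault(): clones the default only when the value does not exist yet.
        Value& name = GetValueByPointerWithDefault(d, "/project", "unknown");
        (void)name;

        // Erase(): reports whether something was removed; the root pointer "" never erases.
        bool removed = EraseValueByPointer(d, "/stars");
        (void)removed;
    }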
+ +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +SetValueByPointer(T& root, const GenericPointer& pointer, T2 value, typename T::AllocatorType& a) { + return pointer.Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const typename T::Ch* value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename T::ValueType& SetValueByPointer(T& root, const CharType(&source)[N], const std::basic_string& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename T::ValueType&)) +SetValueByPointer(T& root, const CharType(&source)[N], T2 value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Set(root, value, a); +} + +// No allocator parameter + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, typename DocumentType::ValueType& value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::ValueType& value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const typename DocumentType::Ch* value) { + return pointer.Set(document, value); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const GenericPointer& pointer, const std::basic_string& value) { + return pointer.Set(document, value); +} +#endif + +template +RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +SetValueByPointer(DocumentType& document, const GenericPointer& pointer, T2 value) { + return pointer.Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const typename DocumentType::Ch* value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +#if RAPIDJSON_HAS_STDSTRING +template +typename DocumentType::ValueType& SetValueByPointer(DocumentType& document, const CharType(&source)[N], const std::basic_string& value) { + return GenericPointer(source, N - 1).Set(document, value); +} +#endif + +template 
+RAPIDJSON_DISABLEIF_RETURN((internal::OrExpr, internal::IsGenericValue >), (typename DocumentType::ValueType&)) +SetValueByPointer(DocumentType& document, const CharType(&source)[N], T2 value) { + return GenericPointer(source, N - 1).Set(document, value); +} + +////////////////////////////////////////////////////////////////////////////// + +template +typename T::ValueType& SwapValueByPointer(T& root, const GenericPointer& pointer, typename T::ValueType& value, typename T::AllocatorType& a) { + return pointer.Swap(root, value, a); +} + +template +typename T::ValueType& SwapValueByPointer(T& root, const CharType(&source)[N], typename T::ValueType& value, typename T::AllocatorType& a) { + return GenericPointer(source, N - 1).Swap(root, value, a); +} + +template +typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const GenericPointer& pointer, typename DocumentType::ValueType& value) { + return pointer.Swap(document, value); +} + +template +typename DocumentType::ValueType& SwapValueByPointer(DocumentType& document, const CharType(&source)[N], typename DocumentType::ValueType& value) { + return GenericPointer(source, N - 1).Swap(document, value); +} + +////////////////////////////////////////////////////////////////////////////// + +template +bool EraseValueByPointer(T& root, const GenericPointer& pointer) { + return pointer.Erase(root); +} + +template +bool EraseValueByPointer(T& root, const CharType(&source)[N]) { + return GenericPointer(source, N - 1).Erase(root); +} + +//@} + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_POINTER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/prettywriter.h b/contrib/rapidjson/1.1.0/include/rapidjson/prettywriter.h new file mode 100644 index 0000000..0dcb0fe --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/prettywriter.h @@ -0,0 +1,255 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_PRETTYWRITER_H_ +#define RAPIDJSON_PRETTYWRITER_H_ + +#include "writer.h" + +#ifdef __GNUC__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(effc++) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Combination of PrettyWriter format flags. +/*! \see PrettyWriter::SetFormatOptions + */ +enum PrettyFormatOptions { + kFormatDefault = 0, //!< Default pretty formatting. + kFormatSingleLineArray = 1 //!< Format arrays on a single line. +}; + +//! Writer with indentation and spacing. +/*! + \tparam OutputStream Type of ouptut os. + \tparam SourceEncoding Encoding of source string. + \tparam TargetEncoding Encoding of output stream. + \tparam StackAllocator Type of allocator for allocating memory of stack. 
+*/ +template, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> +class PrettyWriter : public Writer { +public: + typedef Writer Base; + typedef typename Base::Ch Ch; + + //! Constructor + /*! \param os Output stream. + \param allocator User supplied allocator. If it is null, it will create a private one. + \param levelDepth Initial capacity of stack. + */ + explicit PrettyWriter(OutputStream& os, StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : + Base(os, allocator, levelDepth), indentChar_(' '), indentCharCount_(4), formatOptions_(kFormatDefault) {} + + + explicit PrettyWriter(StackAllocator* allocator = 0, size_t levelDepth = Base::kDefaultLevelDepth) : + Base(allocator, levelDepth), indentChar_(' '), indentCharCount_(4) {} + + //! Set custom indentation. + /*! \param indentChar Character for indentation. Must be whitespace character (' ', '\\t', '\\n', '\\r'). + \param indentCharCount Number of indent characters for each indentation level. + \note The default indentation is 4 spaces. + */ + PrettyWriter& SetIndent(Ch indentChar, unsigned indentCharCount) { + RAPIDJSON_ASSERT(indentChar == ' ' || indentChar == '\t' || indentChar == '\n' || indentChar == '\r'); + indentChar_ = indentChar; + indentCharCount_ = indentCharCount; + return *this; + } + + //! Set pretty writer formatting options. + /*! \param options Formatting options. + */ + PrettyWriter& SetFormatOptions(PrettyFormatOptions options) { + formatOptions_ = options; + return *this; + } + + /*! @name Implementation of Handler + \see Handler + */ + //@{ + + bool Null() { PrettyPrefix(kNullType); return Base::WriteNull(); } + bool Bool(bool b) { PrettyPrefix(b ? kTrueType : kFalseType); return Base::WriteBool(b); } + bool Int(int i) { PrettyPrefix(kNumberType); return Base::WriteInt(i); } + bool Uint(unsigned u) { PrettyPrefix(kNumberType); return Base::WriteUint(u); } + bool Int64(int64_t i64) { PrettyPrefix(kNumberType); return Base::WriteInt64(i64); } + bool Uint64(uint64_t u64) { PrettyPrefix(kNumberType); return Base::WriteUint64(u64); } + bool Double(double d) { PrettyPrefix(kNumberType); return Base::WriteDouble(d); } + + bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + PrettyPrefix(kNumberType); + return Base::WriteString(str, length); + } + + bool String(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + PrettyPrefix(kStringType); + return Base::WriteString(str, length); + } + +#if RAPIDJSON_HAS_STDSTRING + bool String(const std::basic_string& str) { + return String(str.data(), SizeType(str.size())); + } +#endif + + bool StartObject() { + PrettyPrefix(kObjectType); + new (Base::level_stack_.template Push()) typename Base::Level(false); + return Base::WriteStartObject(); + } + + bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } + +#if RAPIDJSON_HAS_STDSTRING + bool Key(const std::basic_string& str) { + return Key(str.data(), SizeType(str.size())); + } +#endif + + bool EndObject(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); + RAPIDJSON_ASSERT(!Base::level_stack_.template Top()->inArray); + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + + if (!empty) { + Base::os_->Put('\n'); + WriteIndent(); + } + bool ret = Base::WriteEndObject(); + (void)ret; + RAPIDJSON_ASSERT(ret == true); + if (Base::level_stack_.Empty()) // end of json 
text + Base::os_->Flush(); + return true; + } + + bool StartArray() { + PrettyPrefix(kArrayType); + new (Base::level_stack_.template Push()) typename Base::Level(true); + return Base::WriteStartArray(); + } + + bool EndArray(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(Base::level_stack_.GetSize() >= sizeof(typename Base::Level)); + RAPIDJSON_ASSERT(Base::level_stack_.template Top()->inArray); + bool empty = Base::level_stack_.template Pop(1)->valueCount == 0; + + if (!empty && !(formatOptions_ & kFormatSingleLineArray)) { + Base::os_->Put('\n'); + WriteIndent(); + } + bool ret = Base::WriteEndArray(); + (void)ret; + RAPIDJSON_ASSERT(ret == true); + if (Base::level_stack_.Empty()) // end of json text + Base::os_->Flush(); + return true; + } + + //@} + + /*! @name Convenience extensions */ + //@{ + + //! Simpler but slower overload. + bool String(const Ch* str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } + + //@} + + //! Write a raw JSON value. + /*! + For user to write a stringified JSON as a value. + + \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. + \param length Length of the json. + \param type Type of the root of json. + \note When using PrettyWriter::RawValue(), the result json may not be indented correctly. + */ + bool RawValue(const Ch* json, size_t length, Type type) { PrettyPrefix(type); return Base::WriteRawValue(json, length); } + +protected: + void PrettyPrefix(Type type) { + (void)type; + if (Base::level_stack_.GetSize() != 0) { // this value is not at root + typename Base::Level* level = Base::level_stack_.template Top(); + + if (level->inArray) { + if (level->valueCount > 0) { + Base::os_->Put(','); // add comma if it is not the first element in array + if (formatOptions_ & kFormatSingleLineArray) + Base::os_->Put(' '); + } + + if (!(formatOptions_ & kFormatSingleLineArray)) { + Base::os_->Put('\n'); + WriteIndent(); + } + } + else { // in object + if (level->valueCount > 0) { + if (level->valueCount % 2 == 0) { + Base::os_->Put(','); + Base::os_->Put('\n'); + } + else { + Base::os_->Put(':'); + Base::os_->Put(' '); + } + } + else + Base::os_->Put('\n'); + + if (level->valueCount % 2 == 0) + WriteIndent(); + } + if (!level->inArray && level->valueCount % 2 == 0) + RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name + level->valueCount++; + } + else { + RAPIDJSON_ASSERT(!Base::hasRoot_); // Should only has one and only one root. + Base::hasRoot_ = true; + } + } + + void WriteIndent() { + size_t count = (Base::level_stack_.GetSize() / sizeof(typename Base::Level)) * indentCharCount_; + PutN(*Base::os_, static_cast(indentChar_), count); + } + + Ch indentChar_; + unsigned indentCharCount_; + PrettyFormatOptions formatOptions_; + +private: + // Prohibit copy constructor & assignment operator. + PrettyWriter(const PrettyWriter&); + PrettyWriter& operator=(const PrettyWriter&); +}; + +RAPIDJSON_NAMESPACE_END + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/rapidjson.h b/contrib/rapidjson/1.1.0/include/rapidjson/rapidjson.h new file mode 100644 index 0000000..053b2ce --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/rapidjson.h @@ -0,0 +1,615 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. 
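As a quick orientation for the PrettyWriter interface above, a small sketch; it is not part of the vendored headers, it assumes the StringBuffer header from this same RapidJSON 1.1.0 import, and the function name and values are made up. SetIndent() and SetFormatOptions() only change layout; the events written are the ordinary Writer/Handler calls.

    #include <cstdio>
    #include "rapidjson/prettywriter.h"
    #include "rapidjson/stringbuffer.h"

    using namespace rapidjson;

    static void PrettyDemo() {
        StringBuffer sb;
        PrettyWriter<StringBuffer> writer(sb);
        writer.SetIndent(' ', 2);                         // two spaces instead of the default four
        writer.SetFormatOptions(kFormatSingleLineArray);  // keep arrays on one line

        writer.StartObject();
        writer.Key("name");     writer.String("rapidjson");
        writer.Key("versions"); writer.StartArray();
        writer.Int(1);          writer.Int(2);
        writer.EndArray();
        writer.EndObject();

        std::puts(sb.GetString());   // object printed with 2-space indent; the array stays on one line
    }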
+// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_RAPIDJSON_H_ +#define RAPIDJSON_RAPIDJSON_H_ + +/*!\file rapidjson.h + \brief common definitions and configuration + + \see RAPIDJSON_CONFIG + */ + +/*! \defgroup RAPIDJSON_CONFIG RapidJSON configuration + \brief Configuration macros for library features + + Some RapidJSON features are configurable to adapt the library to a wide + variety of platforms, environments and usage scenarios. Most of the + features can be configured in terms of overriden or predefined + preprocessor macros at compile-time. + + Some additional customization is available in the \ref RAPIDJSON_ERRORS APIs. + + \note These macros should be given on the compiler command-line + (where applicable) to avoid inconsistent values when compiling + different translation units of a single application. + */ + +#include // malloc(), realloc(), free(), size_t +#include // memset(), memcpy(), memmove(), memcmp() + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_VERSION_STRING +// +// ALWAYS synchronize the following 3 macros with corresponding variables in /CMakeLists.txt. +// + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +// token stringification +#define RAPIDJSON_STRINGIFY(x) RAPIDJSON_DO_STRINGIFY(x) +#define RAPIDJSON_DO_STRINGIFY(x) #x +//!@endcond + +/*! \def RAPIDJSON_MAJOR_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Major version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_MINOR_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Minor version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_PATCH_VERSION + \ingroup RAPIDJSON_CONFIG + \brief Patch version of RapidJSON in integer. +*/ +/*! \def RAPIDJSON_VERSION_STRING + \ingroup RAPIDJSON_CONFIG + \brief Version of RapidJSON in ".." string format. +*/ +#define RAPIDJSON_MAJOR_VERSION 1 +#define RAPIDJSON_MINOR_VERSION 1 +#define RAPIDJSON_PATCH_VERSION 0 +#define RAPIDJSON_VERSION_STRING \ + RAPIDJSON_STRINGIFY(RAPIDJSON_MAJOR_VERSION.RAPIDJSON_MINOR_VERSION.RAPIDJSON_PATCH_VERSION) + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NAMESPACE_(BEGIN|END) +/*! \def RAPIDJSON_NAMESPACE + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace + + In order to avoid symbol clashes and/or "One Definition Rule" errors + between multiple inclusions of (different versions of) RapidJSON in + a single binary, users can customize the name of the main RapidJSON + namespace. + + In case of a single nesting level, defining \c RAPIDJSON_NAMESPACE + to a custom name (e.g. \c MyRapidJSON) is sufficient. If multiple + levels are needed, both \ref RAPIDJSON_NAMESPACE_BEGIN and \ref + RAPIDJSON_NAMESPACE_END need to be defined as well: + + \code + // in some .cpp file + #define RAPIDJSON_NAMESPACE my::rapidjson + #define RAPIDJSON_NAMESPACE_BEGIN namespace my { namespace rapidjson { + #define RAPIDJSON_NAMESPACE_END } } + #include "rapidjson/..." 
+ \endcode + + \see rapidjson + */ +/*! \def RAPIDJSON_NAMESPACE_BEGIN + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace (opening expression) + \see RAPIDJSON_NAMESPACE +*/ +/*! \def RAPIDJSON_NAMESPACE_END + \ingroup RAPIDJSON_CONFIG + \brief provide custom rapidjson namespace (closing expression) + \see RAPIDJSON_NAMESPACE +*/ +#ifndef RAPIDJSON_NAMESPACE +#define RAPIDJSON_NAMESPACE rapidjson +#endif +#ifndef RAPIDJSON_NAMESPACE_BEGIN +#define RAPIDJSON_NAMESPACE_BEGIN namespace RAPIDJSON_NAMESPACE { +#endif +#ifndef RAPIDJSON_NAMESPACE_END +#define RAPIDJSON_NAMESPACE_END } +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_HAS_STDSTRING + +#ifndef RAPIDJSON_HAS_STDSTRING +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_HAS_STDSTRING 1 // force generation of documentation +#else +#define RAPIDJSON_HAS_STDSTRING 0 // no std::string support by default +#endif +/*! \def RAPIDJSON_HAS_STDSTRING + \ingroup RAPIDJSON_CONFIG + \brief Enable RapidJSON support for \c std::string + + By defining this preprocessor symbol to \c 1, several convenience functions for using + \ref rapidjson::GenericValue with \c std::string are enabled, especially + for construction and comparison. + + \hideinitializer +*/ +#endif // !defined(RAPIDJSON_HAS_STDSTRING) + +#if RAPIDJSON_HAS_STDSTRING +#include +#endif // RAPIDJSON_HAS_STDSTRING + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NO_INT64DEFINE + +/*! \def RAPIDJSON_NO_INT64DEFINE + \ingroup RAPIDJSON_CONFIG + \brief Use external 64-bit integer types. + + RapidJSON requires the 64-bit integer types \c int64_t and \c uint64_t types + to be available at global scope. + + If users have their own definition, define RAPIDJSON_NO_INT64DEFINE to + prevent RapidJSON from defining its own types. +*/ +#ifndef RAPIDJSON_NO_INT64DEFINE +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#if defined(_MSC_VER) && (_MSC_VER < 1800) // Visual Studio 2013 +#include "msinttypes/stdint.h" +#include "msinttypes/inttypes.h" +#else +// Other compilers should have this. +#include +#include +#endif +//!@endcond +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_NO_INT64DEFINE +#endif +#endif // RAPIDJSON_NO_INT64TYPEDEF + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_FORCEINLINE + +#ifndef RAPIDJSON_FORCEINLINE +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#if defined(_MSC_VER) && defined(NDEBUG) +#define RAPIDJSON_FORCEINLINE __forceinline +#elif defined(__GNUC__) && __GNUC__ >= 4 && defined(NDEBUG) +#define RAPIDJSON_FORCEINLINE __attribute__((always_inline)) +#else +#define RAPIDJSON_FORCEINLINE +#endif +//!@endcond +#endif // RAPIDJSON_FORCEINLINE + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ENDIAN +#define RAPIDJSON_LITTLEENDIAN 0 //!< Little endian machine +#define RAPIDJSON_BIGENDIAN 1 //!< Big endian machine + +//! Endianness of the machine. +/*! + \def RAPIDJSON_ENDIAN + \ingroup RAPIDJSON_CONFIG + + GCC 4.6 provided macro for detecting endianness of the target machine. But other + compilers may not have this. User can define RAPIDJSON_ENDIAN to either + \ref RAPIDJSON_LITTLEENDIAN or \ref RAPIDJSON_BIGENDIAN. 
+ + Default detection implemented with reference to + \li https://gcc.gnu.org/onlinedocs/gcc-4.6.0/cpp/Common-Predefined-Macros.html + \li http://www.boost.org/doc/libs/1_42_0/boost/detail/endian.hpp +*/ +#ifndef RAPIDJSON_ENDIAN +// Detect with GCC 4.6's macro +# ifdef __BYTE_ORDER__ +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# else +# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# endif // __BYTE_ORDER__ +// Detect with GLIBC's endian.h +# elif defined(__GLIBC__) +# include +# if (__BYTE_ORDER == __LITTLE_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif (__BYTE_ORDER == __BIG_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# else +# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# endif // __GLIBC__ +// Detect with _LITTLE_ENDIAN and _BIG_ENDIAN macro +# elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +// Detect with architecture macros +# elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || defined(__hpux) || defined(__hppa) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__) +# define RAPIDJSON_ENDIAN RAPIDJSON_BIGENDIAN +# elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || defined(__bfin__) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(_MSC_VER) && defined(_M_ARM) +# define RAPIDJSON_ENDIAN RAPIDJSON_LITTLEENDIAN +# elif defined(RAPIDJSON_DOXYGEN_RUNNING) +# define RAPIDJSON_ENDIAN +# else +# error Unknown machine endianess detected. User needs to define RAPIDJSON_ENDIAN. +# endif +#endif // RAPIDJSON_ENDIAN + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_64BIT + +//! Whether using 64-bit architecture +#ifndef RAPIDJSON_64BIT +#if defined(__LP64__) || (defined(__x86_64__) && defined(__ILP32__)) || defined(_WIN64) || defined(__EMSCRIPTEN__) +#define RAPIDJSON_64BIT 1 +#else +#define RAPIDJSON_64BIT 0 +#endif +#endif // RAPIDJSON_64BIT + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_ALIGN + +//! Data alignment of the machine. +/*! \ingroup RAPIDJSON_CONFIG + \param x pointer to align + + Some machines require strict data alignment. Currently the default uses 4 bytes + alignment on 32-bit platforms and 8 bytes alignment for 64-bit platforms. + User can customize by defining the RAPIDJSON_ALIGN function macro. +*/ +#ifndef RAPIDJSON_ALIGN +#if RAPIDJSON_64BIT == 1 +#define RAPIDJSON_ALIGN(x) (((x) + static_cast(7u)) & ~static_cast(7u)) +#else +#define RAPIDJSON_ALIGN(x) (((x) + 3u) & ~3u) +#endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_UINT64_C2 + +//! Construct a 64-bit literal by a pair of 32-bit integer. +/*! + 64-bit literal with or without ULL suffix is prone to compiler warnings. + UINT64_C() is C macro which cause compilation problems. + Use this macro to define 64-bit constants by a pair of 32-bit integer. 
+*/ +#ifndef RAPIDJSON_UINT64_C2 +#define RAPIDJSON_UINT64_C2(high32, low32) ((static_cast(high32) << 32) | static_cast(low32)) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_48BITPOINTER_OPTIMIZATION + +//! Use only lower 48-bit address for some pointers. +/*! + \ingroup RAPIDJSON_CONFIG + + This optimization uses the fact that current X86-64 architecture only implement lower 48-bit virtual address. + The higher 16-bit can be used for storing other data. + \c GenericValue uses this optimization to reduce its size form 24 bytes to 16 bytes in 64-bit architecture. +*/ +#ifndef RAPIDJSON_48BITPOINTER_OPTIMIZATION +#if defined(__amd64__) || defined(__amd64) || defined(__x86_64__) || defined(__x86_64) || defined(_M_X64) || defined(_M_AMD64) +#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 1 +#else +#define RAPIDJSON_48BITPOINTER_OPTIMIZATION 0 +#endif +#endif // RAPIDJSON_48BITPOINTER_OPTIMIZATION + +#if RAPIDJSON_48BITPOINTER_OPTIMIZATION == 1 +#if RAPIDJSON_64BIT != 1 +#error RAPIDJSON_48BITPOINTER_OPTIMIZATION can only be set to 1 when RAPIDJSON_64BIT=1 +#endif +#define RAPIDJSON_SETPOINTER(type, p, x) (p = reinterpret_cast((reinterpret_cast(p) & static_cast(RAPIDJSON_UINT64_C2(0xFFFF0000, 0x00000000))) | reinterpret_cast(reinterpret_cast(x)))) +#define RAPIDJSON_GETPOINTER(type, p) (reinterpret_cast(reinterpret_cast(p) & static_cast(RAPIDJSON_UINT64_C2(0x0000FFFF, 0xFFFFFFFF)))) +#else +#define RAPIDJSON_SETPOINTER(type, p, x) (p = (x)) +#define RAPIDJSON_GETPOINTER(type, p) (p) +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_SSE2/RAPIDJSON_SSE42/RAPIDJSON_SIMD + +/*! \def RAPIDJSON_SIMD + \ingroup RAPIDJSON_CONFIG + \brief Enable SSE2/SSE4.2 optimization. + + RapidJSON supports optimized implementations for some parsing operations + based on the SSE2 or SSE4.2 SIMD extensions on modern Intel-compatible + processors. + + To enable these optimizations, two different symbols can be defined; + \code + // Enable SSE2 optimization. + #define RAPIDJSON_SSE2 + + // Enable SSE4.2 optimization. + #define RAPIDJSON_SSE42 + \endcode + + \c RAPIDJSON_SSE42 takes precedence, if both are defined. + + If any of these symbols is defined, RapidJSON defines the macro + \c RAPIDJSON_SIMD to indicate the availability of the optimized code. +*/ +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) \ + || defined(RAPIDJSON_DOXYGEN_RUNNING) +#define RAPIDJSON_SIMD +#endif + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_NO_SIZETYPEDEFINE + +#ifndef RAPIDJSON_NO_SIZETYPEDEFINE +/*! \def RAPIDJSON_NO_SIZETYPEDEFINE + \ingroup RAPIDJSON_CONFIG + \brief User-provided \c SizeType definition. + + In order to avoid using 32-bit size types for indexing strings and arrays, + define this preprocessor symbol and provide the type rapidjson::SizeType + before including RapidJSON: + \code + #define RAPIDJSON_NO_SIZETYPEDEFINE + namespace rapidjson { typedef ::std::size_t SizeType; } + #include "rapidjson/..." + \endcode + + \see rapidjson::SizeType +*/ +#ifdef RAPIDJSON_DOXYGEN_RUNNING +#define RAPIDJSON_NO_SIZETYPEDEFINE +#endif +RAPIDJSON_NAMESPACE_BEGIN +//! Size type (for string lengths, array sizes, etc.) +/*! RapidJSON uses 32-bit array/string indices even on 64-bit platforms, + instead of using \c size_t. Users may override the SizeType by defining + \ref RAPIDJSON_NO_SIZETYPEDEFINE. 
+*/
+typedef unsigned SizeType;
+RAPIDJSON_NAMESPACE_END
+#endif
+
+// always import std::size_t to rapidjson namespace
+RAPIDJSON_NAMESPACE_BEGIN
+using std::size_t;
+RAPIDJSON_NAMESPACE_END
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_ASSERT
+
+//! Assertion.
+/*! \ingroup RAPIDJSON_CONFIG
+    By default, rapidjson uses C \c assert() for internal assertions.
+    User can override it by defining RAPIDJSON_ASSERT(x) macro.
+
+    \note Parsing errors are handled and can be customized by the
+          \ref RAPIDJSON_ERRORS APIs.
+*/
+#ifndef RAPIDJSON_ASSERT
+#include <cassert>
+#define RAPIDJSON_ASSERT(x) assert(x)
+#endif // RAPIDJSON_ASSERT
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_STATIC_ASSERT
+
+// Adopt from boost
+#ifndef RAPIDJSON_STATIC_ASSERT
+#ifndef __clang__
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#endif
+RAPIDJSON_NAMESPACE_BEGIN
+template <bool x> struct STATIC_ASSERTION_FAILURE;
+template <> struct STATIC_ASSERTION_FAILURE<true> { enum { value = 1 }; };
+template <size_t x> struct StaticAssertTest {};
+RAPIDJSON_NAMESPACE_END
+
+#define RAPIDJSON_JOIN(X, Y) RAPIDJSON_DO_JOIN(X, Y)
+#define RAPIDJSON_DO_JOIN(X, Y) RAPIDJSON_DO_JOIN2(X, Y)
+#define RAPIDJSON_DO_JOIN2(X, Y) X##Y
+
+#if defined(__GNUC__)
+#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE __attribute__((unused))
+#else
+#define RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
+#endif
+#ifndef __clang__
+//!@endcond
+#endif
+
+/*! \def RAPIDJSON_STATIC_ASSERT
+    \brief (Internal) macro to check for conditions at compile-time
+    \param x compile-time condition
+    \hideinitializer
+ */
+#define RAPIDJSON_STATIC_ASSERT(x) \
+    typedef ::RAPIDJSON_NAMESPACE::StaticAssertTest< \
+      sizeof(::RAPIDJSON_NAMESPACE::STATIC_ASSERTION_FAILURE<bool(x) >)> \
+    RAPIDJSON_JOIN(StaticAssertTypedef, __LINE__) RAPIDJSON_STATIC_ASSERT_UNUSED_ATTRIBUTE
+#endif
+
+///////////////////////////////////////////////////////////////////////////////
+// RAPIDJSON_LIKELY, RAPIDJSON_UNLIKELY
+
+//! Compiler branching hint for expression with high probability to be true.
+/*!
+    \ingroup RAPIDJSON_CONFIG
+    \param x Boolean expression likely to be true.
+*/
+#ifndef RAPIDJSON_LIKELY
+#if defined(__GNUC__) || defined(__clang__)
+#define RAPIDJSON_LIKELY(x) __builtin_expect(!!(x), 1)
+#else
+#define RAPIDJSON_LIKELY(x) (x)
+#endif
+#endif
+
+//! Compiler branching hint for expression with low probability to be true.
+/*!
+    \ingroup RAPIDJSON_CONFIG
+    \param x Boolean expression unlikely to be true.
+*/ +#ifndef RAPIDJSON_UNLIKELY +#if defined(__GNUC__) || defined(__clang__) +#define RAPIDJSON_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define RAPIDJSON_UNLIKELY(x) (x) +#endif +#endif + +/////////////////////////////////////////////////////////////////////////////// +// Helpers + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN + +#define RAPIDJSON_MULTILINEMACRO_BEGIN do { +#define RAPIDJSON_MULTILINEMACRO_END \ +} while((void)0, 0) + +// adopted from Boost +#define RAPIDJSON_VERSION_CODE(x,y,z) \ + (((x)*100000) + ((y)*100) + (z)) + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_DIAG_PUSH/POP, RAPIDJSON_DIAG_OFF + +#if defined(__GNUC__) +#define RAPIDJSON_GNUC \ + RAPIDJSON_VERSION_CODE(__GNUC__,__GNUC_MINOR__,__GNUC_PATCHLEVEL__) +#endif + +#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,2,0)) + +#define RAPIDJSON_PRAGMA(x) _Pragma(RAPIDJSON_STRINGIFY(x)) +#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(GCC diagnostic x) +#define RAPIDJSON_DIAG_OFF(x) \ + RAPIDJSON_DIAG_PRAGMA(ignored RAPIDJSON_STRINGIFY(RAPIDJSON_JOIN(-W,x))) + +// push/pop support in Clang and GCC>=4.6 +#if defined(__clang__) || (defined(RAPIDJSON_GNUC) && RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) +#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) +#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) +#else // GCC >= 4.2, < 4.6 +#define RAPIDJSON_DIAG_PUSH /* ignored */ +#define RAPIDJSON_DIAG_POP /* ignored */ +#endif + +#elif defined(_MSC_VER) + +// pragma (MSVC specific) +#define RAPIDJSON_PRAGMA(x) __pragma(x) +#define RAPIDJSON_DIAG_PRAGMA(x) RAPIDJSON_PRAGMA(warning(x)) + +#define RAPIDJSON_DIAG_OFF(x) RAPIDJSON_DIAG_PRAGMA(disable: x) +#define RAPIDJSON_DIAG_PUSH RAPIDJSON_DIAG_PRAGMA(push) +#define RAPIDJSON_DIAG_POP RAPIDJSON_DIAG_PRAGMA(pop) + +#else + +#define RAPIDJSON_DIAG_OFF(x) /* ignored */ +#define RAPIDJSON_DIAG_PUSH /* ignored */ +#define RAPIDJSON_DIAG_POP /* ignored */ + +#endif // RAPIDJSON_DIAG_* + +/////////////////////////////////////////////////////////////////////////////// +// C++11 features + +#ifndef RAPIDJSON_HAS_CXX11_RVALUE_REFS +#if defined(__clang__) +#if __has_feature(cxx_rvalue_references) && \ + (defined(_LIBCPP_VERSION) || defined(__GLIBCXX__) && __GLIBCXX__ >= 20080306) +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#else +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 +#endif +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1600) + +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 1 +#else +#define RAPIDJSON_HAS_CXX11_RVALUE_REFS 0 +#endif +#endif // RAPIDJSON_HAS_CXX11_RVALUE_REFS + +#ifndef RAPIDJSON_HAS_CXX11_NOEXCEPT +#if defined(__clang__) +#define RAPIDJSON_HAS_CXX11_NOEXCEPT __has_feature(cxx_noexcept) +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,6,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) +// (defined(_MSC_VER) && _MSC_VER >= ????) 
// not yet supported +#define RAPIDJSON_HAS_CXX11_NOEXCEPT 1 +#else +#define RAPIDJSON_HAS_CXX11_NOEXCEPT 0 +#endif +#endif +#if RAPIDJSON_HAS_CXX11_NOEXCEPT +#define RAPIDJSON_NOEXCEPT noexcept +#else +#define RAPIDJSON_NOEXCEPT /* noexcept */ +#endif // RAPIDJSON_HAS_CXX11_NOEXCEPT + +// no automatic detection, yet +#ifndef RAPIDJSON_HAS_CXX11_TYPETRAITS +#define RAPIDJSON_HAS_CXX11_TYPETRAITS 0 +#endif + +#ifndef RAPIDJSON_HAS_CXX11_RANGE_FOR +#if defined(__clang__) +#define RAPIDJSON_HAS_CXX11_RANGE_FOR __has_feature(cxx_range_for) +#elif (defined(RAPIDJSON_GNUC) && (RAPIDJSON_GNUC >= RAPIDJSON_VERSION_CODE(4,3,0)) && defined(__GXX_EXPERIMENTAL_CXX0X__)) || \ + (defined(_MSC_VER) && _MSC_VER >= 1700) +#define RAPIDJSON_HAS_CXX11_RANGE_FOR 1 +#else +#define RAPIDJSON_HAS_CXX11_RANGE_FOR 0 +#endif +#endif // RAPIDJSON_HAS_CXX11_RANGE_FOR + +//!@endcond + +/////////////////////////////////////////////////////////////////////////////// +// new/delete + +#ifndef RAPIDJSON_NEW +///! customization point for global \c new +#define RAPIDJSON_NEW(x) new x +#endif +#ifndef RAPIDJSON_DELETE +///! customization point for global \c delete +#define RAPIDJSON_DELETE(x) delete x +#endif + +/////////////////////////////////////////////////////////////////////////////// +// Type + +/*! \namespace rapidjson + \brief main RapidJSON namespace + \see RAPIDJSON_NAMESPACE +*/ +RAPIDJSON_NAMESPACE_BEGIN + +//! Type of JSON value +enum Type { + kNullType = 0, //!< null + kFalseType = 1, //!< false + kTrueType = 2, //!< true + kObjectType = 3, //!< object + kArrayType = 4, //!< array + kStringType = 5, //!< string + kNumberType = 6 //!< number +}; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/reader.h b/contrib/rapidjson/1.1.0/include/rapidjson/reader.h new file mode 100644 index 0000000..19f8849 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/reader.h @@ -0,0 +1,1879 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_READER_H_ +#define RAPIDJSON_READER_H_ + +/*! 
\file reader.h */
+
+#include "allocators.h"
+#include "stream.h"
+#include "encodedstream.h"
+#include "internal/meta.h"
+#include "internal/stack.h"
+#include "internal/strtod.h"
+#include <limits>
+
+#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER)
+#include <intrin.h>
+#pragma intrinsic(_BitScanForward)
+#endif
+#ifdef RAPIDJSON_SSE42
+#include <nmmintrin.h>
+#elif defined(RAPIDJSON_SSE2)
+#include <emmintrin.h>
+#endif
+
+#ifdef _MSC_VER
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant
+RAPIDJSON_DIAG_OFF(4702) // unreachable code
+#endif
+
+#ifdef __clang__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(old-style-cast)
+RAPIDJSON_DIAG_OFF(padded)
+RAPIDJSON_DIAG_OFF(switch-enum)
+#endif
+
+#ifdef __GNUC__
+RAPIDJSON_DIAG_PUSH
+RAPIDJSON_DIAG_OFF(effc++)
+#endif
+
+//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN
+#define RAPIDJSON_NOTHING /* deliberately empty */
+#ifndef RAPIDJSON_PARSE_ERROR_EARLY_RETURN
+#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN(value) \
+    RAPIDJSON_MULTILINEMACRO_BEGIN \
+    if (RAPIDJSON_UNLIKELY(HasParseError())) { return value; } \
+    RAPIDJSON_MULTILINEMACRO_END
+#endif
+#define RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID \
+    RAPIDJSON_PARSE_ERROR_EARLY_RETURN(RAPIDJSON_NOTHING)
+//!@endcond
+
+/*! \def RAPIDJSON_PARSE_ERROR_NORETURN
+    \ingroup RAPIDJSON_ERRORS
+    \brief Macro to indicate a parse error.
+    \param parseErrorCode \ref rapidjson::ParseErrorCode of the error
+    \param offset position of the error in JSON input (\c size_t)
+
+    This macros can be used as a customization point for the internal
+    error handling mechanism of RapidJSON.
+
+    A common usage model is to throw an exception instead of requiring the
+    caller to explicitly check the \ref rapidjson::GenericReader::Parse's
+    return value:
+
+    \code
+    #define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode,offset) \
+       throw ParseException(parseErrorCode, #parseErrorCode, offset)
+
+    #include <stdexcept>               // std::runtime_error
+    #include "rapidjson/error/error.h" // rapidjson::ParseResult
+
+    struct ParseException : std::runtime_error, rapidjson::ParseResult {
+      ParseException(rapidjson::ParseErrorCode code, const char* msg, size_t offset)
+        : std::runtime_error(msg), ParseResult(code, offset) {}
+    };
+
+    #include "rapidjson/reader.h"
+    \endcode
+
+    \see RAPIDJSON_PARSE_ERROR, rapidjson::GenericReader::Parse
+ */
+#ifndef RAPIDJSON_PARSE_ERROR_NORETURN
+#define RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset) \
+    RAPIDJSON_MULTILINEMACRO_BEGIN \
+    RAPIDJSON_ASSERT(!HasParseError()); /* Error can only be assigned once */ \
+    SetParseError(parseErrorCode, offset); \
+    RAPIDJSON_MULTILINEMACRO_END
+#endif
+
+/*! \def RAPIDJSON_PARSE_ERROR
+    \ingroup RAPIDJSON_ERRORS
+    \brief (Internal) macro to indicate and handle a parse error.
+    \param parseErrorCode \ref rapidjson::ParseErrorCode of the error
+    \param offset position of the error in JSON input (\c size_t)
+
+    Invokes RAPIDJSON_PARSE_ERROR_NORETURN and stops the parsing.
+
+    \see RAPIDJSON_PARSE_ERROR_NORETURN
+    \hideinitializer
+ */
+#ifndef RAPIDJSON_PARSE_ERROR
+#define RAPIDJSON_PARSE_ERROR(parseErrorCode, offset) \
+    RAPIDJSON_MULTILINEMACRO_BEGIN \
+    RAPIDJSON_PARSE_ERROR_NORETURN(parseErrorCode, offset); \
+    RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; \
+    RAPIDJSON_MULTILINEMACRO_END
+#endif
+
+#include "error/error.h" // ParseErrorCode, ParseResult
+
+RAPIDJSON_NAMESPACE_BEGIN
+
+///////////////////////////////////////////////////////////////////////////////
+// ParseFlag
+
+/*!
\def RAPIDJSON_PARSE_DEFAULT_FLAGS + \ingroup RAPIDJSON_CONFIG + \brief User-defined kParseDefaultFlags definition. + + User can define this as any \c ParseFlag combinations. +*/ +#ifndef RAPIDJSON_PARSE_DEFAULT_FLAGS +#define RAPIDJSON_PARSE_DEFAULT_FLAGS kParseNoFlags +#endif + +//! Combination of parseFlags +/*! \see Reader::Parse, Document::Parse, Document::ParseInsitu, Document::ParseStream + */ +enum ParseFlag { + kParseNoFlags = 0, //!< No flags are set. + kParseInsituFlag = 1, //!< In-situ(destructive) parsing. + kParseValidateEncodingFlag = 2, //!< Validate encoding of JSON strings. + kParseIterativeFlag = 4, //!< Iterative(constant complexity in terms of function call stack size) parsing. + kParseStopWhenDoneFlag = 8, //!< After parsing a complete JSON root from stream, stop further processing the rest of stream. When this flag is used, parser will not generate kParseErrorDocumentRootNotSingular error. + kParseFullPrecisionFlag = 16, //!< Parse number in full precision (but slower). + kParseCommentsFlag = 32, //!< Allow one-line (//) and multi-line (/**/) comments. + kParseNumbersAsStringsFlag = 64, //!< Parse all numbers (ints/doubles) as strings. + kParseTrailingCommasFlag = 128, //!< Allow trailing commas at the end of objects and arrays. + kParseNanAndInfFlag = 256, //!< Allow parsing NaN, Inf, Infinity, -Inf and -Infinity as doubles. + kParseDefaultFlags = RAPIDJSON_PARSE_DEFAULT_FLAGS //!< Default parse flags. Can be customized by defining RAPIDJSON_PARSE_DEFAULT_FLAGS +}; + +/////////////////////////////////////////////////////////////////////////////// +// Handler + +/*! \class rapidjson::Handler + \brief Concept for receiving events from GenericReader upon parsing. + The functions return true if no error occurs. If they return false, + the event publisher should terminate the process. +\code +concept Handler { + typename Ch; + + bool Null(); + bool Bool(bool b); + bool Int(int i); + bool Uint(unsigned i); + bool Int64(int64_t i); + bool Uint64(uint64_t i); + bool Double(double d); + /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length) + bool RawNumber(const Ch* str, SizeType length, bool copy); + bool String(const Ch* str, SizeType length, bool copy); + bool StartObject(); + bool Key(const Ch* str, SizeType length, bool copy); + bool EndObject(SizeType memberCount); + bool StartArray(); + bool EndArray(SizeType elementCount); +}; +\endcode +*/ +/////////////////////////////////////////////////////////////////////////////// +// BaseReaderHandler + +//! Default implementation of Handler. +/*! This can be used as base class of any reader handler. 
+ \note implements Handler concept +*/ +template, typename Derived = void> +struct BaseReaderHandler { + typedef typename Encoding::Ch Ch; + + typedef typename internal::SelectIf, BaseReaderHandler, Derived>::Type Override; + + bool Default() { return true; } + bool Null() { return static_cast(*this).Default(); } + bool Bool(bool) { return static_cast(*this).Default(); } + bool Int(int) { return static_cast(*this).Default(); } + bool Uint(unsigned) { return static_cast(*this).Default(); } + bool Int64(int64_t) { return static_cast(*this).Default(); } + bool Uint64(uint64_t) { return static_cast(*this).Default(); } + bool Double(double) { return static_cast(*this).Default(); } + /// enabled via kParseNumbersAsStringsFlag, string is not null-terminated (use length) + bool RawNumber(const Ch* str, SizeType len, bool copy) { return static_cast(*this).String(str, len, copy); } + bool String(const Ch*, SizeType, bool) { return static_cast(*this).Default(); } + bool StartObject() { return static_cast(*this).Default(); } + bool Key(const Ch* str, SizeType len, bool copy) { return static_cast(*this).String(str, len, copy); } + bool EndObject(SizeType) { return static_cast(*this).Default(); } + bool StartArray() { return static_cast(*this).Default(); } + bool EndArray(SizeType) { return static_cast(*this).Default(); } +}; + +/////////////////////////////////////////////////////////////////////////////// +// StreamLocalCopy + +namespace internal { + +template::copyOptimization> +class StreamLocalCopy; + +//! Do copy optimization. +template +class StreamLocalCopy { +public: + StreamLocalCopy(Stream& original) : s(original), original_(original) {} + ~StreamLocalCopy() { original_ = s; } + + Stream s; + +private: + StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */; + + Stream& original_; +}; + +//! Keep reference. +template +class StreamLocalCopy { +public: + StreamLocalCopy(Stream& original) : s(original) {} + + Stream& s; + +private: + StreamLocalCopy& operator=(const StreamLocalCopy&) /* = delete */; +}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// SkipWhitespace + +//! Skip the JSON white spaces in a stream. +/*! \param is A input stream for skipping white spaces. + \note This function has SSE2/SSE4.2 specialization. +*/ +template +void SkipWhitespace(InputStream& is) { + internal::StreamLocalCopy copy(is); + InputStream& s(copy.s); + + typename InputStream::Ch c; + while ((c = s.Peek()) == ' ' || c == '\n' || c == '\r' || c == '\t') + s.Take(); +} + +inline const char* SkipWhitespace(const char* p, const char* end) { + while (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + return p; +} + +#ifdef RAPIDJSON_SSE42 +//! Skip whitespace with SSE 4.2 pcmpistrm instruction, testing 16 8-byte characters at once. 
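+// NOTE (editorial, not in upstream RapidJSON 1.1.0): the function below first walks
+// byte-by-byte up to a 16-byte boundary so that the later aligned _mm_load_si128
+// loads can never cross a page boundary. _mm_cmpistrm with
+// _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY produces a bit mask whose set bits mark
+// the bytes that are *not* in the whitespace set, so the first set bit (found with
+// _BitScanForward or __builtin_ffs) is the offset of the first non-whitespace byte.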
+inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // The rest of string using SIMD + static const char whitespace[16] = " \n\r\t"; + const __m128i w = _mm_loadu_si128(reinterpret_cast(&whitespace[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); + if (r != 0) { // some of characters is non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + // The middle of string using SIMD + static const char whitespace[16] = " \n\r\t"; + const __m128i w = _mm_loadu_si128(reinterpret_cast(&whitespace[0])); + + for (; p <= end - 16; p += 16) { + const __m128i s = _mm_loadu_si128(reinterpret_cast(p)); + const int r = _mm_cvtsi128_si32(_mm_cmpistrm(w, s, _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)); + if (r != 0) { // some of characters is non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } + + return SkipWhitespace(p, end); +} + +#elif defined(RAPIDJSON_SSE2) + +//! Skip whitespace with SSE2 instructions, testing 16 8-byte characters at once. 
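+// NOTE (editorial, not in upstream RapidJSON 1.1.0): lacking pcmpistrm, the SSE2
+// variant below broadcasts each of the four whitespace characters into its own
+// 16-byte vector, compares the input block against all four with _mm_cmpeq_epi8,
+// ORs the results and inverts the movemask; a non-zero mask therefore has its
+// lowest set bit at the index of the first non-whitespace byte.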
+inline const char *SkipWhitespace_SIMD(const char* p) { + // Fast return for single non-whitespace + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // 16-byte align to the next boundary + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t') + ++p; + else + return p; + + // The rest of string + #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } + static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; + #undef C16 + + const __m128i w0 = _mm_loadu_si128(reinterpret_cast(&whitespaces[0][0])); + const __m128i w1 = _mm_loadu_si128(reinterpret_cast(&whitespaces[1][0])); + const __m128i w2 = _mm_loadu_si128(reinterpret_cast(&whitespaces[2][0])); + const __m128i w3 = _mm_loadu_si128(reinterpret_cast(&whitespaces[3][0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + __m128i x = _mm_cmpeq_epi8(s, w0); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); + unsigned short r = static_cast(~_mm_movemask_epi8(x)); + if (r != 0) { // some of characters may be non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } +} + +inline const char *SkipWhitespace_SIMD(const char* p, const char* end) { + // Fast return for single non-whitespace + if (p != end && (*p == ' ' || *p == '\n' || *p == '\r' || *p == '\t')) + ++p; + else + return p; + + // The rest of string + #define C16(c) { c, c, c, c, c, c, c, c, c, c, c, c, c, c, c, c } + static const char whitespaces[4][16] = { C16(' '), C16('\n'), C16('\r'), C16('\t') }; + #undef C16 + + const __m128i w0 = _mm_loadu_si128(reinterpret_cast(&whitespaces[0][0])); + const __m128i w1 = _mm_loadu_si128(reinterpret_cast(&whitespaces[1][0])); + const __m128i w2 = _mm_loadu_si128(reinterpret_cast(&whitespaces[2][0])); + const __m128i w3 = _mm_loadu_si128(reinterpret_cast(&whitespaces[3][0])); + + for (; p <= end - 16; p += 16) { + const __m128i s = _mm_loadu_si128(reinterpret_cast(p)); + __m128i x = _mm_cmpeq_epi8(s, w0); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w1)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w2)); + x = _mm_or_si128(x, _mm_cmpeq_epi8(s, w3)); + unsigned short r = static_cast(~_mm_movemask_epi8(x)); + if (r != 0) { // some of characters may be non-whitespace +#ifdef _MSC_VER // Find the index of first non-whitespace + unsigned long offset; + _BitScanForward(&offset, r); + return p + offset; +#else + return p + __builtin_ffs(r) - 1; +#endif + } + } + + return SkipWhitespace(p, end); +} + +#endif // RAPIDJSON_SSE2 + +#ifdef RAPIDJSON_SIMD +//! Template function specialization for InsituStringStream +template<> inline void SkipWhitespace(InsituStringStream& is) { + is.src_ = const_cast(SkipWhitespace_SIMD(is.src_)); +} + +//! Template function specialization for StringStream +template<> inline void SkipWhitespace(StringStream& is) { + is.src_ = SkipWhitespace_SIMD(is.src_); +} + +template<> inline void SkipWhitespace(EncodedInputStream, MemoryStream>& is) { + is.is_.src_ = SkipWhitespace_SIMD(is.is_.src_, is.is_.end_); +} +#endif // RAPIDJSON_SIMD + +/////////////////////////////////////////////////////////////////////////////// +// GenericReader + +//! SAX-style JSON parser. 
Use \ref Reader for UTF8 encoding and default allocator. +/*! GenericReader parses JSON text from a stream, and send events synchronously to an + object implementing Handler concept. + + It needs to allocate a stack for storing a single decoded string during + non-destructive parsing. + + For in-situ parsing, the decoded string is directly written to the source + text string, no temporary buffer is required. + + A GenericReader object can be reused for parsing multiple JSON text. + + \tparam SourceEncoding Encoding of the input stream. + \tparam TargetEncoding Encoding of the parse output. + \tparam StackAllocator Allocator type for stack. +*/ +template +class GenericReader { +public: + typedef typename SourceEncoding::Ch Ch; //!< SourceEncoding character type + + //! Constructor. + /*! \param stackAllocator Optional allocator for allocating stack memory. (Only use for non-destructive parsing) + \param stackCapacity stack capacity in bytes for storing a single decoded string. (Only use for non-destructive parsing) + */ + GenericReader(StackAllocator* stackAllocator = 0, size_t stackCapacity = kDefaultStackCapacity) : stack_(stackAllocator, stackCapacity), parseResult_() {} + + //! Parse JSON text. + /*! \tparam parseFlags Combination of \ref ParseFlag. + \tparam InputStream Type of input stream, implementing Stream concept. + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. + */ + template + ParseResult Parse(InputStream& is, Handler& handler) { + if (parseFlags & kParseIterativeFlag) + return IterativeParse(is, handler); + + parseResult_.Clear(); + + ClearStackOnExit scope(*this); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentEmpty, is.Tell()); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + else { + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (!(parseFlags & kParseStopWhenDoneFlag)) { + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + + if (RAPIDJSON_UNLIKELY(is.Peek() != '\0')) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorDocumentRootNotSingular, is.Tell()); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + } + } + + return parseResult_; + } + + //! Parse JSON text (with \ref kParseDefaultFlags) + /*! \tparam InputStream Type of input stream, implementing Stream concept + \tparam Handler Type of handler, implementing Handler concept. + \param is Input stream to be parsed. + \param handler The handler to receive events. + \return Whether the parsing is successful. + */ + template + ParseResult Parse(InputStream& is, Handler& handler) { + return Parse(is, handler); + } + + //! Whether a parse error has occured in the last parsing. + bool HasParseError() const { return parseResult_.IsError(); } + + //! Get the \ref ParseErrorCode of last parsing. + ParseErrorCode GetParseErrorCode() const { return parseResult_.Code(); } + + //! Get the position of last parsing error in input, 0 otherwise. + size_t GetErrorOffset() const { return parseResult_.Offset(); } + +protected: + void SetParseError(ParseErrorCode code, size_t offset) { parseResult_.Set(code, offset); } + +private: + // Prohibit copy constructor & assignment operator. 
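+    // (Editorial note: C++03 idiom - the two declarations below are private and
+    // intentionally left undefined, which disables copying of GenericReader.)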
+ GenericReader(const GenericReader&); + GenericReader& operator=(const GenericReader&); + + void ClearStack() { stack_.Clear(); } + + // clear stack on any exit from ParseStream, e.g. due to exception + struct ClearStackOnExit { + explicit ClearStackOnExit(GenericReader& r) : r_(r) {} + ~ClearStackOnExit() { r_.ClearStack(); } + private: + GenericReader& r_; + ClearStackOnExit(const ClearStackOnExit&); + ClearStackOnExit& operator=(const ClearStackOnExit&); + }; + + template + void SkipWhitespaceAndComments(InputStream& is) { + SkipWhitespace(is); + + if (parseFlags & kParseCommentsFlag) { + while (RAPIDJSON_UNLIKELY(Consume(is, '/'))) { + if (Consume(is, '*')) { + while (true) { + if (RAPIDJSON_UNLIKELY(is.Peek() == '\0')) + RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); + else if (Consume(is, '*')) { + if (Consume(is, '/')) + break; + } + else + is.Take(); + } + } + else if (RAPIDJSON_LIKELY(Consume(is, '/'))) + while (is.Peek() != '\0' && is.Take() != '\n'); + else + RAPIDJSON_PARSE_ERROR(kParseErrorUnspecificSyntaxError, is.Tell()); + + SkipWhitespace(is); + } + } + } + + // Parse object: { string : value, ... } + template + void ParseObject(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == '{'); + is.Take(); // Skip '{' + + if (RAPIDJSON_UNLIKELY(!handler.StartObject())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, '}')) { + if (RAPIDJSON_UNLIKELY(!handler.EndObject(0))) // empty object + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + + for (SizeType memberCount = 0;;) { + if (RAPIDJSON_UNLIKELY(is.Peek() != '"')) + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); + + ParseString(is, handler, true); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (RAPIDJSON_UNLIKELY(!Consume(is, ':'))) + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ++memberCount; + + switch (is.Peek()) { + case ',': + is.Take(); + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + break; + case '}': + is.Take(); + if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + default: + RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); break; // This useless break is only for making warning and coverage happy + } + + if (parseFlags & kParseTrailingCommasFlag) { + if (is.Peek() == '}') { + if (RAPIDJSON_UNLIKELY(!handler.EndObject(memberCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + is.Take(); + return; + } + } + } + } + + // Parse array: [ value, ... 
] + template + void ParseArray(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == '['); + is.Take(); // Skip '[' + + if (RAPIDJSON_UNLIKELY(!handler.StartArray())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, ']')) { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(0))) // empty array + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + + for (SizeType elementCount = 0;;) { + ParseValue(is, handler); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + ++elementCount; + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + + if (Consume(is, ',')) { + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + } + else if (Consume(is, ']')) { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + return; + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); + + if (parseFlags & kParseTrailingCommasFlag) { + if (is.Peek() == ']') { + if (RAPIDJSON_UNLIKELY(!handler.EndArray(elementCount))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + is.Take(); + return; + } + } + } + } + + template + void ParseNull(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 'n'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'u') && Consume(is, 'l') && Consume(is, 'l'))) { + if (RAPIDJSON_UNLIKELY(!handler.Null())) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + void ParseTrue(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 't'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'r') && Consume(is, 'u') && Consume(is, 'e'))) { + if (RAPIDJSON_UNLIKELY(!handler.Bool(true))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + void ParseFalse(InputStream& is, Handler& handler) { + RAPIDJSON_ASSERT(is.Peek() == 'f'); + is.Take(); + + if (RAPIDJSON_LIKELY(Consume(is, 'a') && Consume(is, 'l') && Consume(is, 's') && Consume(is, 'e'))) { + if (RAPIDJSON_UNLIKELY(!handler.Bool(false))) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, is.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); + } + + template + RAPIDJSON_FORCEINLINE static bool Consume(InputStream& is, typename InputStream::Ch expect) { + if (RAPIDJSON_LIKELY(is.Peek() == expect)) { + is.Take(); + return true; + } + else + return false; + } + + // Helper function to parse four hexidecimal digits in \uXXXX in ParseString(). 
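+    // (Editorial note: on an invalid hex digit, ParseHex4 records
+    // kParseErrorStringUnicodeEscapeInvalidHex at the offset of the initial '\'
+    // and returns 0; callers re-check HasParseError() before using the result.)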
+ template + unsigned ParseHex4(InputStream& is, size_t escapeOffset) { + unsigned codepoint = 0; + for (int i = 0; i < 4; i++) { + Ch c = is.Peek(); + codepoint <<= 4; + codepoint += static_cast(c); + if (c >= '0' && c <= '9') + codepoint -= '0'; + else if (c >= 'A' && c <= 'F') + codepoint -= 'A' - 10; + else if (c >= 'a' && c <= 'f') + codepoint -= 'a' - 10; + else { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorStringUnicodeEscapeInvalidHex, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(0); + } + is.Take(); + } + return codepoint; + } + + template + class StackStream { + public: + typedef CharType Ch; + + StackStream(internal::Stack& stack) : stack_(stack), length_(0) {} + RAPIDJSON_FORCEINLINE void Put(Ch c) { + *stack_.template Push() = c; + ++length_; + } + + RAPIDJSON_FORCEINLINE void* Push(SizeType count) { + length_ += count; + return stack_.template Push(count); + } + + size_t Length() const { return length_; } + + Ch* Pop() { + return stack_.template Pop(length_); + } + + private: + StackStream(const StackStream&); + StackStream& operator=(const StackStream&); + + internal::Stack& stack_; + SizeType length_; + }; + + // Parse string and generate String event. Different code paths for kParseInsituFlag. + template + void ParseString(InputStream& is, Handler& handler, bool isKey = false) { + internal::StreamLocalCopy copy(is); + InputStream& s(copy.s); + + RAPIDJSON_ASSERT(s.Peek() == '\"'); + s.Take(); // Skip '\"' + + bool success = false; + if (parseFlags & kParseInsituFlag) { + typename InputStream::Ch *head = s.PutBegin(); + ParseStringToStream(s, s); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + size_t length = s.PutEnd(head) - 1; + RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); + const typename TargetEncoding::Ch* const str = reinterpret_cast(head); + success = (isKey ? handler.Key(str, SizeType(length), false) : handler.String(str, SizeType(length), false)); + } + else { + StackStream stackStream(stack_); + ParseStringToStream(s, stackStream); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + SizeType length = static_cast(stackStream.Length()) - 1; + const typename TargetEncoding::Ch* const str = stackStream.Pop(); + success = (isKey ? handler.Key(str, length, true) : handler.String(str, length, true)); + } + if (RAPIDJSON_UNLIKELY(!success)) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, s.Tell()); + } + + // Parse string to an output is + // This function handles the prefix/suffix double quotes, escaping, and optional encoding validation. + template + RAPIDJSON_FORCEINLINE void ParseStringToStream(InputStream& is, OutputStream& os) { +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + static const char escape[256] = { + Z16, Z16, 0, 0,'\"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'/', + Z16, Z16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, + 0, 0,'\b', 0, 0, 0,'\f', 0, 0, 0, 0, 0, 0, 0,'\n', 0, + 0, 0,'\r', 0,'\t', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 + }; +#undef Z16 +//!@endcond + + for (;;) { + // Scan and copy string before "\\\"" or < 0x20. This is an optional optimzation. 
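+            // (Editorial note: ScanCopyUnescapedString is a no-op in the generic
+            // version below; the SSE2/SSE4.2 overloads bulk-copy plain characters
+            // until they hit a quote, a backslash or a control character, after
+            // which the per-character loop here takes over again.)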
+ if (!(parseFlags & kParseValidateEncodingFlag)) + ScanCopyUnescapedString(is, os); + + Ch c = is.Peek(); + if (RAPIDJSON_UNLIKELY(c == '\\')) { // Escape + size_t escapeOffset = is.Tell(); // For invalid escaping, report the inital '\\' as error offset + is.Take(); + Ch e = is.Peek(); + if ((sizeof(Ch) == 1 || unsigned(e) < 256) && RAPIDJSON_LIKELY(escape[static_cast(e)])) { + is.Take(); + os.Put(static_cast(escape[static_cast(e)])); + } + else if (RAPIDJSON_LIKELY(e == 'u')) { // Unicode + is.Take(); + unsigned codepoint = ParseHex4(is, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + if (RAPIDJSON_UNLIKELY(codepoint >= 0xD800 && codepoint <= 0xDBFF)) { + // Handle UTF-16 surrogate pair + if (RAPIDJSON_UNLIKELY(!Consume(is, '\\') || !Consume(is, 'u'))) + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + unsigned codepoint2 = ParseHex4(is, escapeOffset); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN_VOID; + if (RAPIDJSON_UNLIKELY(codepoint2 < 0xDC00 || codepoint2 > 0xDFFF)) + RAPIDJSON_PARSE_ERROR(kParseErrorStringUnicodeSurrogateInvalid, escapeOffset); + codepoint = (((codepoint - 0xD800) << 10) | (codepoint2 - 0xDC00)) + 0x10000; + } + TEncoding::Encode(os, codepoint); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, escapeOffset); + } + else if (RAPIDJSON_UNLIKELY(c == '"')) { // Closing double quote + is.Take(); + os.Put('\0'); // null-terminate the string + return; + } + else if (RAPIDJSON_UNLIKELY(static_cast(c) < 0x20)) { // RFC 4627: unescaped = %x20-21 / %x23-5B / %x5D-10FFFF + if (c == '\0') + RAPIDJSON_PARSE_ERROR(kParseErrorStringMissQuotationMark, is.Tell()); + else + RAPIDJSON_PARSE_ERROR(kParseErrorStringEscapeInvalid, is.Tell()); + } + else { + size_t offset = is.Tell(); + if (RAPIDJSON_UNLIKELY((parseFlags & kParseValidateEncodingFlag ? 
+ !Transcoder::Validate(is, os) : + !Transcoder::Transcode(is, os)))) + RAPIDJSON_PARSE_ERROR(kParseErrorStringInvalidEncoding, offset); + } + } + } + + template + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InputStream&, OutputStream&) { + // Do nothing for generic version + } + +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) + // StringStream -> StackStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(StringStream& is, StackStream& os) { + const char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + return; + } + else + os.Put(*p++); + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + SizeType length; + #ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; + #else + length = static_cast(__builtin_ffs(r) - 1); + #endif + char* q = reinterpret_cast(os.Push(length)); + for (size_t i = 0; i < length; i++) + q[i] = p[i]; + + p += length; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(os.Push(16)), s); + } + + is.src_ = p; + } + + // InsituStringStream -> InsituStringStream + static RAPIDJSON_FORCEINLINE void ScanCopyUnescapedString(InsituStringStream& is, InsituStringStream& os) { + RAPIDJSON_ASSERT(&is == &os); + (void)os; + + if (is.src_ == is.dst_) { + SkipUnescapedString(is); + return; + } + + char* p = is.src_; + char *q = is.dst_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + while (p != nextAligned) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = p; + is.dst_ = q; + return; + } + else + *q++ = *p++; + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 
0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16, q += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + size_t length; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; +#else + length = static_cast(__builtin_ffs(r) - 1); +#endif + for (const char* pend = p + length; p != pend; ) + *q++ = *p++; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(q), s); + } + + is.src_ = p; + is.dst_ = q; + } + + // When read/write pointers are the same for insitu stream, just skip unescaped characters + static RAPIDJSON_FORCEINLINE void SkipUnescapedString(InsituStringStream& is) { + RAPIDJSON_ASSERT(is.src_ == is.dst_); + char* p = is.src_; + + // Scan one by one until alignment (unaligned load may cross page boundary and cause crash) + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + for (; p != nextAligned; p++) + if (RAPIDJSON_UNLIKELY(*p == '\"') || RAPIDJSON_UNLIKELY(*p == '\\') || RAPIDJSON_UNLIKELY(static_cast(*p) < 0x20)) { + is.src_ = is.dst_ = p; + return; + } + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (;; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = _mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + size_t length; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + length = offset; +#else + length = static_cast(__builtin_ffs(r) - 1); +#endif + p += length; + break; + } + } + + is.src_ = is.dst_ = p; + } +#endif + + template + class NumberStream; + + template + class NumberStream { + public: + typedef typename InputStream::Ch Ch; + + NumberStream(GenericReader& reader, InputStream& s) : is(s) { (void)reader; } + ~NumberStream() {} + + RAPIDJSON_FORCEINLINE Ch Peek() const { return is.Peek(); } + RAPIDJSON_FORCEINLINE Ch TakePush() { return is.Take(); } + RAPIDJSON_FORCEINLINE Ch Take() { return is.Take(); } + RAPIDJSON_FORCEINLINE void Push(char) {} + + size_t Tell() { return is.Tell(); } + size_t Length() { 
return 0; } + const char* Pop() { return 0; } + + protected: + NumberStream& operator=(const NumberStream&); + + InputStream& is; + }; + + template + class NumberStream : public NumberStream { + typedef NumberStream Base; + public: + NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is), stackStream(reader.stack_) {} + ~NumberStream() {} + + RAPIDJSON_FORCEINLINE Ch TakePush() { + stackStream.Put(static_cast(Base::is.Peek())); + return Base::is.Take(); + } + + RAPIDJSON_FORCEINLINE void Push(char c) { + stackStream.Put(c); + } + + size_t Length() { return stackStream.Length(); } + + const char* Pop() { + stackStream.Put('\0'); + return stackStream.Pop(); + } + + private: + StackStream stackStream; + }; + + template + class NumberStream : public NumberStream { + typedef NumberStream Base; + public: + NumberStream(GenericReader& reader, InputStream& is) : Base(reader, is) {} + ~NumberStream() {} + + RAPIDJSON_FORCEINLINE Ch Take() { return Base::TakePush(); } + }; + + template + void ParseNumber(InputStream& is, Handler& handler) { + internal::StreamLocalCopy copy(is); + NumberStream s(*this, copy.s); + + size_t startOffset = s.Tell(); + double d = 0.0; + bool useNanOrInf = false; + + // Parse minus + bool minus = Consume(s, '-'); + + // Parse int: zero / ( digit1-9 *DIGIT ) + unsigned i = 0; + uint64_t i64 = 0; + bool use64bit = false; + int significandDigit = 0; + if (RAPIDJSON_UNLIKELY(s.Peek() == '0')) { + i = 0; + s.TakePush(); + } + else if (RAPIDJSON_LIKELY(s.Peek() >= '1' && s.Peek() <= '9')) { + i = static_cast(s.TakePush() - '0'); + + if (minus) + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i >= 214748364)) { // 2^31 = 2147483648 + if (RAPIDJSON_LIKELY(i != 214748364 || s.Peek() > '8')) { + i64 = i; + use64bit = true; + break; + } + } + i = i * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + else + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i >= 429496729)) { // 2^32 - 1 = 4294967295 + if (RAPIDJSON_LIKELY(i != 429496729 || s.Peek() > '5')) { + i64 = i; + use64bit = true; + break; + } + } + i = i * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + } + // Parse NaN or Infinity here + else if ((parseFlags & kParseNanAndInfFlag) && RAPIDJSON_LIKELY((s.Peek() == 'I' || s.Peek() == 'N'))) { + useNanOrInf = true; + if (RAPIDJSON_LIKELY(Consume(s, 'N') && Consume(s, 'a') && Consume(s, 'N'))) { + d = std::numeric_limits::quiet_NaN(); + } + else if (RAPIDJSON_LIKELY(Consume(s, 'I') && Consume(s, 'n') && Consume(s, 'f'))) { + d = (minus ? 
-std::numeric_limits::infinity() : std::numeric_limits::infinity()); + if (RAPIDJSON_UNLIKELY(s.Peek() == 'i' && !(Consume(s, 'i') && Consume(s, 'n') + && Consume(s, 'i') && Consume(s, 't') && Consume(s, 'y')))) + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, s.Tell()); + + // Parse 64bit int + bool useDouble = false; + if (use64bit) { + if (minus) + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC))) // 2^63 = 9223372036854775808 + if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x0CCCCCCC, 0xCCCCCCCC) || s.Peek() > '8')) { + d = static_cast(i64); + useDouble = true; + break; + } + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + else + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(i64 >= RAPIDJSON_UINT64_C2(0x19999999, 0x99999999))) // 2^64 - 1 = 18446744073709551615 + if (RAPIDJSON_LIKELY(i64 != RAPIDJSON_UINT64_C2(0x19999999, 0x99999999) || s.Peek() > '5')) { + d = static_cast(i64); + useDouble = true; + break; + } + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + significandDigit++; + } + } + + // Force double for big integer + if (useDouble) { + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (RAPIDJSON_UNLIKELY(d >= 1.7976931348623157e307)) // DBL_MAX / 10.0 + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + d = d * 10 + (s.TakePush() - '0'); + } + } + + // Parse frac = decimal-point 1*DIGIT + int expFrac = 0; + size_t decimalPosition; + if (Consume(s, '.')) { + decimalPosition = s.Length(); + + if (RAPIDJSON_UNLIKELY(!(s.Peek() >= '0' && s.Peek() <= '9'))) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissFraction, s.Tell()); + + if (!useDouble) { +#if RAPIDJSON_64BIT + // Use i64 to store significand in 64-bit architecture + if (!use64bit) + i64 = i; + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (i64 > RAPIDJSON_UINT64_C2(0x1FFFFF, 0xFFFFFFFF)) // 2^53 - 1 for fast path + break; + else { + i64 = i64 * 10 + static_cast(s.TakePush() - '0'); + --expFrac; + if (i64 != 0) + significandDigit++; + } + } + + d = static_cast(i64); +#else + // Use double to store significand in 32-bit architecture + d = static_cast(use64bit ? i64 : i); +#endif + useDouble = true; + } + + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + if (significandDigit < 17) { + d = d * 10.0 + (s.TakePush() - '0'); + --expFrac; + if (RAPIDJSON_LIKELY(d > 0.0)) + significandDigit++; + } + else + s.TakePush(); + } + } + else + decimalPosition = s.Length(); // decimal position at the end of integer. + + // Parse exp = e [ minus / plus ] 1*DIGIT + int exp = 0; + if (Consume(s, 'e') || Consume(s, 'E')) { + if (!useDouble) { + d = static_cast(use64bit ? 
i64 : i); + useDouble = true; + } + + bool expMinus = false; + if (Consume(s, '+')) + ; + else if (Consume(s, '-')) + expMinus = true; + + if (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = static_cast(s.Take() - '0'); + if (expMinus) { + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = exp * 10 + static_cast(s.Take() - '0'); + if (exp >= 214748364) { // Issue #313: prevent overflow exponent + while (RAPIDJSON_UNLIKELY(s.Peek() >= '0' && s.Peek() <= '9')) // Consume the rest of exponent + s.Take(); + } + } + } + else { // positive exp + int maxExp = 308 - expFrac; + while (RAPIDJSON_LIKELY(s.Peek() >= '0' && s.Peek() <= '9')) { + exp = exp * 10 + static_cast(s.Take() - '0'); + if (RAPIDJSON_UNLIKELY(exp > maxExp)) + RAPIDJSON_PARSE_ERROR(kParseErrorNumberTooBig, startOffset); + } + } + } + else + RAPIDJSON_PARSE_ERROR(kParseErrorNumberMissExponent, s.Tell()); + + if (expMinus) + exp = -exp; + } + + // Finish parsing, call event according to the type of number. + bool cont = true; + + if (parseFlags & kParseNumbersAsStringsFlag) { + if (parseFlags & kParseInsituFlag) { + s.Pop(); // Pop stack no matter if it will be used or not. + typename InputStream::Ch* head = is.PutBegin(); + const size_t length = s.Tell() - startOffset; + RAPIDJSON_ASSERT(length <= 0xFFFFFFFF); + // unable to insert the \0 character here, it will erase the comma after this number + const typename TargetEncoding::Ch* const str = reinterpret_cast(head); + cont = handler.RawNumber(str, SizeType(length), false); + } + else { + SizeType numCharsToCopy = static_cast(s.Length()); + StringStream srcStream(s.Pop()); + StackStream dstStream(stack_); + while (numCharsToCopy--) { + Transcoder, TargetEncoding>::Transcode(srcStream, dstStream); + } + dstStream.Put('\0'); + const typename TargetEncoding::Ch* str = dstStream.Pop(); + const SizeType length = static_cast(dstStream.Length()) - 1; + cont = handler.RawNumber(str, SizeType(length), true); + } + } + else { + size_t length = s.Length(); + const char* decimal = s.Pop(); // Pop stack no matter if it will be used or not. + + if (useDouble) { + int p = exp + expFrac; + if (parseFlags & kParseFullPrecisionFlag) + d = internal::StrtodFullPrecision(d, p, decimal, length, decimalPosition, exp); + else + d = internal::StrtodNormalPrecision(d, p); + + cont = handler.Double(minus ? 
-d : d); + } + else if (useNanOrInf) { + cont = handler.Double(d); + } + else { + if (use64bit) { + if (minus) + cont = handler.Int64(static_cast(~i64 + 1)); + else + cont = handler.Uint64(i64); + } + else { + if (minus) + cont = handler.Int(static_cast(~i + 1)); + else + cont = handler.Uint(i); + } + } + } + if (RAPIDJSON_UNLIKELY(!cont)) + RAPIDJSON_PARSE_ERROR(kParseErrorTermination, startOffset); + } + + // Parse any JSON value + template + void ParseValue(InputStream& is, Handler& handler) { + switch (is.Peek()) { + case 'n': ParseNull (is, handler); break; + case 't': ParseTrue (is, handler); break; + case 'f': ParseFalse (is, handler); break; + case '"': ParseString(is, handler); break; + case '{': ParseObject(is, handler); break; + case '[': ParseArray (is, handler); break; + default : + ParseNumber(is, handler); + break; + + } + } + + // Iterative Parsing + + // States + enum IterativeParsingState { + IterativeParsingStartState = 0, + IterativeParsingFinishState, + IterativeParsingErrorState, + + // Object states + IterativeParsingObjectInitialState, + IterativeParsingMemberKeyState, + IterativeParsingKeyValueDelimiterState, + IterativeParsingMemberValueState, + IterativeParsingMemberDelimiterState, + IterativeParsingObjectFinishState, + + // Array states + IterativeParsingArrayInitialState, + IterativeParsingElementState, + IterativeParsingElementDelimiterState, + IterativeParsingArrayFinishState, + + // Single value state + IterativeParsingValueState + }; + + enum { cIterativeParsingStateCount = IterativeParsingValueState + 1 }; + + // Tokens + enum Token { + LeftBracketToken = 0, + RightBracketToken, + + LeftCurlyBracketToken, + RightCurlyBracketToken, + + CommaToken, + ColonToken, + + StringToken, + FalseToken, + TrueToken, + NullToken, + NumberToken, + + kTokenCount + }; + + RAPIDJSON_FORCEINLINE Token Tokenize(Ch c) { + +//!@cond RAPIDJSON_HIDDEN_FROM_DOXYGEN +#define N NumberToken +#define N16 N,N,N,N,N,N,N,N,N,N,N,N,N,N,N,N + // Maps from ASCII to Token + static const unsigned char tokenMap[256] = { + N16, // 00~0F + N16, // 10~1F + N, N, StringToken, N, N, N, N, N, N, N, N, N, CommaToken, N, N, N, // 20~2F + N, N, N, N, N, N, N, N, N, N, ColonToken, N, N, N, N, N, // 30~3F + N16, // 40~4F + N, N, N, N, N, N, N, N, N, N, N, LeftBracketToken, N, RightBracketToken, N, N, // 50~5F + N, N, N, N, N, N, FalseToken, N, N, N, N, N, N, N, NullToken, N, // 60~6F + N, N, N, N, TrueToken, N, N, N, N, N, N, LeftCurlyBracketToken, N, RightCurlyBracketToken, N, N, // 70~7F + N16, N16, N16, N16, N16, N16, N16, N16 // 80~FF + }; +#undef N +#undef N16 +//!@endcond + + if (sizeof(Ch) == 1 || static_cast(c) < 256) + return static_cast(tokenMap[static_cast(c)]); + else + return NumberToken; + } + + RAPIDJSON_FORCEINLINE IterativeParsingState Predict(IterativeParsingState state, Token token) { + // current state x one lookahead token -> new state + static const char G[cIterativeParsingStateCount][kTokenCount] = { + // Start + { + IterativeParsingArrayInitialState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingValueState, // String + IterativeParsingValueState, // False + IterativeParsingValueState, // True + IterativeParsingValueState, // Null + IterativeParsingValueState // Number + }, + // Finish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, 
IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Error(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // ObjectInitial + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // MemberKey + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingKeyValueDelimiterState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // KeyValueDelimiter + { + IterativeParsingArrayInitialState, // Left bracket(push MemberValue state) + IterativeParsingErrorState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push MemberValue state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberValueState, // String + IterativeParsingMemberValueState, // False + IterativeParsingMemberValueState, // True + IterativeParsingMemberValueState, // Null + IterativeParsingMemberValueState // Number + }, + // MemberValue + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingMemberDelimiterState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // MemberDelimiter + { + IterativeParsingErrorState, // Left bracket + IterativeParsingErrorState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingObjectFinishState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingMemberKeyState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // ObjectFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, 
IterativeParsingErrorState, + IterativeParsingErrorState + }, + // ArrayInitial + { + IterativeParsingArrayInitialState, // Left bracket(push Element state) + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push Element state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingElementState, // String + IterativeParsingElementState, // False + IterativeParsingElementState, // True + IterativeParsingElementState, // Null + IterativeParsingElementState // Number + }, + // Element + { + IterativeParsingErrorState, // Left bracket + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingErrorState, // Left curly bracket + IterativeParsingErrorState, // Right curly bracket + IterativeParsingElementDelimiterState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingErrorState, // String + IterativeParsingErrorState, // False + IterativeParsingErrorState, // True + IterativeParsingErrorState, // Null + IterativeParsingErrorState // Number + }, + // ElementDelimiter + { + IterativeParsingArrayInitialState, // Left bracket(push Element state) + IterativeParsingArrayFinishState, // Right bracket + IterativeParsingObjectInitialState, // Left curly bracket(push Element state) + IterativeParsingErrorState, // Right curly bracket + IterativeParsingErrorState, // Comma + IterativeParsingErrorState, // Colon + IterativeParsingElementState, // String + IterativeParsingElementState, // False + IterativeParsingElementState, // True + IterativeParsingElementState, // Null + IterativeParsingElementState // Number + }, + // ArrayFinish(sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + }, + // Single Value (sink state) + { + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, IterativeParsingErrorState, + IterativeParsingErrorState + } + }; // End of G + + return static_cast(G[state][token]); + } + + // Make an advance in the token stream and state based on the candidate destination state which was returned by Transit(). + // May return a new state on state pop. + template + RAPIDJSON_FORCEINLINE IterativeParsingState Transit(IterativeParsingState src, Token token, IterativeParsingState dst, InputStream& is, Handler& handler) { + (void)token; + + switch (dst) { + case IterativeParsingErrorState: + return dst; + + case IterativeParsingObjectInitialState: + case IterativeParsingArrayInitialState: + { + // Push the state(Element or MemeberValue) if we are nested in another array or value of member. + // In this way we can get the correct state on ObjectFinish or ArrayFinish by frame pop. + IterativeParsingState n = src; + if (src == IterativeParsingArrayInitialState || src == IterativeParsingElementDelimiterState) + n = IterativeParsingElementState; + else if (src == IterativeParsingKeyValueDelimiterState) + n = IterativeParsingMemberValueState; + // Push current state. + *stack_.template Push(1) = n; + // Initialize and push the member/element count. 
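+            // (Editorial note: the parse stack thus interleaves a saved parent
+            // state with a running SizeType member/element count; the matching
+            // ObjectFinish/ArrayFinish transitions pop them in reverse order.)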
+ *stack_.template Push(1) = 0; + // Call handler + bool hr = (dst == IterativeParsingObjectInitialState) ? handler.StartObject() : handler.StartArray(); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return dst; + } + } + + case IterativeParsingMemberKeyState: + ParseString(is, handler, true); + if (HasParseError()) + return IterativeParsingErrorState; + else + return dst; + + case IterativeParsingKeyValueDelimiterState: + RAPIDJSON_ASSERT(token == ColonToken); + is.Take(); + return dst; + + case IterativeParsingMemberValueState: + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return dst; + + case IterativeParsingElementState: + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return dst; + + case IterativeParsingMemberDelimiterState: + case IterativeParsingElementDelimiterState: + is.Take(); + // Update member/element count. + *stack_.template Top() = *stack_.template Top() + 1; + return dst; + + case IterativeParsingObjectFinishState: + { + // Transit from delimiter is only allowed when trailing commas are enabled + if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingMemberDelimiterState) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorObjectMissName, is.Tell()); + return IterativeParsingErrorState; + } + // Get member count. + SizeType c = *stack_.template Pop(1); + // If the object is not empty, count the last member. + if (src == IterativeParsingMemberValueState) + ++c; + // Restore the state. + IterativeParsingState n = static_cast(*stack_.template Pop(1)); + // Transit to Finish state if this is the topmost scope. + if (n == IterativeParsingStartState) + n = IterativeParsingFinishState; + // Call handler + bool hr = handler.EndObject(c); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return n; + } + } + + case IterativeParsingArrayFinishState: + { + // Transit from delimiter is only allowed when trailing commas are enabled + if (!(parseFlags & kParseTrailingCommasFlag) && src == IterativeParsingElementDelimiterState) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorValueInvalid, is.Tell()); + return IterativeParsingErrorState; + } + // Get element count. + SizeType c = *stack_.template Pop(1); + // If the array is not empty, count the last element. + if (src == IterativeParsingElementState) + ++c; + // Restore the state. + IterativeParsingState n = static_cast(*stack_.template Pop(1)); + // Transit to Finish state if this is the topmost scope. + if (n == IterativeParsingStartState) + n = IterativeParsingFinishState; + // Call handler + bool hr = handler.EndArray(c); + // On handler short circuits the parsing. + if (!hr) { + RAPIDJSON_PARSE_ERROR_NORETURN(kParseErrorTermination, is.Tell()); + return IterativeParsingErrorState; + } + else { + is.Take(); + return n; + } + } + + default: + // This branch is for IterativeParsingValueState actually. + // Use `default:` rather than + // `case IterativeParsingValueState:` is for code coverage. + + // The IterativeParsingStartState is not enumerated in this switch-case. 
+ // It is impossible for that case. And it can be caught by following assertion. + + // The IterativeParsingFinishState is not enumerated in this switch-case either. + // It is a "derivative" state which cannot triggered from Predict() directly. + // Therefore it cannot happen here. And it can be caught by following assertion. + RAPIDJSON_ASSERT(dst == IterativeParsingValueState); + + // Must be non-compound value. Or it would be ObjectInitial or ArrayInitial state. + ParseValue(is, handler); + if (HasParseError()) { + return IterativeParsingErrorState; + } + return IterativeParsingFinishState; + } + } + + template + void HandleError(IterativeParsingState src, InputStream& is) { + if (HasParseError()) { + // Error flag has been set. + return; + } + + switch (src) { + case IterativeParsingStartState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentEmpty, is.Tell()); return; + case IterativeParsingFinishState: RAPIDJSON_PARSE_ERROR(kParseErrorDocumentRootNotSingular, is.Tell()); return; + case IterativeParsingObjectInitialState: + case IterativeParsingMemberDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissName, is.Tell()); return; + case IterativeParsingMemberKeyState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissColon, is.Tell()); return; + case IterativeParsingMemberValueState: RAPIDJSON_PARSE_ERROR(kParseErrorObjectMissCommaOrCurlyBracket, is.Tell()); return; + case IterativeParsingKeyValueDelimiterState: + case IterativeParsingArrayInitialState: + case IterativeParsingElementDelimiterState: RAPIDJSON_PARSE_ERROR(kParseErrorValueInvalid, is.Tell()); return; + default: RAPIDJSON_ASSERT(src == IterativeParsingElementState); RAPIDJSON_PARSE_ERROR(kParseErrorArrayMissCommaOrSquareBracket, is.Tell()); return; + } + } + + template + ParseResult IterativeParse(InputStream& is, Handler& handler) { + parseResult_.Clear(); + ClearStackOnExit scope(*this); + IterativeParsingState state = IterativeParsingStartState; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + while (is.Peek() != '\0') { + Token t = Tokenize(is.Peek()); + IterativeParsingState n = Predict(state, t); + IterativeParsingState d = Transit(state, t, n, is, handler); + + if (d == IterativeParsingErrorState) { + HandleError(state, is); + break; + } + + state = d; + + // Do not further consume streams if a root JSON has been parsed. + if ((parseFlags & kParseStopWhenDoneFlag) && state == IterativeParsingFinishState) + break; + + SkipWhitespaceAndComments(is); + RAPIDJSON_PARSE_ERROR_EARLY_RETURN(parseResult_); + } + + // Handle the end of file. + if (state != IterativeParsingFinishState) + HandleError(state, is); + + return parseResult_; + } + + static const size_t kDefaultStackCapacity = 256; //!< Default stack capacity in bytes for storing a single decoded string. + internal::Stack stack_; //!< A stack for storing decoded string temporarily during non-destructive parsing. + ParseResult parseResult_; +}; // class GenericReader + +//! Reader with UTF8 encoding and default allocator. 
+typedef GenericReader<UTF8<>, UTF8<> > Reader; + +RAPIDJSON_NAMESPACE_END + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + + +#ifdef __GNUC__ +RAPIDJSON_DIAG_POP +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_READER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/schema.h b/contrib/rapidjson/1.1.0/include/rapidjson/schema.h new file mode 100644 index 0000000..b182aa2 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/schema.h @@ -0,0 +1,2006 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_SCHEMA_H_ +#define RAPIDJSON_SCHEMA_H_ + +#include "document.h" +#include "pointer.h" +#include <cmath> // abs, floor + +#if !defined(RAPIDJSON_SCHEMA_USE_INTERNALREGEX) +#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 1 +#else +#define RAPIDJSON_SCHEMA_USE_INTERNALREGEX 0 +#endif + +#if !RAPIDJSON_SCHEMA_USE_INTERNALREGEX && !defined(RAPIDJSON_SCHEMA_USE_STDREGEX) && (__cplusplus >=201103L || (defined(_MSC_VER) && _MSC_VER >= 1800)) +#define RAPIDJSON_SCHEMA_USE_STDREGEX 1 +#else +#define RAPIDJSON_SCHEMA_USE_STDREGEX 0 +#endif + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX +#include "internal/regex.h" +#elif RAPIDJSON_SCHEMA_USE_STDREGEX +#include <regex> +#endif + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX || RAPIDJSON_SCHEMA_USE_STDREGEX +#define RAPIDJSON_SCHEMA_HAS_REGEX 1 +#else +#define RAPIDJSON_SCHEMA_HAS_REGEX 0 +#endif + +#ifndef RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_SCHEMA_VERBOSE 0 +#endif + +#if RAPIDJSON_SCHEMA_VERBOSE +#include "stringbuffer.h" +#endif + +RAPIDJSON_DIAG_PUSH + +#if defined(__GNUC__) +RAPIDJSON_DIAG_OFF(effc++) +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_OFF(weak-vtables) +RAPIDJSON_DIAG_OFF(exit-time-destructors) +RAPIDJSON_DIAG_OFF(c++98-compat-pedantic) +RAPIDJSON_DIAG_OFF(variadic-macros) +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_OFF(4512) // assignment operator could not be generated +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Verbose Utilities + +#if RAPIDJSON_SCHEMA_VERBOSE + +namespace internal { + +inline void PrintInvalidKeyword(const char* keyword) { + printf("Fail keyword: %s\n", keyword); +} + +inline void PrintInvalidKeyword(const wchar_t* keyword) { + wprintf(L"Fail keyword: %ls\n", keyword); +} + +inline void PrintInvalidDocument(const char* document) { + printf("Fail document: %s\n\n", document); +} + +inline void PrintInvalidDocument(const wchar_t* document) { + wprintf(L"Fail document: %ls\n\n", document); +} + +inline void PrintValidatorPointers(unsigned depth, const char* s, const char* d) { + printf("S: %*s%s\nD: %*s%s\n\n", depth * 4, " ", s, depth * 4, " ", d); +} + +inline void PrintValidatorPointers(unsigned depth, const wchar_t* s, const wchar_t* d) { + wprintf(L"S: %*ls%ls\nD: %*ls%ls\n\n", depth * 4, L" ", s, depth * 4, L" ", d); +} + +} // 
namespace internal + +#endif // RAPIDJSON_SCHEMA_VERBOSE + +/////////////////////////////////////////////////////////////////////////////// +// RAPIDJSON_INVALID_KEYWORD_RETURN + +#if RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) internal::PrintInvalidKeyword(keyword) +#else +#define RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword) +#endif + +#define RAPIDJSON_INVALID_KEYWORD_RETURN(keyword)\ +RAPIDJSON_MULTILINEMACRO_BEGIN\ + context.invalidKeyword = keyword.GetString();\ + RAPIDJSON_INVALID_KEYWORD_VERBOSE(keyword.GetString());\ + return false;\ +RAPIDJSON_MULTILINEMACRO_END + +/////////////////////////////////////////////////////////////////////////////// +// Forward declarations + +template +class GenericSchemaDocument; + +namespace internal { + +template +class Schema; + +/////////////////////////////////////////////////////////////////////////////// +// ISchemaValidator + +class ISchemaValidator { +public: + virtual ~ISchemaValidator() {} + virtual bool IsValid() const = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// ISchemaStateFactory + +template +class ISchemaStateFactory { +public: + virtual ~ISchemaStateFactory() {} + virtual ISchemaValidator* CreateSchemaValidator(const SchemaType&) = 0; + virtual void DestroySchemaValidator(ISchemaValidator* validator) = 0; + virtual void* CreateHasher() = 0; + virtual uint64_t GetHashCode(void* hasher) = 0; + virtual void DestroryHasher(void* hasher) = 0; + virtual void* MallocState(size_t size) = 0; + virtual void FreeState(void* p) = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// Hasher + +// For comparison of compound value +template +class Hasher { +public: + typedef typename Encoding::Ch Ch; + + Hasher(Allocator* allocator = 0, size_t stackCapacity = kDefaultSize) : stack_(allocator, stackCapacity) {} + + bool Null() { return WriteType(kNullType); } + bool Bool(bool b) { return WriteType(b ? 
kTrueType : kFalseType); } + bool Int(int i) { Number n; n.u.i = i; n.d = static_cast(i); return WriteNumber(n); } + bool Uint(unsigned u) { Number n; n.u.u = u; n.d = static_cast(u); return WriteNumber(n); } + bool Int64(int64_t i) { Number n; n.u.i = i; n.d = static_cast(i); return WriteNumber(n); } + bool Uint64(uint64_t u) { Number n; n.u.u = u; n.d = static_cast(u); return WriteNumber(n); } + bool Double(double d) { + Number n; + if (d < 0) n.u.i = static_cast(d); + else n.u.u = static_cast(d); + n.d = d; + return WriteNumber(n); + } + + bool RawNumber(const Ch* str, SizeType len, bool) { + WriteBuffer(kNumberType, str, len * sizeof(Ch)); + return true; + } + + bool String(const Ch* str, SizeType len, bool) { + WriteBuffer(kStringType, str, len * sizeof(Ch)); + return true; + } + + bool StartObject() { return true; } + bool Key(const Ch* str, SizeType len, bool copy) { return String(str, len, copy); } + bool EndObject(SizeType memberCount) { + uint64_t h = Hash(0, kObjectType); + uint64_t* kv = stack_.template Pop(memberCount * 2); + for (SizeType i = 0; i < memberCount; i++) + h ^= Hash(kv[i * 2], kv[i * 2 + 1]); // Use xor to achieve member order insensitive + *stack_.template Push() = h; + return true; + } + + bool StartArray() { return true; } + bool EndArray(SizeType elementCount) { + uint64_t h = Hash(0, kArrayType); + uint64_t* e = stack_.template Pop(elementCount); + for (SizeType i = 0; i < elementCount; i++) + h = Hash(h, e[i]); // Use hash to achieve element order sensitive + *stack_.template Push() = h; + return true; + } + + bool IsValid() const { return stack_.GetSize() == sizeof(uint64_t); } + + uint64_t GetHashCode() const { + RAPIDJSON_ASSERT(IsValid()); + return *stack_.template Top(); + } + +private: + static const size_t kDefaultSize = 256; + struct Number { + union U { + uint64_t u; + int64_t i; + }u; + double d; + }; + + bool WriteType(Type type) { return WriteBuffer(type, 0, 0); } + + bool WriteNumber(const Number& n) { return WriteBuffer(kNumberType, &n, sizeof(n)); } + + bool WriteBuffer(Type type, const void* data, size_t len) { + // FNV-1a from http://isthe.com/chongo/tech/comp/fnv/ + uint64_t h = Hash(RAPIDJSON_UINT64_C2(0x84222325, 0xcbf29ce4), type); + const unsigned char* d = static_cast(data); + for (size_t i = 0; i < len; i++) + h = Hash(h, d[i]); + *stack_.template Push() = h; + return true; + } + + static uint64_t Hash(uint64_t h, uint64_t d) { + static const uint64_t kPrime = RAPIDJSON_UINT64_C2(0x00000100, 0x000001b3); + h ^= d; + h *= kPrime; + return h; + } + + Stack stack_; +}; + +/////////////////////////////////////////////////////////////////////////////// +// SchemaValidationContext + +template +struct SchemaValidationContext { + typedef Schema SchemaType; + typedef ISchemaStateFactory SchemaValidatorFactoryType; + typedef typename SchemaType::ValueType ValueType; + typedef typename ValueType::Ch Ch; + + enum PatternValidatorType { + kPatternValidatorOnly, + kPatternValidatorWithProperty, + kPatternValidatorWithAdditionalProperty + }; + + SchemaValidationContext(SchemaValidatorFactoryType& f, const SchemaType* s) : + factory(f), + schema(s), + valueSchema(), + invalidKeyword(), + hasher(), + arrayElementHashCodes(), + validators(), + validatorCount(), + patternPropertiesValidators(), + patternPropertiesValidatorCount(), + patternPropertiesSchemas(), + patternPropertiesSchemaCount(), + valuePatternValidatorType(kPatternValidatorOnly), + propertyExist(), + inArray(false), + valueUniqueness(false), + arrayUniqueness(false) + { + } + + 
~SchemaValidationContext() { + if (hasher) + factory.DestroryHasher(hasher); + if (validators) { + for (SizeType i = 0; i < validatorCount; i++) + factory.DestroySchemaValidator(validators[i]); + factory.FreeState(validators); + } + if (patternPropertiesValidators) { + for (SizeType i = 0; i < patternPropertiesValidatorCount; i++) + factory.DestroySchemaValidator(patternPropertiesValidators[i]); + factory.FreeState(patternPropertiesValidators); + } + if (patternPropertiesSchemas) + factory.FreeState(patternPropertiesSchemas); + if (propertyExist) + factory.FreeState(propertyExist); + } + + SchemaValidatorFactoryType& factory; + const SchemaType* schema; + const SchemaType* valueSchema; + const Ch* invalidKeyword; + void* hasher; // Only validator access + void* arrayElementHashCodes; // Only validator access this + ISchemaValidator** validators; + SizeType validatorCount; + ISchemaValidator** patternPropertiesValidators; + SizeType patternPropertiesValidatorCount; + const SchemaType** patternPropertiesSchemas; + SizeType patternPropertiesSchemaCount; + PatternValidatorType valuePatternValidatorType; + PatternValidatorType objectPatternValidatorType; + SizeType arrayElementIndex; + bool* propertyExist; + bool inArray; + bool valueUniqueness; + bool arrayUniqueness; +}; + +/////////////////////////////////////////////////////////////////////////////// +// Schema + +template +class Schema { +public: + typedef typename SchemaDocumentType::ValueType ValueType; + typedef typename SchemaDocumentType::AllocatorType AllocatorType; + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + typedef SchemaValidationContext Context; + typedef Schema SchemaType; + typedef GenericValue SValue; + friend class GenericSchemaDocument; + + Schema(SchemaDocumentType* schemaDocument, const PointerType& p, const ValueType& value, const ValueType& document, AllocatorType* allocator) : + allocator_(allocator), + enum_(), + enumCount_(), + not_(), + type_((1 << kTotalSchemaType) - 1), // typeless + validatorCount_(), + properties_(), + additionalPropertiesSchema_(), + patternProperties_(), + patternPropertyCount_(), + propertyCount_(), + minProperties_(), + maxProperties_(SizeType(~0)), + additionalProperties_(true), + hasDependencies_(), + hasRequired_(), + hasSchemaDependencies_(), + additionalItemsSchema_(), + itemsList_(), + itemsTuple_(), + itemsTupleCount_(), + minItems_(), + maxItems_(SizeType(~0)), + additionalItems_(true), + uniqueItems_(false), + pattern_(), + minLength_(0), + maxLength_(~SizeType(0)), + exclusiveMinimum_(false), + exclusiveMaximum_(false) + { + typedef typename SchemaDocumentType::ValueType ValueType; + typedef typename ValueType::ConstValueIterator ConstValueIterator; + typedef typename ValueType::ConstMemberIterator ConstMemberIterator; + + if (!value.IsObject()) + return; + + if (const ValueType* v = GetMember(value, GetTypeString())) { + type_ = 0; + if (v->IsString()) + AddType(*v); + else if (v->IsArray()) + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) + AddType(*itr); + } + + if (const ValueType* v = GetMember(value, GetEnumString())) + if (v->IsArray() && v->Size() > 0) { + enum_ = static_cast(allocator_->Malloc(sizeof(uint64_t) * v->Size())); + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr) { + typedef Hasher > EnumHasherType; + char buffer[256 + 24]; + MemoryPoolAllocator<> hasherAllocator(buffer, sizeof(buffer)); + EnumHasherType 
h(&hasherAllocator, 256); + itr->Accept(h); + enum_[enumCount_++] = h.GetHashCode(); + } + } + + if (schemaDocument) { + AssignIfExist(allOf_, *schemaDocument, p, value, GetAllOfString(), document); + AssignIfExist(anyOf_, *schemaDocument, p, value, GetAnyOfString(), document); + AssignIfExist(oneOf_, *schemaDocument, p, value, GetOneOfString(), document); + } + + if (const ValueType* v = GetMember(value, GetNotString())) { + schemaDocument->CreateSchema(¬_, p.Append(GetNotString(), allocator_), *v, document); + notValidatorIndex_ = validatorCount_; + validatorCount_++; + } + + // Object + + const ValueType* properties = GetMember(value, GetPropertiesString()); + const ValueType* required = GetMember(value, GetRequiredString()); + const ValueType* dependencies = GetMember(value, GetDependenciesString()); + { + // Gather properties from properties/required/dependencies + SValue allProperties(kArrayType); + + if (properties && properties->IsObject()) + for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) + AddUniqueElement(allProperties, itr->name); + + if (required && required->IsArray()) + for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) + if (itr->IsString()) + AddUniqueElement(allProperties, *itr); + + if (dependencies && dependencies->IsObject()) + for (ConstMemberIterator itr = dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { + AddUniqueElement(allProperties, itr->name); + if (itr->value.IsArray()) + for (ConstValueIterator i = itr->value.Begin(); i != itr->value.End(); ++i) + if (i->IsString()) + AddUniqueElement(allProperties, *i); + } + + if (allProperties.Size() > 0) { + propertyCount_ = allProperties.Size(); + properties_ = static_cast(allocator_->Malloc(sizeof(Property) * propertyCount_)); + for (SizeType i = 0; i < propertyCount_; i++) { + new (&properties_[i]) Property(); + properties_[i].name = allProperties[i]; + properties_[i].schema = GetTypeless(); + } + } + } + + if (properties && properties->IsObject()) { + PointerType q = p.Append(GetPropertiesString(), allocator_); + for (ConstMemberIterator itr = properties->MemberBegin(); itr != properties->MemberEnd(); ++itr) { + SizeType index; + if (FindPropertyIndex(itr->name, &index)) + schemaDocument->CreateSchema(&properties_[index].schema, q.Append(itr->name, allocator_), itr->value, document); + } + } + + if (const ValueType* v = GetMember(value, GetPatternPropertiesString())) { + PointerType q = p.Append(GetPatternPropertiesString(), allocator_); + patternProperties_ = static_cast(allocator_->Malloc(sizeof(PatternProperty) * v->MemberCount())); + patternPropertyCount_ = 0; + + for (ConstMemberIterator itr = v->MemberBegin(); itr != v->MemberEnd(); ++itr) { + new (&patternProperties_[patternPropertyCount_]) PatternProperty(); + patternProperties_[patternPropertyCount_].pattern = CreatePattern(itr->name); + schemaDocument->CreateSchema(&patternProperties_[patternPropertyCount_].schema, q.Append(itr->name, allocator_), itr->value, document); + patternPropertyCount_++; + } + } + + if (required && required->IsArray()) + for (ConstValueIterator itr = required->Begin(); itr != required->End(); ++itr) + if (itr->IsString()) { + SizeType index; + if (FindPropertyIndex(*itr, &index)) { + properties_[index].required = true; + hasRequired_ = true; + } + } + + if (dependencies && dependencies->IsObject()) { + PointerType q = p.Append(GetDependenciesString(), allocator_); + hasDependencies_ = true; + for (ConstMemberIterator itr = 
dependencies->MemberBegin(); itr != dependencies->MemberEnd(); ++itr) { + SizeType sourceIndex; + if (FindPropertyIndex(itr->name, &sourceIndex)) { + if (itr->value.IsArray()) { + properties_[sourceIndex].dependencies = static_cast(allocator_->Malloc(sizeof(bool) * propertyCount_)); + std::memset(properties_[sourceIndex].dependencies, 0, sizeof(bool)* propertyCount_); + for (ConstValueIterator targetItr = itr->value.Begin(); targetItr != itr->value.End(); ++targetItr) { + SizeType targetIndex; + if (FindPropertyIndex(*targetItr, &targetIndex)) + properties_[sourceIndex].dependencies[targetIndex] = true; + } + } + else if (itr->value.IsObject()) { + hasSchemaDependencies_ = true; + schemaDocument->CreateSchema(&properties_[sourceIndex].dependenciesSchema, q.Append(itr->name, allocator_), itr->value, document); + properties_[sourceIndex].dependenciesValidatorIndex = validatorCount_; + validatorCount_++; + } + } + } + } + + if (const ValueType* v = GetMember(value, GetAdditionalPropertiesString())) { + if (v->IsBool()) + additionalProperties_ = v->GetBool(); + else if (v->IsObject()) + schemaDocument->CreateSchema(&additionalPropertiesSchema_, p.Append(GetAdditionalPropertiesString(), allocator_), *v, document); + } + + AssignIfExist(minProperties_, value, GetMinPropertiesString()); + AssignIfExist(maxProperties_, value, GetMaxPropertiesString()); + + // Array + if (const ValueType* v = GetMember(value, GetItemsString())) { + PointerType q = p.Append(GetItemsString(), allocator_); + if (v->IsObject()) // List validation + schemaDocument->CreateSchema(&itemsList_, q, *v, document); + else if (v->IsArray()) { // Tuple validation + itemsTuple_ = static_cast(allocator_->Malloc(sizeof(const Schema*) * v->Size())); + SizeType index = 0; + for (ConstValueIterator itr = v->Begin(); itr != v->End(); ++itr, index++) + schemaDocument->CreateSchema(&itemsTuple_[itemsTupleCount_++], q.Append(index, allocator_), *itr, document); + } + } + + AssignIfExist(minItems_, value, GetMinItemsString()); + AssignIfExist(maxItems_, value, GetMaxItemsString()); + + if (const ValueType* v = GetMember(value, GetAdditionalItemsString())) { + if (v->IsBool()) + additionalItems_ = v->GetBool(); + else if (v->IsObject()) + schemaDocument->CreateSchema(&additionalItemsSchema_, p.Append(GetAdditionalItemsString(), allocator_), *v, document); + } + + AssignIfExist(uniqueItems_, value, GetUniqueItemsString()); + + // String + AssignIfExist(minLength_, value, GetMinLengthString()); + AssignIfExist(maxLength_, value, GetMaxLengthString()); + + if (const ValueType* v = GetMember(value, GetPatternString())) + pattern_ = CreatePattern(*v); + + // Number + if (const ValueType* v = GetMember(value, GetMinimumString())) + if (v->IsNumber()) + minimum_.CopyFrom(*v, *allocator_); + + if (const ValueType* v = GetMember(value, GetMaximumString())) + if (v->IsNumber()) + maximum_.CopyFrom(*v, *allocator_); + + AssignIfExist(exclusiveMinimum_, value, GetExclusiveMinimumString()); + AssignIfExist(exclusiveMaximum_, value, GetExclusiveMaximumString()); + + if (const ValueType* v = GetMember(value, GetMultipleOfString())) + if (v->IsNumber() && v->GetDouble() > 0.0) + multipleOf_.CopyFrom(*v, *allocator_); + } + + ~Schema() { + if (allocator_) { + allocator_->Free(enum_); + } + if (properties_) { + for (SizeType i = 0; i < propertyCount_; i++) + properties_[i].~Property(); + AllocatorType::Free(properties_); + } + if (patternProperties_) { + for (SizeType i = 0; i < patternPropertyCount_; i++) + patternProperties_[i].~PatternProperty(); + 
AllocatorType::Free(patternProperties_); + } + AllocatorType::Free(itemsTuple_); +#if RAPIDJSON_SCHEMA_HAS_REGEX + if (pattern_) { + pattern_->~RegexType(); + allocator_->Free(pattern_); + } +#endif + } + + bool BeginValue(Context& context) const { + if (context.inArray) { + if (uniqueItems_) + context.valueUniqueness = true; + + if (itemsList_) + context.valueSchema = itemsList_; + else if (itemsTuple_) { + if (context.arrayElementIndex < itemsTupleCount_) + context.valueSchema = itemsTuple_[context.arrayElementIndex]; + else if (additionalItemsSchema_) + context.valueSchema = additionalItemsSchema_; + else if (additionalItems_) + context.valueSchema = GetTypeless(); + else + RAPIDJSON_INVALID_KEYWORD_RETURN(GetItemsString()); + } + else + context.valueSchema = GetTypeless(); + + context.arrayElementIndex++; + } + return true; + } + + RAPIDJSON_FORCEINLINE bool EndValue(Context& context) const { + if (context.patternPropertiesValidatorCount > 0) { + bool otherValid = false; + SizeType count = context.patternPropertiesValidatorCount; + if (context.objectPatternValidatorType != Context::kPatternValidatorOnly) + otherValid = context.patternPropertiesValidators[--count]->IsValid(); + + bool patternValid = true; + for (SizeType i = 0; i < count; i++) + if (!context.patternPropertiesValidators[i]->IsValid()) { + patternValid = false; + break; + } + + if (context.objectPatternValidatorType == Context::kPatternValidatorOnly) { + if (!patternValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + else if (context.objectPatternValidatorType == Context::kPatternValidatorWithProperty) { + if (!patternValid || !otherValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + else if (!patternValid && !otherValid) // kPatternValidatorWithAdditionalProperty) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternPropertiesString()); + } + + if (enum_) { + const uint64_t h = context.factory.GetHashCode(context.hasher); + for (SizeType i = 0; i < enumCount_; i++) + if (enum_[i] == h) + goto foundEnum; + RAPIDJSON_INVALID_KEYWORD_RETURN(GetEnumString()); + foundEnum:; + } + + if (allOf_.schemas) + for (SizeType i = allOf_.begin; i < allOf_.begin + allOf_.count; i++) + if (!context.validators[i]->IsValid()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAllOfString()); + + if (anyOf_.schemas) { + for (SizeType i = anyOf_.begin; i < anyOf_.begin + anyOf_.count; i++) + if (context.validators[i]->IsValid()) + goto foundAny; + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAnyOfString()); + foundAny:; + } + + if (oneOf_.schemas) { + bool oneValid = false; + for (SizeType i = oneOf_.begin; i < oneOf_.begin + oneOf_.count; i++) + if (context.validators[i]->IsValid()) { + if (oneValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + else + oneValid = true; + } + if (!oneValid) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetOneOfString()); + } + + if (not_ && context.validators[notValidatorIndex_]->IsValid()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetNotString()); + + return true; + } + + bool Null(Context& context) const { + if (!(type_ & (1 << kNullSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + return CreateParallelValidator(context); + } + + bool Bool(Context& context, bool) const { + if (!(type_ & (1 << kBooleanSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + return CreateParallelValidator(context); + } + + bool Int(Context& context, int i) const { + if (!CheckInt(context, i)) + return false; + return CreateParallelValidator(context); + } + + bool Uint(Context& 
context, unsigned u) const { + if (!CheckUint(context, u)) + return false; + return CreateParallelValidator(context); + } + + bool Int64(Context& context, int64_t i) const { + if (!CheckInt(context, i)) + return false; + return CreateParallelValidator(context); + } + + bool Uint64(Context& context, uint64_t u) const { + if (!CheckUint(context, u)) + return false; + return CreateParallelValidator(context); + } + + bool Double(Context& context, double d) const { + if (!(type_ & (1 << kNumberSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (!minimum_.IsNull() && !CheckDoubleMinimum(context, d)) + return false; + + if (!maximum_.IsNull() && !CheckDoubleMaximum(context, d)) + return false; + + if (!multipleOf_.IsNull() && !CheckDoubleMultipleOf(context, d)) + return false; + + return CreateParallelValidator(context); + } + + bool String(Context& context, const Ch* str, SizeType length, bool) const { + if (!(type_ & (1 << kStringSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (minLength_ != 0 || maxLength_ != SizeType(~0)) { + SizeType count; + if (internal::CountStringCodePoint(str, length, &count)) { + if (count < minLength_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinLengthString()); + if (count > maxLength_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxLengthString()); + } + } + + if (pattern_ && !IsPatternMatch(pattern_, str, length)) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetPatternString()); + + return CreateParallelValidator(context); + } + + bool StartObject(Context& context) const { + if (!(type_ & (1 << kObjectSchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (hasDependencies_ || hasRequired_) { + context.propertyExist = static_cast(context.factory.MallocState(sizeof(bool) * propertyCount_)); + std::memset(context.propertyExist, 0, sizeof(bool) * propertyCount_); + } + + if (patternProperties_) { // pre-allocate schema array + SizeType count = patternPropertyCount_ + 1; // extra for valuePatternValidatorType + context.patternPropertiesSchemas = static_cast(context.factory.MallocState(sizeof(const SchemaType*) * count)); + context.patternPropertiesSchemaCount = 0; + std::memset(context.patternPropertiesSchemas, 0, sizeof(SchemaType*) * count); + } + + return CreateParallelValidator(context); + } + + bool Key(Context& context, const Ch* str, SizeType len, bool) const { + if (patternProperties_) { + context.patternPropertiesSchemaCount = 0; + for (SizeType i = 0; i < patternPropertyCount_; i++) + if (patternProperties_[i].pattern && IsPatternMatch(patternProperties_[i].pattern, str, len)) + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = patternProperties_[i].schema; + } + + SizeType index; + if (FindPropertyIndex(ValueType(str, len).Move(), &index)) { + if (context.patternPropertiesSchemaCount > 0) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = properties_[index].schema; + context.valueSchema = GetTypeless(); + context.valuePatternValidatorType = Context::kPatternValidatorWithProperty; + } + else + context.valueSchema = properties_[index].schema; + + if (context.propertyExist) + context.propertyExist[index] = true; + + return true; + } + + if (additionalPropertiesSchema_) { + if (additionalPropertiesSchema_ && context.patternPropertiesSchemaCount > 0) { + context.patternPropertiesSchemas[context.patternPropertiesSchemaCount++] = additionalPropertiesSchema_; + context.valueSchema = GetTypeless(); + context.valuePatternValidatorType = 
Context::kPatternValidatorWithAdditionalProperty; + } + else + context.valueSchema = additionalPropertiesSchema_; + return true; + } + else if (additionalProperties_) { + context.valueSchema = GetTypeless(); + return true; + } + + if (context.patternPropertiesSchemaCount == 0) // patternProperties are not additional properties + RAPIDJSON_INVALID_KEYWORD_RETURN(GetAdditionalPropertiesString()); + + return true; + } + + bool EndObject(Context& context, SizeType memberCount) const { + if (hasRequired_) + for (SizeType index = 0; index < propertyCount_; index++) + if (properties_[index].required) + if (!context.propertyExist[index]) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetRequiredString()); + + if (memberCount < minProperties_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinPropertiesString()); + + if (memberCount > maxProperties_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxPropertiesString()); + + if (hasDependencies_) { + for (SizeType sourceIndex = 0; sourceIndex < propertyCount_; sourceIndex++) + if (context.propertyExist[sourceIndex]) { + if (properties_[sourceIndex].dependencies) { + for (SizeType targetIndex = 0; targetIndex < propertyCount_; targetIndex++) + if (properties_[sourceIndex].dependencies[targetIndex] && !context.propertyExist[targetIndex]) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); + } + else if (properties_[sourceIndex].dependenciesSchema) + if (!context.validators[properties_[sourceIndex].dependenciesValidatorIndex]->IsValid()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetDependenciesString()); + } + } + + return true; + } + + bool StartArray(Context& context) const { + if (!(type_ & (1 << kArraySchemaType))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + context.arrayElementIndex = 0; + context.inArray = true; + + return CreateParallelValidator(context); + } + + bool EndArray(Context& context, SizeType elementCount) const { + context.inArray = false; + + if (elementCount < minItems_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinItemsString()); + + if (elementCount > maxItems_) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaxItemsString()); + + return true; + } + + // Generate functions for string literal according to Ch +#define RAPIDJSON_STRING_(name, ...) 
\ + static const ValueType& Get##name##String() {\ + static const Ch s[] = { __VA_ARGS__, '\0' };\ + static const ValueType v(s, sizeof(s) / sizeof(Ch) - 1);\ + return v;\ + } + + RAPIDJSON_STRING_(Null, 'n', 'u', 'l', 'l') + RAPIDJSON_STRING_(Boolean, 'b', 'o', 'o', 'l', 'e', 'a', 'n') + RAPIDJSON_STRING_(Object, 'o', 'b', 'j', 'e', 'c', 't') + RAPIDJSON_STRING_(Array, 'a', 'r', 'r', 'a', 'y') + RAPIDJSON_STRING_(String, 's', 't', 'r', 'i', 'n', 'g') + RAPIDJSON_STRING_(Number, 'n', 'u', 'm', 'b', 'e', 'r') + RAPIDJSON_STRING_(Integer, 'i', 'n', 't', 'e', 'g', 'e', 'r') + RAPIDJSON_STRING_(Type, 't', 'y', 'p', 'e') + RAPIDJSON_STRING_(Enum, 'e', 'n', 'u', 'm') + RAPIDJSON_STRING_(AllOf, 'a', 'l', 'l', 'O', 'f') + RAPIDJSON_STRING_(AnyOf, 'a', 'n', 'y', 'O', 'f') + RAPIDJSON_STRING_(OneOf, 'o', 'n', 'e', 'O', 'f') + RAPIDJSON_STRING_(Not, 'n', 'o', 't') + RAPIDJSON_STRING_(Properties, 'p', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(Required, 'r', 'e', 'q', 'u', 'i', 'r', 'e', 'd') + RAPIDJSON_STRING_(Dependencies, 'd', 'e', 'p', 'e', 'n', 'd', 'e', 'n', 'c', 'i', 'e', 's') + RAPIDJSON_STRING_(PatternProperties, 'p', 'a', 't', 't', 'e', 'r', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(AdditionalProperties, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(MinProperties, 'm', 'i', 'n', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(MaxProperties, 'm', 'a', 'x', 'P', 'r', 'o', 'p', 'e', 'r', 't', 'i', 'e', 's') + RAPIDJSON_STRING_(Items, 'i', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MinItems, 'm', 'i', 'n', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MaxItems, 'm', 'a', 'x', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(AdditionalItems, 'a', 'd', 'd', 'i', 't', 'i', 'o', 'n', 'a', 'l', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(UniqueItems, 'u', 'n', 'i', 'q', 'u', 'e', 'I', 't', 'e', 'm', 's') + RAPIDJSON_STRING_(MinLength, 'm', 'i', 'n', 'L', 'e', 'n', 'g', 't', 'h') + RAPIDJSON_STRING_(MaxLength, 'm', 'a', 'x', 'L', 'e', 'n', 'g', 't', 'h') + RAPIDJSON_STRING_(Pattern, 'p', 'a', 't', 't', 'e', 'r', 'n') + RAPIDJSON_STRING_(Minimum, 'm', 'i', 'n', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(Maximum, 'm', 'a', 'x', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(ExclusiveMinimum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'i', 'n', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(ExclusiveMaximum, 'e', 'x', 'c', 'l', 'u', 's', 'i', 'v', 'e', 'M', 'a', 'x', 'i', 'm', 'u', 'm') + RAPIDJSON_STRING_(MultipleOf, 'm', 'u', 'l', 't', 'i', 'p', 'l', 'e', 'O', 'f') + +#undef RAPIDJSON_STRING_ + +private: + enum SchemaValueType { + kNullSchemaType, + kBooleanSchemaType, + kObjectSchemaType, + kArraySchemaType, + kStringSchemaType, + kNumberSchemaType, + kIntegerSchemaType, + kTotalSchemaType + }; + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX + typedef internal::GenericRegex RegexType; +#elif RAPIDJSON_SCHEMA_USE_STDREGEX + typedef std::basic_regex RegexType; +#else + typedef char RegexType; +#endif + + struct SchemaArray { + SchemaArray() : schemas(), count() {} + ~SchemaArray() { AllocatorType::Free(schemas); } + const SchemaType** schemas; + SizeType begin; // begin index of context.validators + SizeType count; + }; + + static const SchemaType* GetTypeless() { + static SchemaType typeless(0, PointerType(), ValueType(kObjectType).Move(), ValueType(kObjectType).Move(), 0); + return &typeless; + } + + template + void AddUniqueElement(V1& a, const V2& v) { + for (typename 
V1::ConstValueIterator itr = a.Begin(); itr != a.End(); ++itr) + if (*itr == v) + return; + V1 c(v, *allocator_); + a.PushBack(c, *allocator_); + } + + static const ValueType* GetMember(const ValueType& value, const ValueType& name) { + typename ValueType::ConstMemberIterator itr = value.FindMember(name); + return itr != value.MemberEnd() ? &(itr->value) : 0; + } + + static void AssignIfExist(bool& out, const ValueType& value, const ValueType& name) { + if (const ValueType* v = GetMember(value, name)) + if (v->IsBool()) + out = v->GetBool(); + } + + static void AssignIfExist(SizeType& out, const ValueType& value, const ValueType& name) { + if (const ValueType* v = GetMember(value, name)) + if (v->IsUint64() && v->GetUint64() <= SizeType(~0)) + out = static_cast(v->GetUint64()); + } + + void AssignIfExist(SchemaArray& out, SchemaDocumentType& schemaDocument, const PointerType& p, const ValueType& value, const ValueType& name, const ValueType& document) { + if (const ValueType* v = GetMember(value, name)) { + if (v->IsArray() && v->Size() > 0) { + PointerType q = p.Append(name, allocator_); + out.count = v->Size(); + out.schemas = static_cast(allocator_->Malloc(out.count * sizeof(const Schema*))); + memset(out.schemas, 0, sizeof(Schema*)* out.count); + for (SizeType i = 0; i < out.count; i++) + schemaDocument.CreateSchema(&out.schemas[i], q.Append(i, allocator_), (*v)[i], document); + out.begin = validatorCount_; + validatorCount_ += out.count; + } + } + } + +#if RAPIDJSON_SCHEMA_USE_INTERNALREGEX + template + RegexType* CreatePattern(const ValueType& value) { + if (value.IsString()) { + RegexType* r = new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString()); + if (!r->IsValid()) { + r->~RegexType(); + AllocatorType::Free(r); + r = 0; + } + return r; + } + return 0; + } + + static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType) { + return pattern->Search(str); + } +#elif RAPIDJSON_SCHEMA_USE_STDREGEX + template + RegexType* CreatePattern(const ValueType& value) { + if (value.IsString()) + try { + return new (allocator_->Malloc(sizeof(RegexType))) RegexType(value.GetString(), std::size_t(value.GetStringLength()), std::regex_constants::ECMAScript); + } + catch (const std::regex_error&) { + } + return 0; + } + + static bool IsPatternMatch(const RegexType* pattern, const Ch *str, SizeType length) { + std::match_results r; + return std::regex_search(str, str + length, r, *pattern); + } +#else + template + RegexType* CreatePattern(const ValueType&) { return 0; } + + static bool IsPatternMatch(const RegexType*, const Ch *, SizeType) { return true; } +#endif // RAPIDJSON_SCHEMA_USE_STDREGEX + + void AddType(const ValueType& type) { + if (type == GetNullString() ) type_ |= 1 << kNullSchemaType; + else if (type == GetBooleanString()) type_ |= 1 << kBooleanSchemaType; + else if (type == GetObjectString() ) type_ |= 1 << kObjectSchemaType; + else if (type == GetArrayString() ) type_ |= 1 << kArraySchemaType; + else if (type == GetStringString() ) type_ |= 1 << kStringSchemaType; + else if (type == GetIntegerString()) type_ |= 1 << kIntegerSchemaType; + else if (type == GetNumberString() ) type_ |= (1 << kNumberSchemaType) | (1 << kIntegerSchemaType); + } + + bool CreateParallelValidator(Context& context) const { + if (enum_ || context.arrayUniqueness) + context.hasher = context.factory.CreateHasher(); + + if (validatorCount_) { + RAPIDJSON_ASSERT(context.validators == 0); + context.validators = static_cast(context.factory.MallocState(sizeof(ISchemaValidator*) * 
validatorCount_)); + context.validatorCount = validatorCount_; + + if (allOf_.schemas) + CreateSchemaValidators(context, allOf_); + + if (anyOf_.schemas) + CreateSchemaValidators(context, anyOf_); + + if (oneOf_.schemas) + CreateSchemaValidators(context, oneOf_); + + if (not_) + context.validators[notValidatorIndex_] = context.factory.CreateSchemaValidator(*not_); + + if (hasSchemaDependencies_) { + for (SizeType i = 0; i < propertyCount_; i++) + if (properties_[i].dependenciesSchema) + context.validators[properties_[i].dependenciesValidatorIndex] = context.factory.CreateSchemaValidator(*properties_[i].dependenciesSchema); + } + } + + return true; + } + + void CreateSchemaValidators(Context& context, const SchemaArray& schemas) const { + for (SizeType i = 0; i < schemas.count; i++) + context.validators[schemas.begin + i] = context.factory.CreateSchemaValidator(*schemas.schemas[i]); + } + + // O(n) + bool FindPropertyIndex(const ValueType& name, SizeType* outIndex) const { + SizeType len = name.GetStringLength(); + const Ch* str = name.GetString(); + for (SizeType index = 0; index < propertyCount_; index++) + if (properties_[index].name.GetStringLength() == len && + (std::memcmp(properties_[index].name.GetString(), str, sizeof(Ch) * len) == 0)) + { + *outIndex = index; + return true; + } + return false; + } + + bool CheckInt(Context& context, int64_t i) const { + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (!minimum_.IsNull()) { + if (minimum_.IsInt64()) { + if (exclusiveMinimum_ ? i <= minimum_.GetInt64() : i < minimum_.GetInt64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + else if (minimum_.IsUint64()) { + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); // i <= max(int64_t) < minimum.GetUint64() + } + else if (!CheckDoubleMinimum(context, static_cast(i))) + return false; + } + + if (!maximum_.IsNull()) { + if (maximum_.IsInt64()) { + if (exclusiveMaximum_ ? i >= maximum_.GetInt64() : i > maximum_.GetInt64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + else if (maximum_.IsUint64()) + /* do nothing */; // i <= max(int64_t) < maximum_.GetUint64() + else if (!CheckDoubleMaximum(context, static_cast(i))) + return false; + } + + if (!multipleOf_.IsNull()) { + if (multipleOf_.IsUint64()) { + if (static_cast(i >= 0 ? i : -i) % multipleOf_.GetUint64() != 0) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + else if (!CheckDoubleMultipleOf(context, static_cast(i))) + return false; + } + + return true; + } + + bool CheckUint(Context& context, uint64_t i) const { + if (!(type_ & ((1 << kIntegerSchemaType) | (1 << kNumberSchemaType)))) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetTypeString()); + + if (!minimum_.IsNull()) { + if (minimum_.IsUint64()) { + if (exclusiveMinimum_ ? i <= minimum_.GetUint64() : i < minimum_.GetUint64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + } + else if (minimum_.IsInt64()) + /* do nothing */; // i >= 0 > minimum.Getint64() + else if (!CheckDoubleMinimum(context, static_cast(i))) + return false; + } + + if (!maximum_.IsNull()) { + if (maximum_.IsUint64()) { + if (exclusiveMaximum_ ? 
i >= maximum_.GetUint64() : i > maximum_.GetUint64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + } + else if (maximum_.IsInt64()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); // i >= 0 > maximum_ + else if (!CheckDoubleMaximum(context, static_cast(i))) + return false; + } + + if (!multipleOf_.IsNull()) { + if (multipleOf_.IsUint64()) { + if (i % multipleOf_.GetUint64() != 0) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + } + else if (!CheckDoubleMultipleOf(context, static_cast(i))) + return false; + } + + return true; + } + + bool CheckDoubleMinimum(Context& context, double d) const { + if (exclusiveMinimum_ ? d <= minimum_.GetDouble() : d < minimum_.GetDouble()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMinimumString()); + return true; + } + + bool CheckDoubleMaximum(Context& context, double d) const { + if (exclusiveMaximum_ ? d >= maximum_.GetDouble() : d > maximum_.GetDouble()) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMaximumString()); + return true; + } + + bool CheckDoubleMultipleOf(Context& context, double d) const { + double a = std::abs(d), b = std::abs(multipleOf_.GetDouble()); + double q = std::floor(a / b); + double r = a - q * b; + if (r > 0.0) + RAPIDJSON_INVALID_KEYWORD_RETURN(GetMultipleOfString()); + return true; + } + + struct Property { + Property() : schema(), dependenciesSchema(), dependenciesValidatorIndex(), dependencies(), required(false) {} + ~Property() { AllocatorType::Free(dependencies); } + SValue name; + const SchemaType* schema; + const SchemaType* dependenciesSchema; + SizeType dependenciesValidatorIndex; + bool* dependencies; + bool required; + }; + + struct PatternProperty { + PatternProperty() : schema(), pattern() {} + ~PatternProperty() { + if (pattern) { + pattern->~RegexType(); + AllocatorType::Free(pattern); + } + } + const SchemaType* schema; + RegexType* pattern; + }; + + AllocatorType* allocator_; + uint64_t* enum_; + SizeType enumCount_; + SchemaArray allOf_; + SchemaArray anyOf_; + SchemaArray oneOf_; + const SchemaType* not_; + unsigned type_; // bitmask of kSchemaType + SizeType validatorCount_; + SizeType notValidatorIndex_; + + Property* properties_; + const SchemaType* additionalPropertiesSchema_; + PatternProperty* patternProperties_; + SizeType patternPropertyCount_; + SizeType propertyCount_; + SizeType minProperties_; + SizeType maxProperties_; + bool additionalProperties_; + bool hasDependencies_; + bool hasRequired_; + bool hasSchemaDependencies_; + + const SchemaType* additionalItemsSchema_; + const SchemaType* itemsList_; + const SchemaType** itemsTuple_; + SizeType itemsTupleCount_; + SizeType minItems_; + SizeType maxItems_; + bool additionalItems_; + bool uniqueItems_; + + RegexType* pattern_; + SizeType minLength_; + SizeType maxLength_; + + SValue minimum_; + SValue maximum_; + SValue multipleOf_; + bool exclusiveMinimum_; + bool exclusiveMaximum_; +}; + +template +struct TokenHelper { + RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { + *documentStack.template Push() = '/'; + char buffer[21]; + size_t length = static_cast((sizeof(SizeType) == 4 ? u32toa(index, buffer) : u64toa(index, buffer)) - buffer); + for (size_t i = 0; i < length; i++) + *documentStack.template Push() = buffer[i]; + } +}; + +// Partial specialized version for char to prevent buffer copying. 
+template +struct TokenHelper { + RAPIDJSON_FORCEINLINE static void AppendIndexToken(Stack& documentStack, SizeType index) { + if (sizeof(SizeType) == 4) { + char *buffer = documentStack.template Push(1 + 10); // '/' + uint + *buffer++ = '/'; + const char* end = internal::u32toa(index, buffer); + documentStack.template Pop(static_cast(10 - (end - buffer))); + } + else { + char *buffer = documentStack.template Push(1 + 20); // '/' + uint64 + *buffer++ = '/'; + const char* end = internal::u64toa(index, buffer); + documentStack.template Pop(static_cast(20 - (end - buffer))); + } + } +}; + +} // namespace internal + +/////////////////////////////////////////////////////////////////////////////// +// IGenericRemoteSchemaDocumentProvider + +template +class IGenericRemoteSchemaDocumentProvider { +public: + typedef typename SchemaDocumentType::Ch Ch; + + virtual ~IGenericRemoteSchemaDocumentProvider() {} + virtual const SchemaDocumentType* GetRemoteDocument(const Ch* uri, SizeType length) = 0; +}; + +/////////////////////////////////////////////////////////////////////////////// +// GenericSchemaDocument + +//! JSON schema document. +/*! + A JSON schema document is a compiled version of a JSON schema. + It is basically a tree of internal::Schema. + + \note This is an immutable class (i.e. its instance cannot be modified after construction). + \tparam ValueT Type of JSON value (e.g. \c Value ), which also determine the encoding. + \tparam Allocator Allocator type for allocating memory of this document. +*/ +template +class GenericSchemaDocument { +public: + typedef ValueT ValueType; + typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProviderType; + typedef Allocator AllocatorType; + typedef typename ValueType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + typedef internal::Schema SchemaType; + typedef GenericPointer PointerType; + friend class internal::Schema; + template + friend class GenericSchemaValidator; + + //! Constructor. + /*! + Compile a JSON document into schema document. + + \param document A JSON document as source. + \param remoteProvider An optional remote schema document provider for resolving remote reference. Can be null. + \param allocator An optional allocator instance for allocating memory. Can be null. + */ + explicit GenericSchemaDocument(const ValueType& document, IRemoteSchemaDocumentProviderType* remoteProvider = 0, Allocator* allocator = 0) : + remoteProvider_(remoteProvider), + allocator_(allocator), + ownAllocator_(), + root_(), + schemaMap_(allocator, kInitialSchemaMapSize), + schemaRef_(allocator, kInitialSchemaRefSize) + { + if (!allocator_) + ownAllocator_ = allocator_ = RAPIDJSON_NEW(Allocator()); + + // Generate root schema, it will call CreateSchema() to create sub-schemas, + // And call AddRefSchema() if there are $ref. + CreateSchemaRecursive(&root_, PointerType(), document, document); + + // Resolve $ref + while (!schemaRef_.Empty()) { + SchemaRefEntry* refEntry = schemaRef_.template Pop(1); + if (const SchemaType* s = GetSchema(refEntry->target)) { + if (refEntry->schema) + *refEntry->schema = s; + + // Create entry in map if not exist + if (!GetSchema(refEntry->source)) { + new (schemaMap_.template Push()) SchemaEntry(refEntry->source, const_cast(s), false, allocator_); + } + } + refEntry->~SchemaRefEntry(); + } + + RAPIDJSON_ASSERT(root_ != 0); + + schemaRef_.ShrinkToFit(); // Deallocate all memory for ref + } + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + //! 
Move constructor in C++11 + GenericSchemaDocument(GenericSchemaDocument&& rhs) RAPIDJSON_NOEXCEPT : + remoteProvider_(rhs.remoteProvider_), + allocator_(rhs.allocator_), + ownAllocator_(rhs.ownAllocator_), + root_(rhs.root_), + schemaMap_(std::move(rhs.schemaMap_)), + schemaRef_(std::move(rhs.schemaRef_)) + { + rhs.remoteProvider_ = 0; + rhs.allocator_ = 0; + rhs.ownAllocator_ = 0; + } +#endif + + //! Destructor + ~GenericSchemaDocument() { + while (!schemaMap_.Empty()) + schemaMap_.template Pop(1)->~SchemaEntry(); + + RAPIDJSON_DELETE(ownAllocator_); + } + + //! Get the root schema. + const SchemaType& GetRoot() const { return *root_; } + +private: + //! Prohibit copying + GenericSchemaDocument(const GenericSchemaDocument&); + //! Prohibit assignment + GenericSchemaDocument& operator=(const GenericSchemaDocument&); + + struct SchemaRefEntry { + SchemaRefEntry(const PointerType& s, const PointerType& t, const SchemaType** outSchema, Allocator *allocator) : source(s, allocator), target(t, allocator), schema(outSchema) {} + PointerType source; + PointerType target; + const SchemaType** schema; + }; + + struct SchemaEntry { + SchemaEntry(const PointerType& p, SchemaType* s, bool o, Allocator* allocator) : pointer(p, allocator), schema(s), owned(o) {} + ~SchemaEntry() { + if (owned) { + schema->~SchemaType(); + Allocator::Free(schema); + } + } + PointerType pointer; + SchemaType* schema; + bool owned; + }; + + void CreateSchemaRecursive(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { + if (schema) + *schema = SchemaType::GetTypeless(); + + if (v.GetType() == kObjectType) { + const SchemaType* s = GetSchema(pointer); + if (!s) + CreateSchema(schema, pointer, v, document); + + for (typename ValueType::ConstMemberIterator itr = v.MemberBegin(); itr != v.MemberEnd(); ++itr) + CreateSchemaRecursive(0, pointer.Append(itr->name, allocator_), itr->value, document); + } + else if (v.GetType() == kArrayType) + for (SizeType i = 0; i < v.Size(); i++) + CreateSchemaRecursive(0, pointer.Append(i, allocator_), v[i], document); + } + + void CreateSchema(const SchemaType** schema, const PointerType& pointer, const ValueType& v, const ValueType& document) { + RAPIDJSON_ASSERT(pointer.IsValid()); + if (v.IsObject()) { + if (!HandleRefSchema(pointer, schema, v, document)) { + SchemaType* s = new (allocator_->Malloc(sizeof(SchemaType))) SchemaType(this, pointer, v, document, allocator_); + new (schemaMap_.template Push()) SchemaEntry(pointer, s, true, allocator_); + if (schema) + *schema = s; + } + } + } + + bool HandleRefSchema(const PointerType& source, const SchemaType** schema, const ValueType& v, const ValueType& document) { + static const Ch kRefString[] = { '$', 'r', 'e', 'f', '\0' }; + static const ValueType kRefValue(kRefString, 4); + + typename ValueType::ConstMemberIterator itr = v.FindMember(kRefValue); + if (itr == v.MemberEnd()) + return false; + + if (itr->value.IsString()) { + SizeType len = itr->value.GetStringLength(); + if (len > 0) { + const Ch* s = itr->value.GetString(); + SizeType i = 0; + while (i < len && s[i] != '#') // Find the first # + i++; + + if (i > 0) { // Remote reference, resolve immediately + if (remoteProvider_) { + if (const GenericSchemaDocument* remoteDocument = remoteProvider_->GetRemoteDocument(s, i - 1)) { + PointerType pointer(&s[i], len - i, allocator_); + if (pointer.IsValid()) { + if (const SchemaType* sc = remoteDocument->GetSchema(pointer)) { + if (schema) + *schema = sc; + return true; + } + } + } + } + } + 
else if (s[i] == '#') { // Local reference, defer resolution + PointerType pointer(&s[i], len - i, allocator_); + if (pointer.IsValid()) { + if (const ValueType* nv = pointer.Get(document)) + if (HandleRefSchema(source, schema, *nv, document)) + return true; + + new (schemaRef_.template Push()) SchemaRefEntry(source, pointer, schema, allocator_); + return true; + } + } + } + } + return false; + } + + const SchemaType* GetSchema(const PointerType& pointer) const { + for (const SchemaEntry* target = schemaMap_.template Bottom(); target != schemaMap_.template End(); ++target) + if (pointer == target->pointer) + return target->schema; + return 0; + } + + PointerType GetPointer(const SchemaType* schema) const { + for (const SchemaEntry* target = schemaMap_.template Bottom(); target != schemaMap_.template End(); ++target) + if (schema == target->schema) + return target->pointer; + return PointerType(); + } + + static const size_t kInitialSchemaMapSize = 64; + static const size_t kInitialSchemaRefSize = 64; + + IRemoteSchemaDocumentProviderType* remoteProvider_; + Allocator *allocator_; + Allocator *ownAllocator_; + const SchemaType* root_; //!< Root schema. + internal::Stack schemaMap_; // Stores created Pointer -> Schemas + internal::Stack schemaRef_; // Stores Pointer from $ref and schema which holds the $ref +}; + +//! GenericSchemaDocument using Value type. +typedef GenericSchemaDocument SchemaDocument; +//! IGenericRemoteSchemaDocumentProvider using SchemaDocument. +typedef IGenericRemoteSchemaDocumentProvider IRemoteSchemaDocumentProvider; + +/////////////////////////////////////////////////////////////////////////////// +// GenericSchemaValidator + +//! JSON Schema Validator. +/*! + A SAX style JSON schema validator. + It uses a \c GenericSchemaDocument to validate SAX events. + It delegates the incoming SAX events to an output handler. + The default output handler does nothing. + It can be reused multiple times by calling \c Reset(). + + \tparam SchemaDocumentType Type of schema document. + \tparam OutputHandler Type of output handler. Default handler does nothing. + \tparam StateAllocator Allocator for storing the internal validation states. +*/ +template < + typename SchemaDocumentType, + typename OutputHandler = BaseReaderHandler, + typename StateAllocator = CrtAllocator> +class GenericSchemaValidator : + public internal::ISchemaStateFactory, + public internal::ISchemaValidator +{ +public: + typedef typename SchemaDocumentType::SchemaType SchemaType; + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename SchemaType::EncodingType EncodingType; + typedef typename EncodingType::Ch Ch; + + //! Constructor without output handler. + /*! + \param schemaDocument The schema document to conform to. + \param allocator Optional allocator for storing internal validation states. + \param schemaStackCapacity Optional initial capacity of schema path stack. + \param documentStackCapacity Optional initial capacity of document path stack. 
+ */ + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(schemaDocument.GetRoot()), + outputHandler_(GetNullHandler()), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(0) +#endif + { + } + + //! Constructor with output handler. + /*! + \param schemaDocument The schema document to conform to. + \param allocator Optional allocator for storing internal validation states. + \param schemaStackCapacity Optional initial capacity of schema path stack. + \param documentStackCapacity Optional initial capacity of document path stack. + */ + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + OutputHandler& outputHandler, + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(schemaDocument.GetRoot()), + outputHandler_(outputHandler), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(0) +#endif + { + } + + //! Destructor. + ~GenericSchemaValidator() { + Reset(); + RAPIDJSON_DELETE(ownStateAllocator_); + } + + //! Reset the internal states. + void Reset() { + while (!schemaStack_.Empty()) + PopSchema(); + documentStack_.Clear(); + valid_ = true; + } + + //! Checks whether the current state is valid. + // Implementation of ISchemaValidator + virtual bool IsValid() const { return valid_; } + + //! Gets the JSON pointer pointed to the invalid schema. + PointerType GetInvalidSchemaPointer() const { + return schemaStack_.Empty() ? PointerType() : schemaDocument_->GetPointer(&CurrentSchema()); + } + + //! Gets the keyword of invalid schema. + const Ch* GetInvalidSchemaKeyword() const { + return schemaStack_.Empty() ? 0 : CurrentContext().invalidKeyword; + } + + //! Gets the JSON pointer pointed to the invalid value. + PointerType GetInvalidDocumentPointer() const { + return documentStack_.Empty() ? 
PointerType() : PointerType(documentStack_.template Bottom(), documentStack_.GetSize() / sizeof(Ch)); + } + +#if RAPIDJSON_SCHEMA_VERBOSE +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() \ +RAPIDJSON_MULTILINEMACRO_BEGIN\ + *documentStack_.template Push() = '\0';\ + documentStack_.template Pop(1);\ + internal::PrintInvalidDocument(documentStack_.template Bottom());\ +RAPIDJSON_MULTILINEMACRO_END +#else +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_() +#endif + +#define RAPIDJSON_SCHEMA_HANDLE_BEGIN_(method, arg1)\ + if (!valid_) return false; \ + if (!BeginValue() || !CurrentSchema().method arg1) {\ + RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_();\ + return valid_ = false;\ + } + +#define RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2)\ + for (Context* context = schemaStack_.template Bottom(); context != schemaStack_.template End(); context++) {\ + if (context->hasher)\ + static_cast(context->hasher)->method arg2;\ + if (context->validators)\ + for (SizeType i_ = 0; i_ < context->validatorCount; i_++)\ + static_cast(context->validators[i_])->method arg2;\ + if (context->patternPropertiesValidators)\ + for (SizeType i_ = 0; i_ < context->patternPropertiesValidatorCount; i_++)\ + static_cast(context->patternPropertiesValidators[i_])->method arg2;\ + } + +#define RAPIDJSON_SCHEMA_HANDLE_END_(method, arg2)\ + return valid_ = EndValue() && outputHandler_.method arg2 + +#define RAPIDJSON_SCHEMA_HANDLE_VALUE_(method, arg1, arg2) \ + RAPIDJSON_SCHEMA_HANDLE_BEGIN_ (method, arg1);\ + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(method, arg2);\ + RAPIDJSON_SCHEMA_HANDLE_END_ (method, arg2) + + bool Null() { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Null, (CurrentContext() ), ( )); } + bool Bool(bool b) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Bool, (CurrentContext(), b), (b)); } + bool Int(int i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int, (CurrentContext(), i), (i)); } + bool Uint(unsigned u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint, (CurrentContext(), u), (u)); } + bool Int64(int64_t i) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Int64, (CurrentContext(), i), (i)); } + bool Uint64(uint64_t u) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Uint64, (CurrentContext(), u), (u)); } + bool Double(double d) { RAPIDJSON_SCHEMA_HANDLE_VALUE_(Double, (CurrentContext(), d), (d)); } + bool RawNumber(const Ch* str, SizeType length, bool copy) + { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } + bool String(const Ch* str, SizeType length, bool copy) + { RAPIDJSON_SCHEMA_HANDLE_VALUE_(String, (CurrentContext(), str, length, copy), (str, length, copy)); } + + bool StartObject() { + RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartObject, (CurrentContext())); + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartObject, ()); + return valid_ = outputHandler_.StartObject(); + } + + bool Key(const Ch* str, SizeType len, bool copy) { + if (!valid_) return false; + AppendToken(str, len); + if (!CurrentSchema().Key(CurrentContext(), str, len, copy)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(Key, (str, len, copy)); + return valid_ = outputHandler_.Key(str, len, copy); + } + + bool EndObject(SizeType memberCount) { + if (!valid_) return false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndObject, (memberCount)); + if (!CurrentSchema().EndObject(CurrentContext(), memberCount)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_END_(EndObject, (memberCount)); + } + + bool StartArray() { + RAPIDJSON_SCHEMA_HANDLE_BEGIN_(StartArray, (CurrentContext())); + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(StartArray, ()); + return valid_ = outputHandler_.StartArray(); + } 
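The SAX handlers above are normally not called by hand; a parsed document is replayed into the validator. The following is a minimal sketch of that flow, assuming the usual RapidJSON 1.1.0 entry points and two caller-supplied JSON strings (this is an editorial aside, not part of the vendored header):

```c++
#include "rapidjson/document.h"
#include "rapidjson/schema.h"
#include "rapidjson/stringbuffer.h"

// Hedged sketch: validate targetJson against schemaJson; both inputs are assumed.
bool ValidateJson(const char* schemaJson, const char* targetJson) {
    using namespace rapidjson;

    Document sd;
    if (sd.Parse(schemaJson).HasParseError())
        return false;                      // the schema itself is not valid JSON
    SchemaDocument schema(sd);             // compile once, reusable for many documents

    Document d;
    if (d.Parse(targetJson).HasParseError())
        return false;

    SchemaValidator validator(schema);
    if (!d.Accept(validator)) {            // Accept() replays SAX events into the validator
        StringBuffer sb;
        validator.GetInvalidSchemaPointer().StringifyUriFragment(sb);
        // sb.GetString() names the violated schema location;
        // validator.GetInvalidSchemaKeyword() names the keyword (e.g. "required").
        return false;
    }
    return true;
}
```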
+ + bool EndArray(SizeType elementCount) { + if (!valid_) return false; + RAPIDJSON_SCHEMA_HANDLE_PARALLEL_(EndArray, (elementCount)); + if (!CurrentSchema().EndArray(CurrentContext(), elementCount)) return valid_ = false; + RAPIDJSON_SCHEMA_HANDLE_END_(EndArray, (elementCount)); + } + +#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_VERBOSE_ +#undef RAPIDJSON_SCHEMA_HANDLE_BEGIN_ +#undef RAPIDJSON_SCHEMA_HANDLE_PARALLEL_ +#undef RAPIDJSON_SCHEMA_HANDLE_VALUE_ + + // Implementation of ISchemaStateFactory + virtual ISchemaValidator* CreateSchemaValidator(const SchemaType& root) { + return new (GetStateAllocator().Malloc(sizeof(GenericSchemaValidator))) GenericSchemaValidator(*schemaDocument_, root, +#if RAPIDJSON_SCHEMA_VERBOSE + depth_ + 1, +#endif + &GetStateAllocator()); + } + + virtual void DestroySchemaValidator(ISchemaValidator* validator) { + GenericSchemaValidator* v = static_cast(validator); + v->~GenericSchemaValidator(); + StateAllocator::Free(v); + } + + virtual void* CreateHasher() { + return new (GetStateAllocator().Malloc(sizeof(HasherType))) HasherType(&GetStateAllocator()); + } + + virtual uint64_t GetHashCode(void* hasher) { + return static_cast(hasher)->GetHashCode(); + } + + virtual void DestroryHasher(void* hasher) { + HasherType* h = static_cast(hasher); + h->~HasherType(); + StateAllocator::Free(h); + } + + virtual void* MallocState(size_t size) { + return GetStateAllocator().Malloc(size); + } + + virtual void FreeState(void* p) { + return StateAllocator::Free(p); + } + +private: + typedef typename SchemaType::Context Context; + typedef GenericValue, StateAllocator> HashCodeArray; + typedef internal::Hasher HasherType; + + GenericSchemaValidator( + const SchemaDocumentType& schemaDocument, + const SchemaType& root, +#if RAPIDJSON_SCHEMA_VERBOSE + unsigned depth, +#endif + StateAllocator* allocator = 0, + size_t schemaStackCapacity = kDefaultSchemaStackCapacity, + size_t documentStackCapacity = kDefaultDocumentStackCapacity) + : + schemaDocument_(&schemaDocument), + root_(root), + outputHandler_(GetNullHandler()), + stateAllocator_(allocator), + ownStateAllocator_(0), + schemaStack_(allocator, schemaStackCapacity), + documentStack_(allocator, documentStackCapacity), + valid_(true) +#if RAPIDJSON_SCHEMA_VERBOSE + , depth_(depth) +#endif + { + } + + StateAllocator& GetStateAllocator() { + if (!stateAllocator_) + stateAllocator_ = ownStateAllocator_ = RAPIDJSON_NEW(StateAllocator()); + return *stateAllocator_; + } + + bool BeginValue() { + if (schemaStack_.Empty()) + PushSchema(root_); + else { + if (CurrentContext().inArray) + internal::TokenHelper, Ch>::AppendIndexToken(documentStack_, CurrentContext().arrayElementIndex); + + if (!CurrentSchema().BeginValue(CurrentContext())) + return false; + + SizeType count = CurrentContext().patternPropertiesSchemaCount; + const SchemaType** sa = CurrentContext().patternPropertiesSchemas; + typename Context::PatternValidatorType patternValidatorType = CurrentContext().valuePatternValidatorType; + bool valueUniqueness = CurrentContext().valueUniqueness; + if (CurrentContext().valueSchema) + PushSchema(*CurrentContext().valueSchema); + + if (count > 0) { + CurrentContext().objectPatternValidatorType = patternValidatorType; + ISchemaValidator**& va = CurrentContext().patternPropertiesValidators; + SizeType& validatorCount = CurrentContext().patternPropertiesValidatorCount; + va = static_cast(MallocState(sizeof(ISchemaValidator*) * count)); + for (SizeType i = 0; i < count; i++) + va[validatorCount++] = CreateSchemaValidator(*sa[i]); + } + + 
CurrentContext().arrayUniqueness = valueUniqueness; + } + return true; + } + + bool EndValue() { + if (!CurrentSchema().EndValue(CurrentContext())) + return false; + +#if RAPIDJSON_SCHEMA_VERBOSE + GenericStringBuffer sb; + schemaDocument_->GetPointer(&CurrentSchema()).Stringify(sb); + + *documentStack_.template Push() = '\0'; + documentStack_.template Pop(1); + internal::PrintValidatorPointers(depth_, sb.GetString(), documentStack_.template Bottom()); +#endif + + uint64_t h = CurrentContext().arrayUniqueness ? static_cast(CurrentContext().hasher)->GetHashCode() : 0; + + PopSchema(); + + if (!schemaStack_.Empty()) { + Context& context = CurrentContext(); + if (context.valueUniqueness) { + HashCodeArray* a = static_cast(context.arrayElementHashCodes); + if (!a) + CurrentContext().arrayElementHashCodes = a = new (GetStateAllocator().Malloc(sizeof(HashCodeArray))) HashCodeArray(kArrayType); + for (typename HashCodeArray::ConstValueIterator itr = a->Begin(); itr != a->End(); ++itr) + if (itr->GetUint64() == h) + RAPIDJSON_INVALID_KEYWORD_RETURN(SchemaType::GetUniqueItemsString()); + a->PushBack(h, GetStateAllocator()); + } + } + + // Remove the last token of document pointer + while (!documentStack_.Empty() && *documentStack_.template Pop(1) != '/') + ; + + return true; + } + + void AppendToken(const Ch* str, SizeType len) { + documentStack_.template Reserve(1 + len * 2); // worst case all characters are escaped as two characters + *documentStack_.template PushUnsafe() = '/'; + for (SizeType i = 0; i < len; i++) { + if (str[i] == '~') { + *documentStack_.template PushUnsafe() = '~'; + *documentStack_.template PushUnsafe() = '0'; + } + else if (str[i] == '/') { + *documentStack_.template PushUnsafe() = '~'; + *documentStack_.template PushUnsafe() = '1'; + } + else + *documentStack_.template PushUnsafe() = str[i]; + } + } + + RAPIDJSON_FORCEINLINE void PushSchema(const SchemaType& schema) { new (schemaStack_.template Push()) Context(*this, &schema); } + + RAPIDJSON_FORCEINLINE void PopSchema() { + Context* c = schemaStack_.template Pop(1); + if (HashCodeArray* a = static_cast(c->arrayElementHashCodes)) { + a->~HashCodeArray(); + StateAllocator::Free(a); + } + c->~Context(); + } + + const SchemaType& CurrentSchema() const { return *schemaStack_.template Top()->schema; } + Context& CurrentContext() { return *schemaStack_.template Top(); } + const Context& CurrentContext() const { return *schemaStack_.template Top(); } + + static OutputHandler& GetNullHandler() { + static OutputHandler nullHandler; + return nullHandler; + } + + static const size_t kDefaultSchemaStackCapacity = 1024; + static const size_t kDefaultDocumentStackCapacity = 256; + const SchemaDocumentType* schemaDocument_; + const SchemaType& root_; + OutputHandler& outputHandler_; + StateAllocator* stateAllocator_; + StateAllocator* ownStateAllocator_; + internal::Stack schemaStack_; //!< stack to store the current path of schema (BaseSchemaType *) + internal::Stack documentStack_; //!< stack to store the current path of validating document (Ch) + bool valid_; +#if RAPIDJSON_SCHEMA_VERBOSE + unsigned depth_; +#endif +}; + +typedef GenericSchemaValidator SchemaValidator; + +/////////////////////////////////////////////////////////////////////////////// +// SchemaValidatingReader + +//! A helper class for parsing with validation. +/*! + This helper class is a functor, designed as a parameter of \ref GenericDocument::Populate(). + + \tparam parseFlags Combination of \ref ParseFlag. 
+ \tparam InputStream Type of input stream, implementing Stream concept. + \tparam SourceEncoding Encoding of the input stream. + \tparam SchemaDocumentType Type of schema document. + \tparam StackAllocator Allocator type for stack. +*/ +template < + unsigned parseFlags, + typename InputStream, + typename SourceEncoding, + typename SchemaDocumentType = SchemaDocument, + typename StackAllocator = CrtAllocator> +class SchemaValidatingReader { +public: + typedef typename SchemaDocumentType::PointerType PointerType; + typedef typename InputStream::Ch Ch; + + //! Constructor + /*! + \param is Input stream. + \param sd Schema document. + */ + SchemaValidatingReader(InputStream& is, const SchemaDocumentType& sd) : is_(is), sd_(sd), invalidSchemaKeyword_(), isValid_(true) {} + + template + bool operator()(Handler& handler) { + GenericReader reader; + GenericSchemaValidator validator(sd_, handler); + parseResult_ = reader.template Parse(is_, validator); + + isValid_ = validator.IsValid(); + if (isValid_) { + invalidSchemaPointer_ = PointerType(); + invalidSchemaKeyword_ = 0; + invalidDocumentPointer_ = PointerType(); + } + else { + invalidSchemaPointer_ = validator.GetInvalidSchemaPointer(); + invalidSchemaKeyword_ = validator.GetInvalidSchemaKeyword(); + invalidDocumentPointer_ = validator.GetInvalidDocumentPointer(); + } + + return parseResult_; + } + + const ParseResult& GetParseResult() const { return parseResult_; } + bool IsValid() const { return isValid_; } + const PointerType& GetInvalidSchemaPointer() const { return invalidSchemaPointer_; } + const Ch* GetInvalidSchemaKeyword() const { return invalidSchemaKeyword_; } + const PointerType& GetInvalidDocumentPointer() const { return invalidDocumentPointer_; } + +private: + InputStream& is_; + const SchemaDocumentType& sd_; + + ParseResult parseResult_; + PointerType invalidSchemaPointer_; + const Ch* invalidSchemaKeyword_; + PointerType invalidDocumentPointer_; + bool isValid_; +}; + +RAPIDJSON_NAMESPACE_END +RAPIDJSON_DIAG_POP + +#endif // RAPIDJSON_SCHEMA_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/stream.h b/contrib/rapidjson/1.1.0/include/rapidjson/stream.h new file mode 100644 index 0000000..fef82c2 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/stream.h @@ -0,0 +1,179 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#include "rapidjson.h" + +#ifndef RAPIDJSON_STREAM_H_ +#define RAPIDJSON_STREAM_H_ + +#include "encodings.h" + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// Stream + +/*! \class rapidjson::Stream + \brief Concept for reading and writing characters. + + For read-only stream, no need to implement PutBegin(), Put(), Flush() and PutEnd(). + + For write-only stream, only need to implement Put() and Flush(). 
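For completeness, a short sketch of how the SchemaValidatingReader helper defined just above is typically combined with GenericDocument::Populate() to parse and validate in a single pass; the schema argument is assumed to be an already-built SchemaDocument, and the function itself is illustrative only:

```c++
#include "rapidjson/document.h"
#include "rapidjson/schema.h"

// Hedged sketch: one-pass parse + validate into a caller-provided Document.
bool ParseAndValidate(const char* json, const rapidjson::SchemaDocument& schema,
                      rapidjson::Document& d) {
    using namespace rapidjson;

    StringStream ss(json);
    SchemaValidatingReader<kParseDefaultFlags, StringStream, UTF8<> > reader(ss, schema);
    d.Populate(reader);                    // pulls SAX events through the validating reader

    // On failure, reader.GetInvalidSchemaPointer(), reader.GetInvalidSchemaKeyword()
    // and reader.GetInvalidDocumentPointer() describe where validation broke down.
    return reader.GetParseResult() && reader.IsValid();
}
```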
+ +\code +concept Stream { + typename Ch; //!< Character type of the stream. + + //! Read the current character from stream without moving the read cursor. + Ch Peek() const; + + //! Read the current character from stream and moving the read cursor to next character. + Ch Take(); + + //! Get the current read cursor. + //! \return Number of characters read from start. + size_t Tell(); + + //! Begin writing operation at the current read pointer. + //! \return The begin writer pointer. + Ch* PutBegin(); + + //! Write a character. + void Put(Ch c); + + //! Flush the buffer. + void Flush(); + + //! End the writing operation. + //! \param begin The begin write pointer returned by PutBegin(). + //! \return Number of characters written. + size_t PutEnd(Ch* begin); +} +\endcode +*/ + +//! Provides additional information for stream. +/*! + By using traits pattern, this type provides a default configuration for stream. + For custom stream, this type can be specialized for other configuration. + See TEST(Reader, CustomStringStream) in readertest.cpp for example. +*/ +template +struct StreamTraits { + //! Whether to make local copy of stream for optimization during parsing. + /*! + By default, for safety, streams do not use local copy optimization. + Stream that can be copied fast should specialize this, like StreamTraits. + */ + enum { copyOptimization = 0 }; +}; + +//! Reserve n characters for writing to a stream. +template +inline void PutReserve(Stream& stream, size_t count) { + (void)stream; + (void)count; +} + +//! Write character to a stream, presuming buffer is reserved. +template +inline void PutUnsafe(Stream& stream, typename Stream::Ch c) { + stream.Put(c); +} + +//! Put N copies of a character to a stream. +template +inline void PutN(Stream& stream, Ch c, size_t n) { + PutReserve(stream, n); + for (size_t i = 0; i < n; i++) + PutUnsafe(stream, c); +} + +/////////////////////////////////////////////////////////////////////////////// +// StringStream + +//! Read-only string stream. +/*! \note implements Stream concept +*/ +template +struct GenericStringStream { + typedef typename Encoding::Ch Ch; + + GenericStringStream(const Ch *src) : src_(src), head_(src) {} + + Ch Peek() const { return *src_; } + Ch Take() { return *src_++; } + size_t Tell() const { return static_cast(src_ - head_); } + + Ch* PutBegin() { RAPIDJSON_ASSERT(false); return 0; } + void Put(Ch) { RAPIDJSON_ASSERT(false); } + void Flush() { RAPIDJSON_ASSERT(false); } + size_t PutEnd(Ch*) { RAPIDJSON_ASSERT(false); return 0; } + + const Ch* src_; //!< Current read position. + const Ch* head_; //!< Original head of the string. +}; + +template +struct StreamTraits > { + enum { copyOptimization = 1 }; +}; + +//! String stream with UTF8 encoding. +typedef GenericStringStream > StringStream; + +/////////////////////////////////////////////////////////////////////////////// +// InsituStringStream + +//! A read-write string stream. +/*! This string stream is particularly designed for in-situ parsing. 
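As a concrete illustration of the read-only half of the Stream concept documented above, a minimal sketch using the StringStream typedef (purely illustrative, not part of the vendored header):

```c++
#include "rapidjson/stream.h"
#include <cstdio>

int main() {
    rapidjson::StringStream s("{\"x\":1}");  // read-only stream over a NUL-terminated string
    while (s.Peek() != '\0')                 // Peek() looks at the cursor without advancing
        std::putchar(s.Take());              // Take() reads one character and advances
    std::printf("\nread %u characters\n", static_cast<unsigned>(s.Tell()));
    return 0;
}
```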
+ \note implements Stream concept +*/ +template +struct GenericInsituStringStream { + typedef typename Encoding::Ch Ch; + + GenericInsituStringStream(Ch *src) : src_(src), dst_(0), head_(src) {} + + // Read + Ch Peek() { return *src_; } + Ch Take() { return *src_++; } + size_t Tell() { return static_cast(src_ - head_); } + + // Write + void Put(Ch c) { RAPIDJSON_ASSERT(dst_ != 0); *dst_++ = c; } + + Ch* PutBegin() { return dst_ = src_; } + size_t PutEnd(Ch* begin) { return static_cast(dst_ - begin); } + void Flush() {} + + Ch* Push(size_t count) { Ch* begin = dst_; dst_ += count; return begin; } + void Pop(size_t count) { dst_ -= count; } + + Ch* src_; + Ch* dst_; + Ch* head_; +}; + +template +struct StreamTraits > { + enum { copyOptimization = 1 }; +}; + +//! Insitu string stream with UTF8 encoding. +typedef GenericInsituStringStream > InsituStringStream; + +RAPIDJSON_NAMESPACE_END + +#endif // RAPIDJSON_STREAM_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/stringbuffer.h b/contrib/rapidjson/1.1.0/include/rapidjson/stringbuffer.h new file mode 100644 index 0000000..78f34d2 --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/stringbuffer.h @@ -0,0 +1,117 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_STRINGBUFFER_H_ +#define RAPIDJSON_STRINGBUFFER_H_ + +#include "stream.h" +#include "internal/stack.h" + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS +#include // std::move +#endif + +#include "internal/stack.h" + +#if defined(__clang__) +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(c++98-compat) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +//! Represents an in-memory output stream. +/*! + \tparam Encoding Encoding of the stream. + \tparam Allocator type for allocating memory buffer. + \note implements Stream concept +*/ +template +class GenericStringBuffer { +public: + typedef typename Encoding::Ch Ch; + + GenericStringBuffer(Allocator* allocator = 0, size_t capacity = kDefaultCapacity) : stack_(allocator, capacity) {} + +#if RAPIDJSON_HAS_CXX11_RVALUE_REFS + GenericStringBuffer(GenericStringBuffer&& rhs) : stack_(std::move(rhs.stack_)) {} + GenericStringBuffer& operator=(GenericStringBuffer&& rhs) { + if (&rhs != this) + stack_ = std::move(rhs.stack_); + return *this; + } +#endif + + void Put(Ch c) { *stack_.template Push() = c; } + void PutUnsafe(Ch c) { *stack_.template PushUnsafe() = c; } + void Flush() {} + + void Clear() { stack_.Clear(); } + void ShrinkToFit() { + // Push and pop a null terminator. This is safe. 
+ *stack_.template Push() = '\0'; + stack_.ShrinkToFit(); + stack_.template Pop(1); + } + + void Reserve(size_t count) { stack_.template Reserve(count); } + Ch* Push(size_t count) { return stack_.template Push(count); } + Ch* PushUnsafe(size_t count) { return stack_.template PushUnsafe(count); } + void Pop(size_t count) { stack_.template Pop(count); } + + const Ch* GetString() const { + // Push and pop a null terminator. This is safe. + *stack_.template Push() = '\0'; + stack_.template Pop(1); + + return stack_.template Bottom(); + } + + size_t GetSize() const { return stack_.GetSize(); } + + static const size_t kDefaultCapacity = 256; + mutable internal::Stack stack_; + +private: + // Prohibit copy constructor & assignment operator. + GenericStringBuffer(const GenericStringBuffer&); + GenericStringBuffer& operator=(const GenericStringBuffer&); +}; + +//! String buffer with UTF8 encoding +typedef GenericStringBuffer > StringBuffer; + +template +inline void PutReserve(GenericStringBuffer& stream, size_t count) { + stream.Reserve(count); +} + +template +inline void PutUnsafe(GenericStringBuffer& stream, typename Encoding::Ch c) { + stream.PutUnsafe(c); +} + +//! Implement specialized version of PutN() with memset() for better performance. +template<> +inline void PutN(GenericStringBuffer >& stream, char c, size_t n) { + std::memset(stream.stack_.Push(n), c, n * sizeof(c)); +} + +RAPIDJSON_NAMESPACE_END + +#if defined(__clang__) +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_STRINGBUFFER_H_ diff --git a/contrib/rapidjson/1.1.0/include/rapidjson/writer.h b/contrib/rapidjson/1.1.0/include/rapidjson/writer.h new file mode 100644 index 0000000..94f22dd --- /dev/null +++ b/contrib/rapidjson/1.1.0/include/rapidjson/writer.h @@ -0,0 +1,610 @@ +// Tencent is pleased to support the open source community by making RapidJSON available. +// +// Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. +// +// Licensed under the MIT License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// http://opensource.org/licenses/MIT +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +#ifndef RAPIDJSON_WRITER_H_ +#define RAPIDJSON_WRITER_H_ + +#include "stream.h" +#include "internal/stack.h" +#include "internal/strfunc.h" +#include "internal/dtoa.h" +#include "internal/itoa.h" +#include "stringbuffer.h" +#include // placement new + +#if defined(RAPIDJSON_SIMD) && defined(_MSC_VER) +#include +#pragma intrinsic(_BitScanForward) +#endif +#ifdef RAPIDJSON_SSE42 +#include +#elif defined(RAPIDJSON_SSE2) +#include +#endif + +#ifdef _MSC_VER +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(4127) // conditional expression is constant +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_PUSH +RAPIDJSON_DIAG_OFF(padded) +RAPIDJSON_DIAG_OFF(unreachable-code) +#endif + +RAPIDJSON_NAMESPACE_BEGIN + +/////////////////////////////////////////////////////////////////////////////// +// WriteFlag + +/*! \def RAPIDJSON_WRITE_DEFAULT_FLAGS + \ingroup RAPIDJSON_CONFIG + \brief User-defined kWriteDefaultFlags definition. + + User can define this as any \c WriteFlag combinations. 
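Taken together with the string buffer from the previous header, the Writer declared below is usually driven like this; a sketch of the common serialization pattern under those assumptions, not part of the vendored file:

```c++
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
#include <cstdio>

int main() {
    rapidjson::StringBuffer sb;                           // in-memory UTF-8 output stream
    rapidjson::Writer<rapidjson::StringBuffer> writer(sb);

    writer.StartObject();
    writer.Key("valid");
    writer.Bool(true);
    writer.Key("count");
    writer.Uint(42);
    writer.EndObject();

    std::puts(sb.GetString());   // {"valid":true,"count":42}
    return 0;
}
```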
+*/ +#ifndef RAPIDJSON_WRITE_DEFAULT_FLAGS +#define RAPIDJSON_WRITE_DEFAULT_FLAGS kWriteNoFlags +#endif + +//! Combination of writeFlags +enum WriteFlag { + kWriteNoFlags = 0, //!< No flags are set. + kWriteValidateEncodingFlag = 1, //!< Validate encoding of JSON strings. + kWriteNanAndInfFlag = 2, //!< Allow writing of Infinity, -Infinity and NaN. + kWriteDefaultFlags = RAPIDJSON_WRITE_DEFAULT_FLAGS //!< Default write flags. Can be customized by defining RAPIDJSON_WRITE_DEFAULT_FLAGS +}; + +//! JSON writer +/*! Writer implements the concept Handler. + It generates JSON text by events to an output os. + + User may programmatically calls the functions of a writer to generate JSON text. + + On the other side, a writer can also be passed to objects that generates events, + + for example Reader::Parse() and Document::Accept(). + + \tparam OutputStream Type of output stream. + \tparam SourceEncoding Encoding of source string. + \tparam TargetEncoding Encoding of output stream. + \tparam StackAllocator Type of allocator for allocating memory of stack. + \note implements Handler concept +*/ +template, typename TargetEncoding = UTF8<>, typename StackAllocator = CrtAllocator, unsigned writeFlags = kWriteDefaultFlags> +class Writer { +public: + typedef typename SourceEncoding::Ch Ch; + + static const int kDefaultMaxDecimalPlaces = 324; + + //! Constructor + /*! \param os Output stream. + \param stackAllocator User supplied allocator. If it is null, it will create a private one. + \param levelDepth Initial capacity of stack. + */ + explicit + Writer(OutputStream& os, StackAllocator* stackAllocator = 0, size_t levelDepth = kDefaultLevelDepth) : + os_(&os), level_stack_(stackAllocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} + + explicit + Writer(StackAllocator* allocator = 0, size_t levelDepth = kDefaultLevelDepth) : + os_(0), level_stack_(allocator, levelDepth * sizeof(Level)), maxDecimalPlaces_(kDefaultMaxDecimalPlaces), hasRoot_(false) {} + + //! Reset the writer with a new stream. + /*! + This function reset the writer with a new stream and default settings, + in order to make a Writer object reusable for output multiple JSONs. + + \param os New output stream. + \code + Writer writer(os1); + writer.StartObject(); + // ... + writer.EndObject(); + + writer.Reset(os2); + writer.StartObject(); + // ... + writer.EndObject(); + \endcode + */ + void Reset(OutputStream& os) { + os_ = &os; + hasRoot_ = false; + level_stack_.Clear(); + } + + //! Checks whether the output is a complete JSON. + /*! + A complete JSON has a complete root object or array. + */ + bool IsComplete() const { + return hasRoot_ && level_stack_.Empty(); + } + + int GetMaxDecimalPlaces() const { + return maxDecimalPlaces_; + } + + //! Sets the maximum number of decimal places for double output. + /*! + This setting truncates the output with specified number of decimal places. + + For example, + + \code + writer.SetMaxDecimalPlaces(3); + writer.StartArray(); + writer.Double(0.12345); // "0.123" + writer.Double(0.0001); // "0.0" + writer.Double(1.234567890123456e30); // "1.234567890123456e30" (do not truncate significand for positive exponent) + writer.Double(1.23e-4); // "0.0" (do truncate significand for negative exponent) + writer.EndArray(); + \endcode + + The default setting does not truncate any decimal places. 
You can restore to this setting by calling + \code + writer.SetMaxDecimalPlaces(Writer::kDefaultMaxDecimalPlaces); + \endcode + */ + void SetMaxDecimalPlaces(int maxDecimalPlaces) { + maxDecimalPlaces_ = maxDecimalPlaces; + } + + /*!@name Implementation of Handler + \see Handler + */ + //@{ + + bool Null() { Prefix(kNullType); return EndValue(WriteNull()); } + bool Bool(bool b) { Prefix(b ? kTrueType : kFalseType); return EndValue(WriteBool(b)); } + bool Int(int i) { Prefix(kNumberType); return EndValue(WriteInt(i)); } + bool Uint(unsigned u) { Prefix(kNumberType); return EndValue(WriteUint(u)); } + bool Int64(int64_t i64) { Prefix(kNumberType); return EndValue(WriteInt64(i64)); } + bool Uint64(uint64_t u64) { Prefix(kNumberType); return EndValue(WriteUint64(u64)); } + + //! Writes the given \c double value to the stream + /*! + \param d The value to be written. + \return Whether it is succeed. + */ + bool Double(double d) { Prefix(kNumberType); return EndValue(WriteDouble(d)); } + + bool RawNumber(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + Prefix(kNumberType); + return EndValue(WriteString(str, length)); + } + + bool String(const Ch* str, SizeType length, bool copy = false) { + (void)copy; + Prefix(kStringType); + return EndValue(WriteString(str, length)); + } + +#if RAPIDJSON_HAS_STDSTRING + bool String(const std::basic_string& str) { + return String(str.data(), SizeType(str.size())); + } +#endif + + bool StartObject() { + Prefix(kObjectType); + new (level_stack_.template Push()) Level(false); + return WriteStartObject(); + } + + bool Key(const Ch* str, SizeType length, bool copy = false) { return String(str, length, copy); } + + bool EndObject(SizeType memberCount = 0) { + (void)memberCount; + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); + RAPIDJSON_ASSERT(!level_stack_.template Top()->inArray); + level_stack_.template Pop(1); + return EndValue(WriteEndObject()); + } + + bool StartArray() { + Prefix(kArrayType); + new (level_stack_.template Push()) Level(true); + return WriteStartArray(); + } + + bool EndArray(SizeType elementCount = 0) { + (void)elementCount; + RAPIDJSON_ASSERT(level_stack_.GetSize() >= sizeof(Level)); + RAPIDJSON_ASSERT(level_stack_.template Top()->inArray); + level_stack_.template Pop(1); + return EndValue(WriteEndArray()); + } + //@} + + /*! @name Convenience extensions */ + //@{ + + //! Simpler but slower overload. + bool String(const Ch* str) { return String(str, internal::StrLen(str)); } + bool Key(const Ch* str) { return Key(str, internal::StrLen(str)); } + + //@} + + //! Write a raw JSON value. + /*! + For user to write a stringified JSON as a value. + + \param json A well-formed JSON value. It should not contain null character within [0, length - 1] range. + \param length Length of the json. + \param type Type of the root of json. + */ + bool RawValue(const Ch* json, size_t length, Type type) { Prefix(type); return EndValue(WriteRawValue(json, length)); } + +protected: + //! 
Information for each nested level + struct Level { + Level(bool inArray_) : valueCount(0), inArray(inArray_) {} + size_t valueCount; //!< number of values in this level + bool inArray; //!< true if in array, otherwise in object + }; + + static const size_t kDefaultLevelDepth = 32; + + bool WriteNull() { + PutReserve(*os_, 4); + PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 'l'); return true; + } + + bool WriteBool(bool b) { + if (b) { + PutReserve(*os_, 4); + PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'r'); PutUnsafe(*os_, 'u'); PutUnsafe(*os_, 'e'); + } + else { + PutReserve(*os_, 5); + PutUnsafe(*os_, 'f'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'l'); PutUnsafe(*os_, 's'); PutUnsafe(*os_, 'e'); + } + return true; + } + + bool WriteInt(int i) { + char buffer[11]; + const char* end = internal::i32toa(i, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteUint(unsigned u) { + char buffer[10]; + const char* end = internal::u32toa(u, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteInt64(int64_t i64) { + char buffer[21]; + const char* end = internal::i64toa(i64, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (const char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteUint64(uint64_t u64) { + char buffer[20]; + char* end = internal::u64toa(u64, buffer); + PutReserve(*os_, static_cast(end - buffer)); + for (char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteDouble(double d) { + if (internal::Double(d).IsNanOrInf()) { + if (!(writeFlags & kWriteNanAndInfFlag)) + return false; + if (internal::Double(d).IsNan()) { + PutReserve(*os_, 3); + PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); + return true; + } + if (internal::Double(d).Sign()) { + PutReserve(*os_, 9); + PutUnsafe(*os_, '-'); + } + else + PutReserve(*os_, 8); + PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); + PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); + return true; + } + + char buffer[25]; + char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); + PutReserve(*os_, static_cast(end - buffer)); + for (char* p = buffer; p != end; ++p) + PutUnsafe(*os_, static_cast(*p)); + return true; + } + + bool WriteString(const Ch* str, SizeType length) { + static const typename TargetEncoding::Ch hexDigits[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F' }; + static const char escape[256] = { +#define Z16 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 + //0 1 2 3 4 5 6 7 8 9 A B C D E F + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'b', 't', 'n', 'u', 'f', 'r', 'u', 'u', // 00 + 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', 'u', // 10 + 0, 0, '"', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, // 20 + Z16, Z16, // 30~4F + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,'\\', 0, 0, 0, // 50 + Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16, Z16 // 60~FF +#undef Z16 + }; + + if (TargetEncoding::supportUnicode) + PutReserve(*os_, 2 + length * 6); // "\uxxxx..." + else + PutReserve(*os_, 2 + length * 12); // "\uxxxx\uyyyy..." 
+ + PutUnsafe(*os_, '\"'); + GenericStringStream is(str); + while (ScanWriteUnescapedString(is, length)) { + const Ch c = is.Peek(); + if (!TargetEncoding::supportUnicode && static_cast(c) >= 0x80) { + // Unicode escaping + unsigned codepoint; + if (RAPIDJSON_UNLIKELY(!SourceEncoding::Decode(is, &codepoint))) + return false; + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, 'u'); + if (codepoint <= 0xD7FF || (codepoint >= 0xE000 && codepoint <= 0xFFFF)) { + PutUnsafe(*os_, hexDigits[(codepoint >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(codepoint ) & 15]); + } + else { + RAPIDJSON_ASSERT(codepoint >= 0x010000 && codepoint <= 0x10FFFF); + // Surrogate pair + unsigned s = codepoint - 0x010000; + unsigned lead = (s >> 10) + 0xD800; + unsigned trail = (s & 0x3FF) + 0xDC00; + PutUnsafe(*os_, hexDigits[(lead >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(lead >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(lead >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(lead ) & 15]); + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, 'u'); + PutUnsafe(*os_, hexDigits[(trail >> 12) & 15]); + PutUnsafe(*os_, hexDigits[(trail >> 8) & 15]); + PutUnsafe(*os_, hexDigits[(trail >> 4) & 15]); + PutUnsafe(*os_, hexDigits[(trail ) & 15]); + } + } + else if ((sizeof(Ch) == 1 || static_cast(c) < 256) && RAPIDJSON_UNLIKELY(escape[static_cast(c)])) { + is.Take(); + PutUnsafe(*os_, '\\'); + PutUnsafe(*os_, static_cast(escape[static_cast(c)])); + if (escape[static_cast(c)] == 'u') { + PutUnsafe(*os_, '0'); + PutUnsafe(*os_, '0'); + PutUnsafe(*os_, hexDigits[static_cast(c) >> 4]); + PutUnsafe(*os_, hexDigits[static_cast(c) & 0xF]); + } + } + else if (RAPIDJSON_UNLIKELY(!(writeFlags & kWriteValidateEncodingFlag ? + Transcoder::Validate(is, *os_) : + Transcoder::TranscodeUnsafe(is, *os_)))) + return false; + } + PutUnsafe(*os_, '\"'); + return true; + } + + bool ScanWriteUnescapedString(GenericStringStream& is, size_t length) { + return RAPIDJSON_LIKELY(is.Tell() < length); + } + + bool WriteStartObject() { os_->Put('{'); return true; } + bool WriteEndObject() { os_->Put('}'); return true; } + bool WriteStartArray() { os_->Put('['); return true; } + bool WriteEndArray() { os_->Put(']'); return true; } + + bool WriteRawValue(const Ch* json, size_t length) { + PutReserve(*os_, length); + for (size_t i = 0; i < length; i++) { + RAPIDJSON_ASSERT(json[i] != '\0'); + PutUnsafe(*os_, json[i]); + } + return true; + } + + void Prefix(Type type) { + (void)type; + if (RAPIDJSON_LIKELY(level_stack_.GetSize() != 0)) { // this value is not at root + Level* level = level_stack_.template Top(); + if (level->valueCount > 0) { + if (level->inArray) + os_->Put(','); // add comma if it is not the first element in array + else // in object + os_->Put((level->valueCount % 2 == 0) ? ',' : ':'); + } + if (!level->inArray && level->valueCount % 2 == 0) + RAPIDJSON_ASSERT(type == kStringType); // if it's in object, then even number should be a name + level->valueCount++; + } + else { + RAPIDJSON_ASSERT(!hasRoot_); // Should only has one and only one root. + hasRoot_ = true; + } + } + + // Flush the value if it is the top level one. + bool EndValue(bool ret) { + if (RAPIDJSON_UNLIKELY(level_stack_.Empty())) // end of json text + os_->Flush(); + return ret; + } + + OutputStream* os_; + internal::Stack level_stack_; + int maxDecimalPlaces_; + bool hasRoot_; + +private: + // Prohibit copy constructor & assignment operator. 
+ Writer(const Writer&); + Writer& operator=(const Writer&); +}; + +// Full specialization for StringStream to prevent memory copying + +template<> +inline bool Writer::WriteInt(int i) { + char *buffer = os_->Push(11); + const char* end = internal::i32toa(i, buffer); + os_->Pop(static_cast(11 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteUint(unsigned u) { + char *buffer = os_->Push(10); + const char* end = internal::u32toa(u, buffer); + os_->Pop(static_cast(10 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteInt64(int64_t i64) { + char *buffer = os_->Push(21); + const char* end = internal::i64toa(i64, buffer); + os_->Pop(static_cast(21 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteUint64(uint64_t u) { + char *buffer = os_->Push(20); + const char* end = internal::u64toa(u, buffer); + os_->Pop(static_cast(20 - (end - buffer))); + return true; +} + +template<> +inline bool Writer::WriteDouble(double d) { + if (internal::Double(d).IsNanOrInf()) { + // Note: This code path can only be reached if (RAPIDJSON_WRITE_DEFAULT_FLAGS & kWriteNanAndInfFlag). + if (!(kWriteDefaultFlags & kWriteNanAndInfFlag)) + return false; + if (internal::Double(d).IsNan()) { + PutReserve(*os_, 3); + PutUnsafe(*os_, 'N'); PutUnsafe(*os_, 'a'); PutUnsafe(*os_, 'N'); + return true; + } + if (internal::Double(d).Sign()) { + PutReserve(*os_, 9); + PutUnsafe(*os_, '-'); + } + else + PutReserve(*os_, 8); + PutUnsafe(*os_, 'I'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'f'); + PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 'n'); PutUnsafe(*os_, 'i'); PutUnsafe(*os_, 't'); PutUnsafe(*os_, 'y'); + return true; + } + + char *buffer = os_->Push(25); + char* end = internal::dtoa(d, buffer, maxDecimalPlaces_); + os_->Pop(static_cast(25 - (end - buffer))); + return true; +} + +#if defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) +template<> +inline bool Writer::ScanWriteUnescapedString(StringStream& is, size_t length) { + if (length < 16) + return RAPIDJSON_LIKELY(is.Tell() < length); + + if (!RAPIDJSON_LIKELY(is.Tell() < length)) + return false; + + const char* p = is.src_; + const char* end = is.head_ + length; + const char* nextAligned = reinterpret_cast((reinterpret_cast(p) + 15) & static_cast(~15)); + const char* endAligned = reinterpret_cast(reinterpret_cast(end) & static_cast(~15)); + if (nextAligned > end) + return true; + + while (p != nextAligned) + if (*p < 0x20 || *p == '\"' || *p == '\\') { + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); + } + else + os_->PutUnsafe(*p++); + + // The rest of string using SIMD + static const char dquote[16] = { '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"', '\"' }; + static const char bslash[16] = { '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\', '\\' }; + static const char space[16] = { 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19, 0x19 }; + const __m128i dq = _mm_loadu_si128(reinterpret_cast(&dquote[0])); + const __m128i bs = _mm_loadu_si128(reinterpret_cast(&bslash[0])); + const __m128i sp = _mm_loadu_si128(reinterpret_cast(&space[0])); + + for (; p != endAligned; p += 16) { + const __m128i s = _mm_load_si128(reinterpret_cast(p)); + const __m128i t1 = _mm_cmpeq_epi8(s, dq); + const __m128i t2 = _mm_cmpeq_epi8(s, bs); + const __m128i t3 = _mm_cmpeq_epi8(_mm_max_epu8(s, sp), sp); // s < 0x20 <=> max(s, 0x19) == 0x19 + const __m128i x = 
_mm_or_si128(_mm_or_si128(t1, t2), t3); + unsigned short r = static_cast(_mm_movemask_epi8(x)); + if (RAPIDJSON_UNLIKELY(r != 0)) { // some of characters is escaped + SizeType len; +#ifdef _MSC_VER // Find the index of first escaped + unsigned long offset; + _BitScanForward(&offset, r); + len = offset; +#else + len = static_cast(__builtin_ffs(r) - 1); +#endif + char* q = reinterpret_cast(os_->PushUnsafe(len)); + for (size_t i = 0; i < len; i++) + q[i] = p[i]; + + p += len; + break; + } + _mm_storeu_si128(reinterpret_cast<__m128i *>(os_->PushUnsafe(16)), s); + } + + is.src_ = p; + return RAPIDJSON_LIKELY(is.Tell() < length); +} +#endif // defined(RAPIDJSON_SSE2) || defined(RAPIDJSON_SSE42) + +RAPIDJSON_NAMESPACE_END + +#ifdef _MSC_VER +RAPIDJSON_DIAG_POP +#endif + +#ifdef __clang__ +RAPIDJSON_DIAG_POP +#endif + +#endif // RAPIDJSON_RAPIDJSON_H_ diff --git a/contrib/rapidjson/LICENSE b/contrib/rapidjson/LICENSE new file mode 100644 index 0000000..7ccc161 --- /dev/null +++ b/contrib/rapidjson/LICENSE @@ -0,0 +1,57 @@ +Tencent is pleased to support the open source community by making RapidJSON available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. All rights reserved. + +If you have downloaded a copy of the RapidJSON binary from Tencent, please note that the RapidJSON binary is licensed under the MIT License. +If you have downloaded a copy of the RapidJSON source code from Tencent, please note that RapidJSON source code is licensed under the MIT License, except for the third-party components listed below which are subject to different license terms. Your integration of RapidJSON into your own projects may require compliance with the MIT License, as well as the other licenses applicable to the third-party components included within RapidJSON. To avoid the problematic JSON license in your own projects, it's sufficient to exclude the bin/jsonchecker/ directory, as it's the only code under the JSON license. +A copy of the MIT License is included in this file. + +Other dependencies and licenses: + +Open Source Software Licensed Under the BSD License: +-------------------------------------------------------------------- + +The msinttypes r29 +Copyright (c) 2006-2013 Alexander Chemeris +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. +* Neither the name of copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +Open Source Software Licensed Under the JSON License: +-------------------------------------------------------------------- + +json.org +Copyright (c) 2002 JSON.org +All Rights Reserved. + +JSON_checker +Copyright (c) 2002 JSON.org +All Rights Reserved. + + +Terms of the JSON License: +--------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +The Software shall be used for Good, not Evil. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +Terms of the MIT License: +-------------------------------------------------------------------- + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/contrib/supercluster/0.3.2/LICENSE b/contrib/supercluster/0.3.2/LICENSE new file mode 100644 index 0000000..14101c2 --- /dev/null +++ b/contrib/supercluster/0.3.2/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2016, Mapbox + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/contrib/supercluster/0.3.2/README.md b/contrib/supercluster/0.3.2/README.md new file mode 100644 index 0000000..2db025e --- /dev/null +++ b/contrib/supercluster/0.3.2/README.md @@ -0,0 +1,3 @@ +A C++14 port of [supercluster](https://github.com/mapbox/supercluster), a fast 2D point clustering library for use in interactive maps. + +[![Build Status](https://travis-ci.org/mapbox/supercluster.hpp.svg?branch=master)](https://travis-ci.org/mapbox/supercluster.hpp) diff --git a/contrib/supercluster/0.3.2/include/supercluster.hpp b/contrib/supercluster/0.3.2/include/supercluster.hpp new file mode 100644 index 0000000..baeb42b --- /dev/null +++ b/contrib/supercluster/0.3.2/include/supercluster.hpp @@ -0,0 +1,386 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef DEBUG_TIMER +#include +#include +#endif + +namespace mapbox { +namespace supercluster { + +using namespace mapbox::geometry; +using namespace mapbox::feature; + +struct Cluster { + const point pos; + const std::uint32_t num_points; + std::uint32_t id; + std::uint32_t parent_id = 0; + bool visited = false; + + Cluster(const point pos_, const std::uint32_t num_points_, const std::uint32_t id_) + : pos(pos_), num_points(num_points_), id(id_) { + } + + mapbox::feature::feature toGeoJSON() const { + const double x = (pos.x - 0.5) * 360.0; + const double y = + 360.0 * std::atan(std::exp((180.0 - pos.y * 360.0) * M_PI / 180)) / M_PI - 90.0; + return { point{ x, y }, getProperties(), + identifier(static_cast(id)) }; + } + + property_map getProperties() const { + property_map properties{ { "cluster", true }, + { "cluster_id", static_cast(id) }, + { "point_count", static_cast(num_points) } }; + + std::stringstream ss; + if (num_points >= 1000) { + ss << std::fixed; + if (num_points < 10000) { + ss << std::setprecision(1); + } + ss << double(num_points) / 1000 << "k"; + } else { + ss << num_points; + } + properties.emplace("point_count_abbreviated", ss.str()); + + return properties; + } +}; + +} // namespace supercluster +} // namespace mapbox + +namespace kdbush { + +using Cluster = mapbox::supercluster::Cluster; + +template <> +struct nth<0, Cluster> { + inline static double get(const Cluster &c) { + return c.pos.x; + }; +}; +template <> +struct nth<1, Cluster> { + inline static double get(const Cluster &c) { + return c.pos.y; + }; +}; + +} // namespace kdbush + +namespace mapbox { +namespace supercluster { + +#ifdef DEBUG_TIMER +class Timer { +public: + std::chrono::high_resolution_clock::time_point started; + Timer() { + started = std::chrono::high_resolution_clock::now(); + } + void operator()(std::string msg) { + const auto now = std::chrono::high_resolution_clock::now(); + const auto ms = std::chrono::duration_cast(now - started); + std::cerr << msg << ": " << double(ms.count()) / 1000 << "ms\n"; + started = now; + } +}; +#endif + +struct Options { + std::uint8_t minZoom = 0; // min zoom to generate clusters on + std::uint8_t maxZoom = 16; // max zoom level to cluster the 
points on + std::uint16_t radius = 40; // cluster radius in pixels + std::uint16_t extent = 512; // tile extent (radius is calculated relative to it) +}; + +class Supercluster { + using GeoJSONPoint = point; + using GeoJSONFeature = mapbox::feature::feature; + using GeoJSONFeatures = feature_collection; + + using TilePoint = point; + using TileFeature = mapbox::feature::feature; + using TileFeatures = feature_collection; + +public: + const GeoJSONFeatures features; + const Options options; + + Supercluster(const GeoJSONFeatures &features_, const Options options_ = Options()) + : features(features_), options(options_) { + +#ifdef DEBUG_TIMER + Timer timer; +#endif + // convert and index initial points + zooms.emplace(options.maxZoom + 1, features); +#ifdef DEBUG_TIMER + timer(std::to_string(features.size()) + " initial points"); +#endif + for (int z = options.maxZoom; z >= options.minZoom; z--) { + // cluster points from the previous zoom level + const double r = options.radius / (options.extent * std::pow(2, z)); + zooms.emplace(z, Zoom(zooms[z + 1], r, z)); +#ifdef DEBUG_TIMER + timer(std::to_string(zooms[z].clusters.size()) + " clusters"); +#endif + } + } + + TileFeatures getTile(const std::uint8_t z, const std::uint32_t x_, const std::uint32_t y) const { + TileFeatures result; + + const auto zoom_iter = zooms.find(limitZoom(z)); + assert(zoom_iter != zooms.end()); + const auto &zoom = zoom_iter->second; + + std::uint32_t z2 = std::pow(2, z); + const double r = static_cast(options.radius) / options.extent; + std::int32_t x = x_; + + const auto visitor = [&, this](const auto &id) { + assert(id < zoom.clusters.size()); + const auto &c = zoom.clusters[id]; + + const TilePoint point(::round(this->options.extent * (c.pos.x * z2 - x)), + ::round(this->options.extent * (c.pos.y * z2 - y))); + + if (c.num_points == 1) { + const auto &original_feature = this->features[c.id]; + result.emplace_back(point, original_feature.properties, original_feature.id); + } else { + result.emplace_back( + point, c.getProperties(), + identifier(static_cast(c.id))); + } + }; + + const double top = (y - r) / z2; + const double bottom = (y + 1 + r) / z2; + + zoom.tree.range((x - r) / z2, top, (x + 1 + r) / z2, bottom, visitor); + + if (x_ == 0) { + x = z2; + zoom.tree.range(1 - r / z2, top, 1, bottom, visitor); + } + if (x_ == z2 - 1) { + x = -1; + zoom.tree.range(0, top, r / z2, bottom, visitor); + } + + return result; + } + + GeoJSONFeatures getChildren(const std::uint32_t cluster_id) const { + GeoJSONFeatures children; + eachChild(cluster_id, + [&, this](const auto &c) { children.push_back(this->clusterToGeoJSON(c)); }); + return children; + } + + GeoJSONFeatures getLeaves(const std::uint32_t cluster_id, + const std::uint32_t limit = 10, + const std::uint32_t offset = 0) const { + GeoJSONFeatures leaves; + std::uint32_t skipped = 0; + std::uint32_t limit_ = limit; + eachLeaf(cluster_id, limit_, offset, skipped, + [&, this](const auto &c) { leaves.push_back(this->clusterToGeoJSON(c)); }); + return leaves; + } + + std::uint8_t getClusterExpansionZoom(std::uint32_t cluster_id) const { + auto cluster_zoom = (cluster_id % 32) - 1; + while (cluster_zoom <= options.maxZoom) { + std::uint32_t num_children = 0; + + eachChild(cluster_id, [&](const auto &c) { + num_children++; + cluster_id = c.id; + }); + + cluster_zoom++; + + if (num_children != 1) + break; + } + return cluster_zoom; + } + +private: + struct Zoom { + kdbush::KDBush tree; + std::vector clusters; + + Zoom() = default; + + Zoom(const GeoJSONFeatures 
&features_) { + // generate a cluster object for each point + std::uint32_t i = 0; + + for (const auto &f : features_) { + clusters.emplace_back(project(f.geometry.get()), 1, i++); + } + + tree.fill(clusters); + } + + Zoom(Zoom &previous, const double r, const std::uint8_t zoom) { + + // The zoom parameter is restricted to [minZoom, maxZoom] by caller + assert( ((zoom + 1) & 0b11111) == (zoom + 1) ); + + // Since point index is encoded in the upper 27 bits, clamp the count of clusters + const auto previous_clusters_size = std::min(previous.clusters.size(), static_cast::size_type>(0x7ffffff)); + + for (std::size_t i = 0; i < previous_clusters_size; i++) { + auto &p = previous.clusters[i]; + + if (p.visited) { + continue; + } + + p.visited = true; + + auto num_points = p.num_points; + point weight = p.pos * double(num_points); + + std::uint32_t id = static_cast((i << 5) + (zoom + 1)); + + // find all nearby points + previous.tree.within(p.pos.x, p.pos.y, r, [&](const auto &neighbor_id) { + assert(neighbor_id < previous.clusters.size()); + auto &b = previous.clusters[neighbor_id]; + + // filter out neighbors that are already processed + if (b.visited) { + return; + } + + b.visited = true; + b.parent_id = id; + + // accumulate coordinates for calculating weighted center + weight += b.pos * double(b.num_points); + num_points += b.num_points; + }); + + if (num_points == 1) { + clusters.emplace_back(weight, 1, p.id); + } else { + p.parent_id = id; + clusters.emplace_back(weight / double(num_points), num_points, id); + } + } + + tree.fill(clusters); + } + }; + + std::unordered_map zooms; + + std::uint8_t limitZoom(const std::uint8_t z) const { + if (z < options.minZoom) + return options.minZoom; + if (z > options.maxZoom + 1) + return options.maxZoom + 1; + return z; + } + + template + void eachChild(const std::uint32_t cluster_id, const TVisitor &visitor) const { + const auto origin_id = cluster_id >> 5; + const auto origin_zoom = cluster_id % 32; + + const auto zoom_iter = zooms.find(origin_zoom); + if (zoom_iter == zooms.end()) { + throw std::runtime_error("No cluster with the specified id."); + } + + auto &zoom = zoom_iter->second; + if (origin_id >= zoom.clusters.size()) { + throw std::runtime_error("No cluster with the specified id."); + } + + const double r = options.radius / (double(options.extent) * std::pow(2, origin_zoom - 1)); + const auto &origin = zoom.clusters[origin_id]; + + bool hasChildren = false; + + zoom.tree.within(origin.pos.x, origin.pos.y, r, [&](const auto &id) { + assert(id < zoom.clusters.size()); + const auto &cluster_child = zoom.clusters[id]; + if (cluster_child.parent_id == cluster_id) { + visitor(cluster_child); + hasChildren = true; + } + }); + + if (!hasChildren) { + throw std::runtime_error("No cluster with the specified id."); + } + } + + template + void eachLeaf(const std::uint32_t cluster_id, + std::uint32_t &limit, + const std::uint32_t offset, + std::uint32_t &skipped, + const TVisitor &visitor) const { + + eachChild(cluster_id, [&, this](const auto &cluster_leaf) { + if (limit == 0) + return; + if (cluster_leaf.num_points > 1) { + if (skipped + cluster_leaf.num_points <= offset) { + // skip the whole cluster + skipped += cluster_leaf.num_points; + } else { + // enter the cluster + this->eachLeaf(cluster_leaf.id, limit, offset, skipped, visitor); + // exit the cluster + } + } else if (skipped < offset) { + // skip a single point + skipped++; + } else { + // visit a single point + visitor(cluster_leaf); + limit--; + } + }); + } + + GeoJSONFeature 
clusterToGeoJSON(const Cluster &c) const { + return c.num_points == 1 ? features[c.id] : c.toGeoJSON(); + } + + static point project(const GeoJSONPoint &p) { + const auto lngX = p.x / 360 + 0.5; + const double sine = std::sin(p.y * M_PI / 180); + const double y = 0.5 - 0.25 * std::log((1 + sine) / (1 - sine)) / M_PI; + const auto latY = std::min(std::max(y, 0.0), 1.0); + return { lngX, latY }; + } +}; + +} // namespace supercluster +} // namespace mapbox diff --git a/contrib/variant/1.2.0/LICENSE b/contrib/variant/1.2.0/LICENSE new file mode 100644 index 0000000..6c4ce40 --- /dev/null +++ b/contrib/variant/1.2.0/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) MapBox +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +- Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. +- Neither the name "MapBox" nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/contrib/variant/1.2.0/README.md b/contrib/variant/1.2.0/README.md new file mode 100644 index 0000000..6bf97f0 --- /dev/null +++ b/contrib/variant/1.2.0/README.md @@ -0,0 +1,248 @@ +# Mapbox Variant + +A header-only alternative to `boost::variant` for C++11 and C++14 + +[![Build Status](https://secure.travis-ci.org/mapbox/variant.svg)](https://travis-ci.org/mapbox/variant) +[![Build status](https://ci.appveyor.com/api/projects/status/v9tatx21j1k0fcgy)](https://ci.appveyor.com/project/Mapbox/variant) +[![Coverage Status](https://coveralls.io/repos/mapbox/variant/badge.svg?branch=master&service=github)](https://coveralls.io/r/mapbox/variant?branch=master) + +## Introduction + +Variant's basic building blocks are: + +- `variant<Ts...>` - a type-safe representation for sum-types / discriminated unions +- `recursive_wrapper<T>` - a helper type to represent recursive "tree-like" variants +- `apply_visitor(visitor, myVariant)` - to invoke a custom visitor on the variant's underlying type +- `get<T>()` - a function to directly unwrap a variant's underlying type +- `.match([](Type){})` - a variant convenience member function taking an arbitrary number of lambdas, creating a visitor behind the scenes and applying it to the variant + +### Basic Usage - HTTP API Example + +Suppose you want to represent an HTTP API response which is either a JSON result or an error: + +```c++ +struct Result { + Json object; +}; + +struct Error { + int32_t code; + string message; +}; +``` + +You can represent this at type level using a variant which is either an `Error` or a `Result`: + +```c++ +using Response = variant<Error, Result>; + +Response makeRequest() { + return Error{501, "Not Implemented"}; +} + +Response ret = makeRequest(); +``` + +To see which type the `Response` holds you pattern match on the variant, unwrapping the underlying value: + +```c++ +ret.match([] (Result r) { print(r.object); }, + [] (Error e) { print(e.message); }); +``` + +Instead of using the variant's convenience `.match` pattern matching function you can create a type visitor functor and use `apply_visitor` manually: + +```c++ +struct ResponseVisitor { + void operator()(Result r) const { + print(r.object); + } + + void operator()(Error e) const { + print(e.message); + } +}; + +ResponseVisitor visitor; +apply_visitor(visitor, ret); +``` + +In both cases the compiler makes sure you handle all types the variant can represent at compile time. + +### Recursive Variants - JSON Example + +[JSON](http://www.json.org/) consists of types `String`, `Number`, `True`, `False`, `Null`, `Array` and `Object`. + +```c++ +struct String { string value; }; +struct Number { double value; }; +struct True { }; +struct False { }; +struct Null { }; +struct Array { vector<Value> values; }; +struct Object { unordered_map<string, Value> values; }; +``` + +This works for primitive types, but how do we represent recursive types such as `Array` which can hold multiple elements and `Array` itself, too? + +For these use cases Variant provides a `recursive_wrapper` helper type which lets you express recursive Variants.
+ +```c++ +struct String { string value; }; +struct Number { double value; }; +struct True { }; +struct False { }; +struct Null { }; + +// Forward declarations only +struct Array; +struct Object; + +using Value = variant<Null, True, False, Number, String, recursive_wrapper<Array>, recursive_wrapper<Object>>; + +struct Array { + vector<Value> values; +}; + +struct Object { + unordered_map<string, Value> values; +}; +``` + +For walking the JSON representation you can again either create a `JSONVisitor`: + +```c++ +struct JSONVisitor { + + void operator()(Null) const { + print("null"); + } + + // same for all other JSON types +}; + +JSONVisitor visitor; +apply_visitor(visitor, json); +``` + +Or use the convenience `.match` pattern matching function: + +```c++ +json.match([] (Null) { print("null"); }, + ...); +``` + +To summarize: use `recursive_wrapper` to represent recursive "tree-like" representations: + +```c++ +struct Empty { }; +struct Node; + +using Tree = variant<Empty, recursive_wrapper<Node>>; + +struct Node { + uint64_t value; +}; +``` + +### Advanced Usage Tips + +Creating type aliases for variants is a great way to reduce repetition. +Keep in mind those type aliases are not checked at type level, though. +We recommend creating a new type for all but basic variant usage: + +```c++ +// the compiler can't tell the following two apart +using APIResult = variant<Error, Result>; +using FilesystemResult = variant<Error, Result>; + +// new type +struct APIResult : variant<Error, Result> { + using Base = variant<Error, Result>; + using Base::Base; +}; +``` + +## Why use Mapbox Variant? + +Mapbox variant has the same speedy performance as `boost::variant` but is +faster to compile, results in smaller binaries, and has no dependencies. + +For example on OS X 10.9 with clang++ and libc++: + +Test | Mapbox Variant | Boost Variant +---- | -------------- | ------------- +Size of pre-compiled header (release / debug) | 2.8/2.8 MB | 12/15 MB +Size of simple program linking variant (release / debug) | 8/24 K | 12/40 K +Time to compile header | 185 ms | 675 ms + +(Numbers from an older version of Mapbox variant.) + +## Goals + +Mapbox `variant` has been a very valuable, lightweight alternative for apps +that can use c++11 or c++14 but that do not want a boost dependency. +Mapbox `variant` has also been useful in apps that do depend on boost, like +mapnik, to help (slightly) with compile times and to majorly lessen dependence +on boost in core headers. The original goal and near term goal is to maintain +external API compatibility with `boost::variant` such that Mapbox `variant` +can be a "drop in". At the same time the goal is to stay minimal: Only +implement the features that are actually needed in existing software. So being +an "incomplete" implementation is just fine. + +Currently Mapbox variant doesn't try to be API compatible with the upcoming +variant standard, because the standard is not finished and it would be too much +work. But we'll revisit this decision in the future if needed. + +If Mapbox variant is not for you, have a look at [these other +implementations](doc/other_implementations.md). + +Want to know more about the upcoming standard? Have a look at our +[overview](doc/standards_effort.md). + +Most modern high-level languages provide ways to express sum types directly. +If you're curious, have a look at Haskell's pattern matching or Rust's and Swift's enums.
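As an aside for reviewers of this vendored copy, the recursive JSON example above can be exercised end to end with a small standalone visitor. The sketch below is illustrative only and not part of the vendored README: the `Printer` visitor, the chosen alternative order of `Value`, and the omission of `True`/`False` are this sketch's own simplifications, and it assumes the vendored headers are reachable as `<mapbox/variant.hpp>` (e.g. with `contrib/variant/1.2.0/include` on the include path).

```c++
#include <mapbox/variant.hpp>  // also pulls in recursive_wrapper and the visitor helpers

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Trimmed JSON-like value mirroring the README example.
struct Null {};
struct Number { double value; };
struct String { std::string value; };
struct Array;
struct Object;

using Value = mapbox::util::variant<Null, Number, String,
                                    mapbox::util::recursive_wrapper<Array>,
                                    mapbox::util::recursive_wrapper<Object>>;

struct Array  { std::vector<Value> values; };
struct Object { std::unordered_map<std::string, Value> values; };

// recursive_wrapper is unwrapped automatically by the dispatcher, so the
// visitor receives Array/Object directly and can recurse via apply_visitor.
struct Printer {
    void operator()(const Null&) const { std::cout << "null"; }
    void operator()(const Number& n) const { std::cout << n.value; }
    void operator()(const String& s) const { std::cout << '"' << s.value << '"'; }
    void operator()(const Array& a) const {
        std::cout << '[';
        bool first = true;
        for (const auto& v : a.values) {
            if (!first) std::cout << ',';
            first = false;
            mapbox::util::apply_visitor(*this, v);
        }
        std::cout << ']';
    }
    void operator()(const Object& o) const {
        std::cout << '{';
        bool first = true;
        for (const auto& kv : o.values) {
            if (!first) std::cout << ',';
            first = false;
            std::cout << '"' << kv.first << "\":";
            mapbox::util::apply_visitor(*this, kv.second);
        }
        std::cout << '}';
    }
};

int main() {
    const Value doc = Object{{
        {"answer", Value{Number{42.0}}},
        {"items",  Value{Array{{Value{String{"a"}}, Value{Null{}}}}}},
    }};
    mapbox::util::apply_visitor(Printer{}, doc);
    std::cout << '\n';  // prints e.g. {"items":["a",null],"answer":42} (map order unspecified)
}
```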
+ +## Depends + +- Compiler supporting `-std=c++11` or `-std=c++14` + +Tested with: + +- g++-4.7 +- g++-4.8 +- g++-4.9 +- g++-5.2 +- clang++-3.5 +- clang++-3.6 +- clang++-3.7 +- clang++-3.8 +- clang++-3.9 +- Visual Studio 2015 + +## Unit Tests + +On Unix systems compile and run the unit tests with `make test`. + +On Windows run `scripts/build-local.bat`. + +## Limitations + +- The `variant` can not hold references (something like `variant` is + not possible). You might want to try `std::reference_wrapper` instead. + +## Deprecations + +- The included implementation of `optional` is deprecated and will be removed + in a future version. See [issue #64](https://github.com/mapbox/variant/issues/64). +- Old versions of the code needed visitors to derive from `static_visitor`. + This is not needed any more and marked as deprecated. The `static_visitor` + class will be removed in future versions. + +## Benchmarks + + make bench + +## Check object sizes + + make sizes /path/to/boost/variant.hpp diff --git a/contrib/variant/1.2.0/include/mapbox/optional.hpp b/contrib/variant/1.2.0/include/mapbox/optional.hpp new file mode 100644 index 0000000..d84705c --- /dev/null +++ b/contrib/variant/1.2.0/include/mapbox/optional.hpp @@ -0,0 +1,74 @@ +#ifndef MAPBOX_UTIL_OPTIONAL_HPP +#define MAPBOX_UTIL_OPTIONAL_HPP + +#pragma message("This implementation of optional is deprecated. See https://github.com/mapbox/variant/issues/64.") + +#include +#include + +#include + +namespace mapbox { +namespace util { + +template +class optional +{ + static_assert(!std::is_reference::value, "optional doesn't support references"); + + struct none_type + { + }; + + variant variant_; + +public: + optional() = default; + + optional(optional const& rhs) + { + if (this != &rhs) + { // protect against invalid self-assignment + variant_ = rhs.variant_; + } + } + + optional(T const& v) { variant_ = v; } + + explicit operator bool() const noexcept { return variant_.template is(); } + + T const& get() const { return variant_.template get(); } + T& get() { return variant_.template get(); } + + T const& operator*() const { return this->get(); } + T operator*() { return this->get(); } + + optional& operator=(T const& v) + { + variant_ = v; + return *this; + } + + optional& operator=(optional const& rhs) + { + if (this != &rhs) + { + variant_ = rhs.variant_; + } + return *this; + } + + template + void emplace(Args&&... args) + { + variant_ = T{std::forward(args)...}; + } + + void reset() { variant_ = none_type{}; } + +}; // class optional + +} // namespace util +} // namespace mapbox + +#endif // MAPBOX_UTIL_OPTIONAL_HPP diff --git a/contrib/variant/1.2.0/include/mapbox/recursive_wrapper.hpp b/contrib/variant/1.2.0/include/mapbox/recursive_wrapper.hpp new file mode 100644 index 0000000..4ffcbd7 --- /dev/null +++ b/contrib/variant/1.2.0/include/mapbox/recursive_wrapper.hpp @@ -0,0 +1,122 @@ +#ifndef MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP +#define MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP + +// Based on variant/recursive_wrapper.hpp from boost. +// +// Original license: +// +// Copyright (c) 2002-2003 +// Eric Friedman, Itay Maman +// +// Distributed under the Boost Software License, Version 1.0. 
(See +// accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) + +#include +#include + +namespace mapbox { +namespace util { + +template +class recursive_wrapper +{ + + T* p_; + + void assign(T const& rhs) + { + this->get() = rhs; + } + +public: + using type = T; + + /** + * Default constructor default initializes the internally stored value. + * For POD types this means nothing is done and the storage is + * uninitialized. + * + * @throws std::bad_alloc if there is insufficient memory for an object + * of type T. + * @throws any exception thrown by the default constructur of T. + */ + recursive_wrapper() + : p_(new T){} + + ~recursive_wrapper() noexcept { delete p_; } + + recursive_wrapper(recursive_wrapper const& operand) + : p_(new T(operand.get())) {} + + recursive_wrapper(T const& operand) + : p_(new T(operand)) {} + + recursive_wrapper(recursive_wrapper&& operand) + : p_(new T(std::move(operand.get()))) {} + + recursive_wrapper(T&& operand) + : p_(new T(std::move(operand))) {} + + inline recursive_wrapper& operator=(recursive_wrapper const& rhs) + { + assign(rhs.get()); + return *this; + } + + inline recursive_wrapper& operator=(T const& rhs) + { + assign(rhs); + return *this; + } + + inline void swap(recursive_wrapper& operand) noexcept + { + T* temp = operand.p_; + operand.p_ = p_; + p_ = temp; + } + + recursive_wrapper& operator=(recursive_wrapper&& rhs) noexcept + { + swap(rhs); + return *this; + } + + recursive_wrapper& operator=(T&& rhs) + { + get() = std::move(rhs); + return *this; + } + + T& get() + { + assert(p_); + return *get_pointer(); + } + + T const& get() const + { + assert(p_); + return *get_pointer(); + } + + T* get_pointer() { return p_; } + + const T* get_pointer() const { return p_; } + + operator T const&() const { return this->get(); } + + operator T&() { return this->get(); } + +}; // class recursive_wrapper + +template +inline void swap(recursive_wrapper& lhs, recursive_wrapper& rhs) noexcept +{ + lhs.swap(rhs); +} +} // namespace util +} // namespace mapbox + +#endif // MAPBOX_UTIL_RECURSIVE_WRAPPER_HPP diff --git a/contrib/variant/1.2.0/include/mapbox/variant.hpp b/contrib/variant/1.2.0/include/mapbox/variant.hpp new file mode 100644 index 0000000..06a46ab --- /dev/null +++ b/contrib/variant/1.2.0/include/mapbox/variant.hpp @@ -0,0 +1,1053 @@ +#ifndef MAPBOX_UTIL_VARIANT_HPP +#define MAPBOX_UTIL_VARIANT_HPP + +#include +#include // size_t +#include // operator new +#include // runtime_error +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +// clang-format off +// [[deprecated]] is only available in C++14, use this for the time being +#if __cplusplus <= 201103L +# ifdef __GNUC__ +# define MAPBOX_VARIANT_DEPRECATED __attribute__((deprecated)) +# elif defined(_MSC_VER) +# define MAPBOX_VARIANT_DEPRECATED __declspec(deprecated) +# else +# define MAPBOX_VARIANT_DEPRECATED +# endif +#else +# define MAPBOX_VARIANT_DEPRECATED [[deprecated]] +#endif + + +#ifdef _MSC_VER +// https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx +# ifdef NDEBUG +# define VARIANT_INLINE __forceinline +# else +# define VARIANT_INLINE //__declspec(noinline) +# endif +#else +# ifdef NDEBUG +# define VARIANT_INLINE //inline __attribute__((always_inline)) +# else +# define VARIANT_INLINE __attribute__((noinline)) +# endif +#endif +// clang-format on + +// Exceptions +#if defined( __EXCEPTIONS) || defined( _MSC_VER) +#define HAS_EXCEPTIONS +#endif + +#define VARIANT_MAJOR_VERSION 1 +#define VARIANT_MINOR_VERSION 1 
+#define VARIANT_PATCH_VERSION 0 + +#define VARIANT_VERSION (VARIANT_MAJOR_VERSION * 100000) + (VARIANT_MINOR_VERSION * 100) + (VARIANT_PATCH_VERSION) + +namespace mapbox { +namespace util { + +// XXX This should derive from std::logic_error instead of std::runtime_error. +// See https://github.com/mapbox/variant/issues/48 for details. +class bad_variant_access : public std::runtime_error +{ + +public: + explicit bad_variant_access(const std::string& what_arg) + : runtime_error(what_arg) {} + + explicit bad_variant_access(const char* what_arg) + : runtime_error(what_arg) {} + +}; // class bad_variant_access + +#if !defined(MAPBOX_VARIANT_MINIMIZE_SIZE) +using type_index_t = unsigned int; +#else +#if defined(MAPBOX_VARIANT_OPTIMIZE_FOR_SPEED) +using type_index_t = std::uint_fast8_t; +#else +using type_index_t = std::uint_least8_t; +#endif +#endif + +namespace detail { + +static constexpr type_index_t invalid_value = type_index_t(-1); + +template +struct direct_type; + +template +struct direct_type +{ + static constexpr type_index_t index = std::is_same::value + ? sizeof...(Types) + : direct_type::index; +}; + +template +struct direct_type +{ + static constexpr type_index_t index = invalid_value; +}; + +#if __cpp_lib_logical_traits >= 201510L + +using std::conjunction; +using std::disjunction; + +#else + +template +struct conjunction : std::true_type {}; + +template +struct conjunction : B1 {}; + +template +struct conjunction : std::conditional::type {}; + +template +struct conjunction : std::conditional, B1>::type {}; + +template +struct disjunction : std::false_type {}; + +template +struct disjunction : B1 {}; + +template +struct disjunction : std::conditional::type {}; + +template +struct disjunction : std::conditional>::type {}; + +#endif + +template +struct convertible_type; + +template +struct convertible_type +{ + static constexpr type_index_t index = std::is_convertible::value + ? disjunction...>::value ? invalid_value : sizeof...(Types) + : convertible_type::index; +}; + +template +struct convertible_type +{ + static constexpr type_index_t index = invalid_value; +}; + +template +struct value_traits +{ + using value_type = typename std::remove_const::type>::type; + using value_type_wrapper = recursive_wrapper; + static constexpr type_index_t direct_index = direct_type::index; + static constexpr bool is_direct = direct_index != invalid_value; + static constexpr type_index_t index_direct_or_wrapper = is_direct ? direct_index : direct_type::index; + static constexpr bool is_direct_or_wrapper = index_direct_or_wrapper != invalid_value; + static constexpr type_index_t index = is_direct_or_wrapper ? index_direct_or_wrapper : convertible_type::index; + static constexpr bool is_valid = index != invalid_value; + static constexpr type_index_t tindex = is_valid ? 
sizeof...(Types)-index : 0; + using target_type = typename std::tuple_element>::type; +}; + +template +struct copy_cvref +{ + using type = Dest; +}; + +template +struct copy_cvref +{ + using type = Dest const&; +}; + +template +struct copy_cvref +{ + using type = Dest&; +}; + +template +struct copy_cvref +{ + using type = Dest&&; +}; + +template +struct deduced_result_type +{}; + +template +struct deduced_result_type()(std::declval()...))> +{ + using type = decltype(std::declval()(std::declval()...)); +}; + +template +struct visitor_result_type : deduced_result_type +{}; + +// specialization for explicit result_type member in visitor class +template +struct visitor_result_type::type::result_type>())> +{ + using type = typename std::decay::type::result_type; +}; + +template +using result_of_unary_visit = typename visitor_result_type::type; + +template +using result_of_binary_visit = typename visitor_result_type::type; + +template +struct static_max; + +template +struct static_max +{ + static const type_index_t value = arg; +}; + +template +struct static_max +{ + static const type_index_t value = arg1 >= arg2 ? static_max::value : static_max::value; +}; + +template +struct variant_helper; + +template +struct variant_helper +{ + VARIANT_INLINE static void destroy(const type_index_t type_index, void* data) + { + if (type_index == sizeof...(Types)) + { + reinterpret_cast(data)->~T(); + } + else + { + variant_helper::destroy(type_index, data); + } + } + + VARIANT_INLINE static void move(const type_index_t old_type_index, void* old_value, void* new_value) + { + if (old_type_index == sizeof...(Types)) + { + new (new_value) T(std::move(*reinterpret_cast(old_value))); + } + else + { + variant_helper::move(old_type_index, old_value, new_value); + } + } + + VARIANT_INLINE static void copy(const type_index_t old_type_index, const void* old_value, void* new_value) + { + if (old_type_index == sizeof...(Types)) + { + new (new_value) T(*reinterpret_cast(old_value)); + } + else + { + variant_helper::copy(old_type_index, old_value, new_value); + } + } +}; + +template <> +struct variant_helper<> +{ + VARIANT_INLINE static void destroy(const type_index_t, void*) {} + VARIANT_INLINE static void move(const type_index_t, void*, void*) {} + VARIANT_INLINE static void copy(const type_index_t, const void*, void*) {} +}; + +template +struct unwrapper +{ + using value_type = T; + + template + static auto apply(typename std::remove_reference::type& var) + -> typename std::enable_if::value, + decltype(var.template get_unchecked())>::type + { + return var.template get_unchecked(); + } + + template + static auto apply(typename std::remove_reference::type& var) + -> typename std::enable_if::value, + decltype(std::move(var.template get_unchecked()))>::type + { + return std::move(var.template get_unchecked()); + } +}; + +template +struct unwrapper> : unwrapper +{}; + +template +struct unwrapper> : unwrapper +{}; + +template +struct dispatcher; + +template +struct dispatcher +{ + template + VARIANT_INLINE static R apply(V&& v, F&& f) + { + if (v.template is()) + { + return std::forward(f)(unwrapper::template apply(v)); + } + else + { + return dispatcher::apply(std::forward(v), std::forward(f)); + } + } +}; + +template +struct dispatcher +{ + template + VARIANT_INLINE static R apply(V&& v, F&& f) + { + return std::forward(f)(unwrapper::template apply(v)); + } +}; + +template +struct binary_dispatcher_rhs; + +template +struct binary_dispatcher_rhs +{ + template + VARIANT_INLINE static R apply(V&& lhs, V&& rhs, F&& f) + { + if 
(rhs.template is()) // call binary functor + { + return std::forward(f)(unwrapper::template apply(lhs), + unwrapper::template apply(rhs)); + } + else + { + return binary_dispatcher_rhs::apply(std::forward(lhs), + std::forward(rhs), + std::forward(f)); + } + } +}; + +template +struct binary_dispatcher_rhs +{ + template + VARIANT_INLINE static R apply(V&& lhs, V&& rhs, F&& f) + { + return std::forward(f)(unwrapper::template apply(lhs), + unwrapper::template apply(rhs)); + } +}; + +template +struct binary_dispatcher_lhs; + +template +struct binary_dispatcher_lhs +{ + template + VARIANT_INLINE static R apply(V&& lhs, V&& rhs, F&& f) + { + if (lhs.template is()) // call binary functor + { + return std::forward(f)(unwrapper::template apply(lhs), + unwrapper::template apply(rhs)); + } + else + { + return binary_dispatcher_lhs::apply(std::forward(lhs), + std::forward(rhs), + std::forward(f)); + } + } +}; + +template +struct binary_dispatcher_lhs +{ + template + VARIANT_INLINE static R apply(V&& lhs, V&& rhs, F&& f) + { + return std::forward(f)(unwrapper::template apply(lhs), + unwrapper::template apply(rhs)); + } +}; + +template +struct binary_dispatcher; + +template +struct binary_dispatcher +{ + template + VARIANT_INLINE static R apply(V&& v0, V&& v1, F&& f) + { + if (v0.template is()) + { + if (v1.template is()) + { + return std::forward(f)(unwrapper::template apply(v0), + unwrapper::template apply(v1)); // call binary functor + } + else + { + return binary_dispatcher_rhs::apply(std::forward(v0), + std::forward(v1), + std::forward(f)); + } + } + else if (v1.template is()) + { + return binary_dispatcher_lhs::apply(std::forward(v0), + std::forward(v1), + std::forward(f)); + } + return binary_dispatcher::apply(std::forward(v0), + std::forward(v1), + std::forward(f)); + } +}; + +template +struct binary_dispatcher +{ + template + VARIANT_INLINE static R apply(V&& v0, V&& v1, F&& f) + { + return std::forward(f)(unwrapper::template apply(v0), + unwrapper::template apply(v1)); // call binary functor + } +}; + +// comparator functors +struct equal_comp +{ + template + bool operator()(T const& lhs, T const& rhs) const + { + return lhs == rhs; + } +}; + +struct less_comp +{ + template + bool operator()(T const& lhs, T const& rhs) const + { + return lhs < rhs; + } +}; + +template +class comparer +{ +public: + explicit comparer(Variant const& lhs) noexcept + : lhs_(lhs) {} + comparer& operator=(comparer const&) = delete; + // visitor + template + bool operator()(T const& rhs_content) const + { + T const& lhs_content = lhs_.template get_unchecked(); + return Comp()(lhs_content, rhs_content); + } + +private: + Variant const& lhs_; +}; + +// hashing visitor +struct hasher +{ + template + std::size_t operator()(const T& hashable) const + { + return std::hash{}(hashable); + } +}; + +} // namespace detail + +struct no_init {}; + +template +class variant +{ + static_assert(sizeof...(Types) > 0, "Template parameter type list of variant can not be empty."); + static_assert(!detail::disjunction...>::value, "Variant can not hold reference types. 
Maybe use std::reference_wrapper?"); + static_assert(!detail::disjunction...>::value, "Variant can not hold array types."); + static_assert(sizeof...(Types) < std::numeric_limits::max(), "Internal index type must be able to accommodate all alternatives."); +private: + static const std::size_t data_size = detail::static_max::value; + static const std::size_t data_align = detail::static_max::value; +public: + struct adapted_variant_tag; + using types = std::tuple; +private: + using first_type = typename std::tuple_element<0, types>::type; + using unwrap_first_type = typename detail::unwrapper::value_type; + using data_type = typename std::aligned_storage::type; + using helper_type = detail::variant_helper; + + template + using alternative_ref = typename detail::copy_cvref::type; + + type_index_t type_index; +#ifdef __clang_analyzer__ + data_type data {}; +#else + data_type data; +#endif + +public: + VARIANT_INLINE variant() noexcept(std::is_nothrow_default_constructible::value) + : type_index(sizeof...(Types)-1) + { + static_assert(std::is_default_constructible::value, "First type in variant must be default constructible to allow default construction of variant."); + new (&data) first_type(); + } + + VARIANT_INLINE variant(no_init) noexcept + : type_index(detail::invalid_value) {} + + // http://isocpp.org/blog/2012/11/universal-references-in-c11-scott-meyers + template , + typename Enable = typename std::enable_if, typename Traits::value_type>::value>::type > + VARIANT_INLINE variant(T&& val) noexcept(std::is_nothrow_constructible::value) + : type_index(Traits::index) + { + new (&data) typename Traits::target_type(std::forward(val)); + } + + VARIANT_INLINE variant(variant const& old) + : type_index(old.type_index) + { + helper_type::copy(old.type_index, &old.data, &data); + } + + VARIANT_INLINE variant(variant&& old) + noexcept(detail::conjunction...>::value) + : type_index(old.type_index) + { + helper_type::move(old.type_index, &old.data, &data); + } + +private: + VARIANT_INLINE void copy_assign(variant const& rhs) + { + helper_type::destroy(type_index, &data); + type_index = detail::invalid_value; + helper_type::copy(rhs.type_index, &rhs.data, &data); + type_index = rhs.type_index; + } + + VARIANT_INLINE void move_assign(variant&& rhs) + { + helper_type::destroy(type_index, &data); + type_index = detail::invalid_value; + helper_type::move(rhs.type_index, &rhs.data, &data); + type_index = rhs.type_index; + } + +public: + VARIANT_INLINE variant& operator=(variant&& other) + // note we check for nothrow-constructible, not nothrow-assignable, since + // move_assign uses move-construction via placement new. + noexcept(detail::conjunction...>::value) + { + if (this == &other) { // playing safe in release mode, hit assertion in debug. 
+ assert(false); + return *this; + } + move_assign(std::move(other)); + return *this; + } + + VARIANT_INLINE variant& operator=(variant const& other) + { + if (this != &other) + copy_assign(other); + return *this; + } + + // conversions + // move-assign + template , + typename Enable = typename std::enable_if, typename Traits::value_type>::value>::type > + VARIANT_INLINE variant& operator=(T&& rhs) + // not that we check is_nothrow_constructible, not is_nothrow_move_assignable, + // since we construct a temporary + noexcept(std::is_nothrow_constructible::value + && std::is_nothrow_move_assignable>::value) + { + variant temp(std::forward(rhs)); + move_assign(std::move(temp)); + return *this; + } + + // copy-assign + template + VARIANT_INLINE variant& operator=(T const& rhs) + { + variant temp(rhs); + copy_assign(temp); + return *this; + } + + template ::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE bool is() const + { + return type_index == detail::direct_type::index; + } + + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE bool is() const + { + return type_index == detail::direct_type, Types...>::index; + } + + VARIANT_INLINE bool valid() const + { + return type_index != detail::invalid_value; + } + + template + VARIANT_INLINE void set(Args&&... args) + { + helper_type::destroy(type_index, &data); + type_index = detail::invalid_value; + new (&data) T(std::forward(args)...); + type_index = detail::direct_type::index; + } + + // get_unchecked() + template ::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T& get_unchecked() + { + return *reinterpret_cast(&data); + } + +#ifdef HAS_EXCEPTIONS + // get() + template ::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T& get() + { + if (type_index == detail::direct_type::index) + { + return *reinterpret_cast(&data); + } + else + { + throw bad_variant_access("in get()"); + } + } +#endif + + template ::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T const& get_unchecked() const + { + return *reinterpret_cast(&data); + } + +#ifdef HAS_EXCEPTIONS + template ::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T const& get() const + { + if (type_index == detail::direct_type::index) + { + return *reinterpret_cast(&data); + } + else + { + throw bad_variant_access("in get()"); + } + } +#endif + + // get_unchecked() - T stored as recursive_wrapper + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T& get_unchecked() + { + return (*reinterpret_cast*>(&data)).get(); + } + +#ifdef HAS_EXCEPTIONS + // get() - T stored as recursive_wrapper + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T& get() + { + if (type_index == detail::direct_type, Types...>::index) + { + return (*reinterpret_cast*>(&data)).get(); + } + else + { + throw bad_variant_access("in get()"); + } + } +#endif + + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T const& get_unchecked() const + { + return (*reinterpret_cast const*>(&data)).get(); + } + +#ifdef HAS_EXCEPTIONS + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T const& get() const + { + if (type_index == detail::direct_type, Types...>::index) + { + return (*reinterpret_cast const*>(&data)).get(); + } + else + { + throw bad_variant_access("in get()"); + } + } +#endif + + // get_unchecked() - T stored as std::reference_wrapper + 
template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T& get_unchecked() + { + return (*reinterpret_cast*>(&data)).get(); + } + +#ifdef HAS_EXCEPTIONS + // get() - T stored as std::reference_wrapper + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T& get() + { + if (type_index == detail::direct_type, Types...>::index) + { + return (*reinterpret_cast*>(&data)).get(); + } + else + { + throw bad_variant_access("in get()"); + } + } +#endif + + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T const& get_unchecked() const + { + return (*reinterpret_cast const*>(&data)).get(); + } + +#ifdef HAS_EXCEPTIONS + template , Types...>::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE T const& get() const + { + if (type_index == detail::direct_type, Types...>::index) + { + return (*reinterpret_cast const*>(&data)).get(); + } + else + { + throw bad_variant_access("in get()"); + } + } +#endif + + // This function is deprecated because it returns an internal index field. + // Use which() instead. + MAPBOX_VARIANT_DEPRECATED VARIANT_INLINE type_index_t get_type_index() const + { + return type_index; + } + + VARIANT_INLINE int which() const noexcept + { + return static_cast(sizeof...(Types) - type_index - 1); + } + + template ::index != detail::invalid_value)>::type* = nullptr> + VARIANT_INLINE static constexpr int which() noexcept + { + return static_cast(sizeof...(Types)-detail::direct_type::index - 1); + } + + // visitor + // unary + template , + typename R = detail::result_of_unary_visit> + VARIANT_INLINE static R visit(V&& v, F&& f) + { + return detail::dispatcher::apply(std::forward(v), std::forward(f)); + } + + // binary + template , + typename R = detail::result_of_binary_visit> + VARIANT_INLINE static R binary_visit(V&& v0, V&& v1, F&& f) + { + return detail::binary_dispatcher::apply(std::forward(v0), + std::forward(v1), + std::forward(f)); + } + + // match + // unary + template + auto VARIANT_INLINE match(Fs&&... fs) const& + -> decltype(variant::visit(*this, ::mapbox::util::make_visitor(std::forward(fs)...))) + { + return variant::visit(*this, ::mapbox::util::make_visitor(std::forward(fs)...)); + } + // non-const + template + auto VARIANT_INLINE match(Fs&&... fs) & + -> decltype(variant::visit(*this, ::mapbox::util::make_visitor(std::forward(fs)...))) + { + return variant::visit(*this, ::mapbox::util::make_visitor(std::forward(fs)...)); + } + template + auto VARIANT_INLINE match(Fs&&... 
fs) && + -> decltype(variant::visit(std::move(*this), ::mapbox::util::make_visitor(std::forward(fs)...))) + { + return variant::visit(std::move(*this), ::mapbox::util::make_visitor(std::forward(fs)...)); + } + + ~variant() noexcept // no-throw destructor + { + helper_type::destroy(type_index, &data); + } + + // comparison operators + // equality + VARIANT_INLINE bool operator==(variant const& rhs) const + { + assert(valid() && rhs.valid()); + if (this->which() != rhs.which()) + { + return false; + } + detail::comparer visitor(*this); + return visit(rhs, visitor); + } + + VARIANT_INLINE bool operator!=(variant const& rhs) const + { + return !(*this == rhs); + } + + // less than + VARIANT_INLINE bool operator<(variant const& rhs) const + { + assert(valid() && rhs.valid()); + if (this->which() != rhs.which()) + { + return this->which() < rhs.which(); + } + detail::comparer visitor(*this); + return visit(rhs, visitor); + } + VARIANT_INLINE bool operator>(variant const& rhs) const + { + return rhs < *this; + } + VARIANT_INLINE bool operator<=(variant const& rhs) const + { + return !(*this > rhs); + } + VARIANT_INLINE bool operator>=(variant const& rhs) const + { + return !(*this < rhs); + } +}; + +// unary visitor interface +template +auto VARIANT_INLINE apply_visitor(F&& f, V&& v) + -> decltype(v.visit(std::forward(v), std::forward(f))) +{ + return v.visit(std::forward(v), std::forward(f)); +} + +// binary visitor interface +template +auto VARIANT_INLINE apply_visitor(F&& f, V&& v0, V&& v1) + -> decltype(v0.binary_visit(std::forward(v0), std::forward(v1), std::forward(f))) +{ + return v0.binary_visit(std::forward(v0), std::forward(v1), std::forward(f)); +} + +// getter interface + +#ifdef HAS_EXCEPTIONS +template +auto get(T& var)->decltype(var.template get()) +{ + return var.template get(); +} +#endif + +template +ResultType& get_unchecked(T& var) +{ + return var.template get_unchecked(); +} + +#ifdef HAS_EXCEPTIONS +template +auto get(T const& var)->decltype(var.template get()) +{ + return var.template get(); +} +#endif + +template +ResultType const& get_unchecked(T const& var) +{ + return var.template get_unchecked(); +} +// variant_size +template +struct variant_size; + +//variable templates is c++14 +//template +//constexpr std::size_t variant_size_v = variant_size::value; + +template +struct variant_size + : variant_size {}; + +template +struct variant_size + : variant_size {}; + +template +struct variant_size + : variant_size {}; + +template +struct variant_size> + : std::integral_constant {}; + +// variant_alternative +template +struct variant_alternative; + +#if defined(__clang__) +#if __has_builtin(__type_pack_element) +#define has_type_pack_element +#endif +#endif + +#if defined(has_type_pack_element) +template +struct variant_alternative> +{ + static_assert(sizeof...(Types) > Index , "Index out of range"); + using type = __type_pack_element; +}; +#else +template +struct variant_alternative> + : variant_alternative> +{ + static_assert(sizeof...(Types) > Index -1 , "Index out of range"); +}; + +template +struct variant_alternative<0, variant> +{ + using type = First; +}; + +#endif + +template +using variant_alternative_t = typename variant_alternative::type; + +template +struct variant_alternative + : std::add_const> {}; + +template +struct variant_alternative + : std::add_volatile> {}; + +template +struct variant_alternative + : std::add_cv> {}; + +} // namespace util +} // namespace mapbox + +// hashable iff underlying types are hashable +namespace std { +template +struct hash< 
::mapbox::util::variant> { + std::size_t operator()(const ::mapbox::util::variant& v) const noexcept + { + return ::mapbox::util::apply_visitor(::mapbox::util::detail::hasher{}, v); + } +}; + +} + +#endif // MAPBOX_UTIL_VARIANT_HPP diff --git a/contrib/variant/1.2.0/include/mapbox/variant_cast.hpp b/contrib/variant/1.2.0/include/mapbox/variant_cast.hpp new file mode 100644 index 0000000..fe1ab35 --- /dev/null +++ b/contrib/variant/1.2.0/include/mapbox/variant_cast.hpp @@ -0,0 +1,85 @@ +#ifndef VARIANT_CAST_HPP +#define VARIANT_CAST_HPP + +#include + +namespace mapbox { +namespace util { + +namespace detail { + +template +class static_caster +{ +public: + template + T& operator()(V& v) const + { + return static_cast(v); + } +}; + +template +class dynamic_caster +{ +public: + using result_type = T&; + template + T& operator()(V& v, typename std::enable_if::value>::type* = nullptr) const + { + throw std::bad_cast(); + } + template + T& operator()(V& v, typename std::enable_if::value>::type* = nullptr) const + { + return dynamic_cast(v); + } +}; + +template +class dynamic_caster +{ +public: + using result_type = T*; + template + T* operator()(V& v, typename std::enable_if::value>::type* = nullptr) const + { + return nullptr; + } + template + T* operator()(V& v, typename std::enable_if::value>::type* = nullptr) const + { + return dynamic_cast(&v); + } +}; +} + +template +typename detail::dynamic_caster::result_type +dynamic_variant_cast(V& v) +{ + return mapbox::util::apply_visitor(detail::dynamic_caster(), v); +} + +template +typename detail::dynamic_caster::result_type +dynamic_variant_cast(const V& v) +{ + return mapbox::util::apply_visitor(detail::dynamic_caster(), v); +} + +template +T& static_variant_cast(V& v) +{ + return mapbox::util::apply_visitor(detail::static_caster(), v); +} + +template +const T& static_variant_cast(const V& v) +{ + return mapbox::util::apply_visitor(detail::static_caster(), v); +} +} +} + +#endif // VARIANT_CAST_HPP diff --git a/contrib/variant/1.2.0/include/mapbox/variant_io.hpp b/contrib/variant/1.2.0/include/mapbox/variant_io.hpp new file mode 100644 index 0000000..1456cc5 --- /dev/null +++ b/contrib/variant/1.2.0/include/mapbox/variant_io.hpp @@ -0,0 +1,45 @@ +#ifndef MAPBOX_UTIL_VARIANT_IO_HPP +#define MAPBOX_UTIL_VARIANT_IO_HPP + +#include + +#include + +namespace mapbox { +namespace util { + +namespace detail { +// operator<< helper +template +class printer +{ +public: + explicit printer(Out& out) + : out_(out) {} + printer& operator=(printer const&) = delete; + + // visitor + template + void operator()(T const& operand) const + { + out_ << operand; + } + +private: + Out& out_; +}; +} + +// operator<< +template +VARIANT_INLINE std::basic_ostream& +operator<<(std::basic_ostream& out, variant const& rhs) +{ + detail::printer> visitor(out); + apply_visitor(visitor, rhs); + return out; +} +} // namespace util +} // namespace mapbox + +#endif // MAPBOX_UTIL_VARIANT_IO_HPP diff --git a/contrib/variant/1.2.0/include/mapbox/variant_visitor.hpp b/contrib/variant/1.2.0/include/mapbox/variant_visitor.hpp new file mode 100644 index 0000000..54ddba0 --- /dev/null +++ b/contrib/variant/1.2.0/include/mapbox/variant_visitor.hpp @@ -0,0 +1,43 @@ +#ifndef MAPBOX_UTIL_VARIANT_VISITOR_HPP +#define MAPBOX_UTIL_VARIANT_VISITOR_HPP + +#include + +namespace mapbox { +namespace util { + +template +struct visitor; + +template +struct visitor : Fn +{ + using Fn::operator(); + + template + visitor(T&& fn) : Fn(std::forward(fn)) {} +}; + +template +struct visitor : Fn, visitor +{ + 
using Fn::operator(); + using visitor::operator(); + + template + visitor(T&& fn, Ts&&... fns) + : Fn(std::forward(fn)) + , visitor(std::forward(fns)...) {} +}; + +template +visitor::type...> make_visitor(Fns&&... fns) +{ + return visitor::type...> + (std::forward(fns)...); +} + +} // namespace util +} // namespace mapbox + +#endif // MAPBOX_UTIL_VARIANT_VISITOR_HPP diff --git a/contrib/vtzero/1.1.0/LICENSE b/contrib/vtzero/1.1.0/LICENSE new file mode 100644 index 0000000..30ba8ac --- /dev/null +++ b/contrib/vtzero/1.1.0/LICENSE @@ -0,0 +1,25 @@ +BSD 2-Clause License + +Copyright (c) 2017, Mapbox +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/vtzero/1.1.0/include/vtzero/builder.hpp b/contrib/vtzero/1.1.0/include/vtzero/builder.hpp new file mode 100644 index 0000000..9919287 --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/builder.hpp @@ -0,0 +1,1365 @@ +#ifndef VTZERO_BUILDER_HPP +#define VTZERO_BUILDER_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file builder.hpp + * + * @brief Contains the classes and functions to build vector tiles. + */ + +#include "builder_impl.hpp" +#include "feature_builder_impl.hpp" +#include "geometry.hpp" +#include "types.hpp" +#include "vector_tile.hpp" + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace vtzero { + + /** + * Used to build vector tiles. Whenever you are building a new vector + * tile, start with an object of this class and add layers. After all + * the data is added, call serialize(). + * + * @code + * layer some_existing_layer = ...; + * + * tile_builder builder; + * layer_builder layer_roads{builder, "roads"}; + * builder.add_existing_layer(some_existing_layer); + * ... + * std::string data = builder.serialize(); + * @endcode + */ + class tile_builder { + + friend class layer_builder; + + std::vector> m_layers; + + /** + * Add a new layer to the vector tile based on an existing layer. 
The + * new layer will have the same name, version, and extent as the + * existing layer. The new layer will not contain any features. This + * method is handy when copying some (but not all) data from an + * existing layer. + */ + detail::layer_builder_impl* add_layer(const layer& layer) { + auto* ptr = new detail::layer_builder_impl{layer.name(), layer.version(), layer.extent()}; + m_layers.emplace_back(ptr); + return ptr; + } + + /** + * Add a new layer to the vector tile with the specified name, version, + * and extent. + * + * @tparam TString Some string type (const char*, std::string, + * vtzero::data_view) or something that converts to one of + * these types. + * @param name Name of this layer. + * @param version Version of this layer (only version 1 and 2 are + * supported) + * @param extent Extent used for this layer. + */ + template + detail::layer_builder_impl* add_layer(TString&& name, uint32_t version, uint32_t extent) { + auto* ptr = new detail::layer_builder_impl{std::forward(name), version, extent}; + m_layers.emplace_back(ptr); + return ptr; + } + + public: + + /// Constructor + tile_builder() = default; + + /// Destructor + ~tile_builder() noexcept = default; + + /// Tile builders can not be copied. + tile_builder(const tile_builder&) = delete; + + /// Tile builders can not be copied. + tile_builder& operator=(const tile_builder&) = delete; + + /// Tile builders can be moved. + tile_builder(tile_builder&&) = default; + + /// Tile builders can be moved. + tile_builder& operator=(tile_builder&&) = default; + + /** + * Add an existing layer to the vector tile. The layer data will be + * copied over into the new vector_tile when the serialize() method + * is called. Until then, the data referenced here must stay available. + * + * @param data Reference to some data that must be a valid encoded + * layer. + */ + void add_existing_layer(data_view&& data) { + m_layers.emplace_back(new detail::layer_builder_impl{std::forward(data)}); + } + + /** + * Add an existing layer to the vector tile. The layer data will be + * copied over into the new vector_tile when the serialize() method + * is called. Until then, the data referenced here must stay available. + * + * @param layer Reference to the layer to be copied. + */ + void add_existing_layer(const layer& layer) { + add_existing_layer(layer.data()); + } + + /** + * Serialize the data accumulated in this builder into a vector tile. + * The data will be appended to the specified buffer. The buffer + * doesn't have to be empty. + * + * @tparam TBuffer Type of buffer. Must be std:string or other buffer + * type supported by protozero. + * @param buffer Buffer to append the encoded vector tile to. + */ + template + void serialize(TBuffer& buffer) const { + const std::size_t estimated_size = std::accumulate(m_layers.cbegin(), m_layers.cend(), 0ULL, [](std::size_t sum, const std::unique_ptr& layer) { + return sum + layer->estimated_size(); + }); + + protozero::basic_pbf_builder pbf_tile_builder{buffer}; + pbf_tile_builder.reserve(estimated_size); + for (const auto& layer : m_layers) { + layer->build(pbf_tile_builder); + } + } + + /** + * Serialize the data accumulated in this builder into a vector_tile + * and return it. + * + * If you want to use an existing buffer instead, use the serialize() + * member function taking a TBuffer& as parameter. + * + * @returns std::string Buffer with encoded vector_tile data. 
+ */ + std::string serialize() const { + std::string data; + serialize(data); + return data; + } + + }; // class tile_builder + + /** + * The layer_builder is used to add a new layer to a vector tile that is + * being built. + */ + class layer_builder { + + vtzero::detail::layer_builder_impl* m_layer; + + friend class geometry_feature_builder; + friend class point_feature_builder; + friend class linestring_feature_builder; + friend class polygon_feature_builder; + + vtzero::detail::layer_builder_impl& get_layer_impl() noexcept { + return *m_layer; + } + + template + using is_layer = std::is_same::type>::type, layer>; + + public: + + /** + * Construct a layer_builder to build a new layer with the same name, + * version, and extent as an existing layer. + * + * @param tile The tile builder we want to create this layer in. + * @param layer Existing layer we want to use the name, version, and + * extent from + */ + layer_builder(vtzero::tile_builder& tile, const layer& layer) : + m_layer(tile.add_layer(layer)) { + } + + /** + * Construct a layer_builder to build a completely new layer. + * + * @tparam TString Some string type (such as std::string or const char*) + * @param tile The tile builder we want to create this layer in. + * @param name The name of the new layer. + * @param version The vector tile spec version of the new layer. + * @param extent The extent of the new layer. + */ + template ::value, int>::type = 0> + layer_builder(vtzero::tile_builder& tile, TString&& name, uint32_t version = 2, uint32_t extent = 4096) : + m_layer(tile.add_layer(std::forward(name), version, extent)) { + } + + /** + * Add key to the keys table without checking for duplicates. This + * function is usually used when an external index is used which takes + * care of the duplication check. + * + * @param text The key. + * @returns The index value of this key. + */ + index_value add_key_without_dup_check(const data_view text) { + return m_layer->add_key_without_dup_check(text); + } + + /** + * Add key to the keys table. This function will consult the internal + * index in the layer to make sure the key is only in the table once. + * It will either return the index value of an existing key or add the + * new key and return its index value. + * + * @param text The key. + * @returns The index value of this key. + */ + index_value add_key(const data_view text) { + return m_layer->add_key(text); + } + + /** + * Add value to the values table without checking for duplicates. This + * function is usually used when an external index is used which takes + * care of the duplication check. + * + * @param value The property value. + * @returns The index value of this value. + */ + index_value add_value_without_dup_check(const property_value value) { + return m_layer->add_value_without_dup_check(value); + } + + /** + * Add value to the values table without checking for duplicates. This + * function is usually used when an external index is used which takes + * care of the duplication check. + * + * @param value The property value. + * @returns The index value of this value. + */ + index_value add_value_without_dup_check(const encoded_property_value& value) { + return m_layer->add_value_without_dup_check(value); + } + + /** + * Add value to the values table. This function will consult the + * internal index in the layer to make sure the value is only in the + * table once. It will either return the index value of an existing + * value or add the new value and return its index value. + * + * @param value The property value. 
+ * @returns The index value of this value. + */ + index_value add_value(const property_value value) { + return m_layer->add_value(value); + } + + /** + * Add value to the values table. This function will consult the + * internal index in the layer to make sure the value is only in the + * table once. It will either return the index value of an existing + * value or add the new value and return its index value. + * + * @param value The property value. + * @returns The index value of this value. + */ + index_value add_value(const encoded_property_value& value) { + return m_layer->add_value(value); + } + + /** + * Add a feature from an existing layer to the new layer. The feature + * will be copied completely over to the new layer including its + * geometry and all its properties. + */ + void add_feature(const feature& feature); + + }; // class layer_builder + + /** + * Parent class for the point_feature_builder, linestring_feature_builder + * and polygon_feature_builder classes. You can not instantiate this class + * directly, use it through its derived classes. + */ + class feature_builder : public detail::feature_builder_base { + + class countdown_value { + + uint32_t m_value = 0; + + public: + + countdown_value() noexcept = default; + + ~countdown_value() noexcept { + assert_is_zero(); + } + + countdown_value(const countdown_value&) = delete; + + countdown_value& operator=(const countdown_value&) = delete; + + countdown_value(countdown_value&& other) noexcept : + m_value(other.m_value) { + other.m_value = 0; + } + + countdown_value& operator=(countdown_value&& other) noexcept { + m_value = other.m_value; + other.m_value = 0; + return *this; + } + + uint32_t value() const noexcept { + return m_value; + } + + void set(const uint32_t value) noexcept { + m_value = value; + } + + void decrement() { + vtzero_assert(m_value > 0 && "too many calls to set_point()"); + --m_value; + } + + void assert_is_zero() const noexcept { + vtzero_assert_in_noexcept_function(m_value == 0 && + "not enough calls to set_point()"); + } + + }; // countdown_value + + protected: + + /// Encoded geometry. + protozero::packed_field_uint32 m_pbf_geometry{}; + + /// Number of points still to be set for the geometry to be complete. + countdown_value m_num_points; + + /// Last point (used to calculate delta between coordinates) + point m_cursor{0, 0}; + + /// Constructor. + explicit feature_builder(detail::layer_builder_impl* layer) : + feature_builder_base(layer) { + } + + /// Helper function to check size isn't too large + template + uint32_t check_num_points(T size) { + if (size >= (1UL << 29U)) { + throw geometry_exception{"Maximum of 2^29 - 1 points allowed in geometry"}; + } + return static_cast(size); + } + + /// Helper function to make sure we have everything before adding a property + void prepare_to_add_property() { + if (m_pbf_geometry.valid()) { + m_num_points.assert_is_zero(); + m_pbf_geometry.commit(); + } + if (!m_pbf_tags.valid()) { + m_pbf_tags = {m_feature_writer, detail::pbf_feature::tags}; + } + } + + public: + + /** + * If the feature was not committed, the destructor will roll back all + * the changes. + */ + ~feature_builder() { + try { + rollback(); + } catch (...) 
{ + // ignore exceptions + } + } + + /// Builder classes can not be copied + feature_builder(const feature_builder&) = delete; + + /// Builder classes can not be copied + feature_builder& operator=(const feature_builder&) = delete; + + /// Builder classes can be moved + feature_builder(feature_builder&& other) noexcept = default; + + /// Builder classes can be moved + feature_builder& operator=(feature_builder&& other) noexcept = default; + + /** + * Set the ID of this feature. + * + * You can only call this method once and it must be before calling + * any method manipulating the geometry. + * + * @param id The ID. + */ + void set_id(uint64_t id) { + vtzero_assert(m_feature_writer.valid() && + "Can not call set_id() after commit() or rollback()"); + vtzero_assert(!m_pbf_geometry.valid() && + !m_pbf_tags.valid() && + "Call set_id() before setting the geometry or adding properties"); + set_id_impl(id); + } + + /** + * Copy the ID of an existing feature to this feature. If the + * feature doesn't have an ID, no ID is set. + * + * You can only call this method once and it must be before calling + * any method manipulating the geometry. + * + * @param feature The feature to copy the ID from. + */ + void copy_id(const feature& feature) { + vtzero_assert(m_feature_writer.valid() && + "Can not call copy_id() after commit() or rollback()"); + vtzero_assert(!m_pbf_geometry.valid() && + !m_pbf_tags.valid() && + "Call copy_id() before setting the geometry or adding properties"); + if (feature.has_id()) { + set_id_impl(feature.id()); + } + } + + /** + * Add a property to this feature. Can only be called after all the + * methods manipulating the geometry. + * + * @tparam TProp Can be type index_value_pair or property. + * @param prop The property to add. + */ + template + void add_property(TProp&& prop) { + vtzero_assert(m_feature_writer.valid() && + "Can not call add_property() after commit() or rollback()"); + prepare_to_add_property(); + add_property_impl(std::forward(prop)); + } + + /** + * Copy all properties of an existing feature to the one being built. + * + * @param feature The feature to copy the properties from. + */ + void copy_properties(const feature& feature) { + vtzero_assert(m_feature_writer.valid() && + "Can not call copy_properties() after commit() or rollback()"); + prepare_to_add_property(); + feature.for_each_property([this](const property& prop) { + add_property_impl(prop); + return true; + }); + } + + /** + * Copy all properties of an existing feature to the one being built + * using a property_mapper. + * + * @tparam TMapper Must be the property_mapper class or something + * equivalent. + * @param feature The feature to copy the properties from. + * @param mapper Instance of the property_mapper class. + */ + template + void copy_properties(const feature& feature, TMapper& mapper) { + vtzero_assert(m_feature_writer.valid() && + "Can not call copy_properties() after commit() or rollback()"); + prepare_to_add_property(); + feature.for_each_property_indexes([this, &mapper](const index_value_pair& idxs) { + add_property_impl(mapper(idxs)); + return true; + }); + } + + /** + * Add a property to this feature. Can only be called after all the + * methods manipulating the geometry. + * + * @tparam TKey Can be type index_value or data_view or anything that + * converts to it. + * @tparam TValue Can be type index_value or property_value or + * encoded_property or anything that converts to it. + * @param key The key. + * @param value The value. 
+ */ + template + void add_property(TKey&& key, TValue&& value) { + vtzero_assert(m_feature_writer.valid() && + "Can not call add_property() after commit() or rollback()"); + prepare_to_add_property(); + add_property_impl(std::forward(key), std::forward(value)); + } + + /** + * Commit this feature. Call this after all the details of this + * feature have been added. If this is not called, the feature + * will be rolled back when the destructor of the feature_builder is + * called. + * + * Once a feature has been committed or rolled back, further calls + * to commit() or rollback() don't do anything. + */ + void commit() { + if (m_feature_writer.valid()) { + vtzero_assert((m_pbf_geometry.valid() || m_pbf_tags.valid()) && + "Can not call commit before geometry was added"); + if (m_pbf_geometry.valid()) { + m_pbf_geometry.commit(); + } + do_commit(); + } + } + + /** + * Rollback this feature. Removed all traces of this feature from + * the layer_builder. Useful when you started creating a feature + * but then find out that its geometry is invalid or something like + * it. This will also happen automatically when the feature_builder + * is destructed and commit() hasn't been called on it. + * + * Once a feature has been committed or rolled back, further calls + * to commit() or rollback() don't do anything. + */ + void rollback() { + if (m_feature_writer.valid()) { + if (m_pbf_geometry.valid()) { + m_pbf_geometry.rollback(); + } + do_rollback(); + } + } + + }; // class feature_builder + + /** + * Used for adding a feature with a point geometry to a layer. After + * creating an object of this class you can add data to the feature in a + * specific order: + * + * * Optionally add the ID using set_id(). + * * Add the (multi)point geometry using add_point(), add_points() and + * set_point(), or add_points_from_container(). + * * Optionally add any number of properties using add_property(). + * + * @code + * vtzero::tile_builder tb; + * vtzero::layer_builder lb{tb}; + * vtzero::point_feature_builder fb{lb}; + * fb.set_id(123); // optionally set ID + * fb.add_point(10, 20) // add point geometry + * fb.add_property("foo", "bar"); // add property + * @endcode + */ + class point_feature_builder : public feature_builder { + + public: + + /** + * Constructor + * + * @param layer The layer we want to create this feature in. + */ + explicit point_feature_builder(layer_builder layer) : + feature_builder(&layer.get_layer_impl()) { + m_feature_writer.add_enum(detail::pbf_feature::type, static_cast(GeomType::POINT)); + } + + /** + * Add a single point as the geometry to this feature. + * + * @param p The point to add. + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void add_point(const point p) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(!m_pbf_geometry.valid() && + !m_pbf_tags.valid() && + "add_point() can only be called once"); + m_pbf_geometry = {m_feature_writer, detail::pbf_feature::geometry}; + m_pbf_geometry.add_element(detail::command_move_to(1)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.x)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.y)); + } + + /** + * Add a single point as the geometry to this feature. + * + * @param x X coordinate of the point to add. + * @param y Y coordinate of the point to add. + * + * @pre You must not have any calls to add_property() before calling + * this method. 
+ */ + void add_point(const int32_t x, const int32_t y) { + add_point(point{x, y}); + } + + /** + * Add a single point as the geometry to this feature. + * + * @tparam TPoint A type that can be converted to vtzero::point using + * the create_vtzero_point function. + * @param p The point to add. + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void add_point(TPoint&& p) { + add_point(create_vtzero_point(std::forward(p))); + } + + /** + * Declare the intent to add a multipoint geometry with *count* points + * to this feature. + * + * @param count The number of points in the multipoint geometry. + * + * @pre @code count > 0 && count < 2^29 @endcode + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void add_points(uint32_t count) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(!m_pbf_geometry.valid() && + "can not call add_points() twice or mix with add_point()"); + vtzero_assert(!m_pbf_tags.valid() && + "add_points() has to be called before properties are added"); + vtzero_assert(count > 0 && count < (1UL << 29U) && "add_points() must be called with 0 < count < 2^29"); + m_num_points.set(count); + m_pbf_geometry = {m_feature_writer, detail::pbf_feature::geometry}; + m_pbf_geometry.add_element(detail::command_move_to(count)); + } + + /** + * Set a point in the multipoint geometry. + * + * @param p The point. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_points(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void set_point(const point p) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(m_pbf_geometry.valid() && + "call add_points() before set_point()"); + vtzero_assert(!m_pbf_tags.valid() && + "set_point() has to be called before properties are added"); + m_num_points.decrement(); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.x - m_cursor.x)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.y - m_cursor.y)); + m_cursor = p; + } + + /** + * Set a point in the multipoint geometry. + * + * @param x X coordinate of the point to set. + * @param y Y coordinate of the point to set. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_points(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void set_point(const int32_t x, const int32_t y) { + set_point(point{x, y}); + } + + /** + * Set a point in the multipoint geometry. + * + * @tparam TPoint A type that can be converted to vtzero::point using + * the create_vtzero_point function. + * @param p The point to add. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_points(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void set_point(TPoint&& p) { + set_point(create_vtzero_point(std::forward(p))); + } + + /** + * Add the points from the specified container as multipoint geometry + * to this feature. + * + * @tparam TContainer The container type. Must support the size() + * method, be iterable using a range for loop, and contain + * objects of type vtzero::point or something convertible to + * it. 
+ * @param container The container to read the points from. + * + * @throws geometry_exception If there are more than 2^32-1 members in + * the container. + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void add_points_from_container(const TContainer& container) { + add_points(check_num_points(container.size())); + for (const auto& element : container) { + set_point(element); + } + } + + }; // class point_feature_builder + + /** + * Used for adding a feature with a (multi)linestring geometry to a layer. + * After creating an object of this class you can add data to the + * feature in a specific order: + * + * * Optionally add the ID using set_id(). + * * Add the (multi)linestring geometry using add_linestring() or + * add_linestring_from_container(). + * * Optionally add any number of properties using add_property(). + * + * @code + * vtzero::tile_builder tb; + * vtzero::layer_builder lb{tb}; + * vtzero::linestring_feature_builder fb{lb}; + * fb.set_id(123); // optionally set ID + * fb.add_linestring(2); + * fb.set_point(10, 10); + * fb.set_point(10, 20); + * fb.add_property("foo", "bar"); // add property + * @endcode + */ + class linestring_feature_builder : public feature_builder { + + bool m_start_line = false; + + public: + + /** + * Constructor + * + * @param layer The layer we want to create this feature in. + */ + explicit linestring_feature_builder(layer_builder layer) : + feature_builder(&layer.get_layer_impl()) { + m_feature_writer.add_enum(detail::pbf_feature::type, static_cast(GeomType::LINESTRING)); + } + + /** + * Declare the intent to add a linestring geometry with *count* points + * to this feature. + * + * @param count The number of points in the linestring. + * + * @pre @code count > 1 && count < 2^29 @endcode + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void add_linestring(const uint32_t count) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(!m_pbf_tags.valid() && + "add_linestring() has to be called before properties are added"); + vtzero_assert(count > 1 && count < (1UL << 29U) && "add_linestring() must be called with 1 < count < 2^29"); + m_num_points.assert_is_zero(); + if (!m_pbf_geometry.valid()) { + m_pbf_geometry = {m_feature_writer, detail::pbf_feature::geometry}; + } + m_num_points.set(count); + m_start_line = true; + } + + /** + * Set a point in the multilinestring geometry opened with + * add_linestring(). + * + * @param p The point. + * + * @throws geometry_exception if the point set is the same as the + * previous point. This would create zero-length segments + * which are not allowed according to the vector tile spec. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_linestring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. 
+ */ + void set_point(const point p) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(m_pbf_geometry.valid() && + "call add_linestring() before set_point()"); + vtzero_assert(!m_pbf_tags.valid() && + "set_point() has to be called before properties are added"); + m_num_points.decrement(); + if (m_start_line) { + m_pbf_geometry.add_element(detail::command_move_to(1)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.x - m_cursor.x)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.y - m_cursor.y)); + m_pbf_geometry.add_element(detail::command_line_to(m_num_points.value())); + m_start_line = false; + } else { + if (p == m_cursor) { + throw geometry_exception{"Zero-length segments in linestrings are not allowed."}; + } + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.x - m_cursor.x)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.y - m_cursor.y)); + } + m_cursor = p; + } + + /** + * Set a point in the multilinestring geometry opened with + * add_linestring(). + * + * @param x X coordinate of the point to set. + * @param y Y coordinate of the point to set. + * + * @throws geometry_exception if the point set is the same as the + * previous point. This would create zero-length segments + * which are not allowed according to the vector tile spec. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_linestring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void set_point(const int32_t x, const int32_t y) { + set_point(point{x, y}); + } + + /** + * Set a point in the multilinestring geometry opened with + * add_linestring(). + * + * @tparam TPoint A type that can be converted to vtzero::point using + * the create_vtzero_point function. + * @param p The point to add. + * + * @throws geometry_exception if the point set is the same as the + * previous point. This would create zero-length segments + * which are not allowed according to the vector tile spec. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_linestring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void set_point(TPoint&& p) { + set_point(create_vtzero_point(std::forward(p))); + } + + /** + * Add the points from the specified container as a linestring geometry + * to this feature. + * + * @tparam TContainer The container type. Must support the size() + * method, be iterable using a range for loop, and contain + * objects of type vtzero::point or something convertible to + * it. + * @param container The container to read the points from. + * + * @throws geometry_exception If there are more than 2^32-1 members in + * the container or if two consecutive points in the container + * are identical. + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void add_linestring_from_container(const TContainer& container) { + add_linestring(check_num_points(container.size())); + for (const auto& element : container) { + set_point(element); + } + } + + }; // class linestring_feature_builder + + /** + * Used for adding a feature with a (multi)polygon geometry to a layer. + * After creating an object of this class you can add data to the + * feature in a specific order: + * + * * Optionally add the ID using set_id(). 
+ * * Add the (multi)polygon geometry using add_ring() or + * add_ring_from_container(). + * * Optionally add any number of properties using add_property(). + * + * @code + * vtzero::tile_builder tb; + * vtzero::layer_builder lb{tb}; + * vtzero::polygon_feature_builder fb{lb}; + * fb.set_id(123); // optionally set ID + * fb.add_ring(5); + * fb.set_point(10, 10); + * ... + * fb.add_property("foo", "bar"); // add property + * @endcode + */ + class polygon_feature_builder : public feature_builder { + + point m_first_point{0, 0}; + bool m_start_ring = false; + + public: + + /** + * Constructor + * + * @param layer The layer we want to create this feature in. + */ + explicit polygon_feature_builder(layer_builder layer) : + feature_builder(&layer.get_layer_impl()) { + m_feature_writer.add_enum(detail::pbf_feature::type, static_cast(GeomType::POLYGON)); + } + + /** + * Declare the intent to add a ring with *count* points to this + * feature. + * + * @param count The number of points in the ring. + * + * @pre @code count > 3 && count < 2^29 @endcode + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void add_ring(const uint32_t count) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(!m_pbf_tags.valid() && + "add_ring() has to be called before properties are added"); + vtzero_assert(count > 3 && count < (1UL << 29U) && "add_ring() must be called with 3 < count < 2^29"); + m_num_points.assert_is_zero(); + if (!m_pbf_geometry.valid()) { + m_pbf_geometry = {m_feature_writer, detail::pbf_feature::geometry}; + } + m_num_points.set(count); + m_start_ring = true; + } + + /** + * Set a point in the ring opened with add_ring(). + * + * @param p The point. + * + * @throws geometry_exception if the point set is the same as the + * previous point. This would create zero-length segments + * which are not allowed according to the vector tile spec. + * This exception is also thrown when the last point in the + * ring is not equal to the first point, because this would + * not create a closed ring. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_ring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. 
+ */ + void set_point(const point p) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(m_pbf_geometry.valid() && + "call add_ring() before set_point()"); + vtzero_assert(!m_pbf_tags.valid() && + "set_point() has to be called before properties are added"); + m_num_points.decrement(); + if (m_start_ring) { + m_first_point = p; + m_pbf_geometry.add_element(detail::command_move_to(1)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.x - m_cursor.x)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.y - m_cursor.y)); + m_pbf_geometry.add_element(detail::command_line_to(m_num_points.value() - 1)); + m_start_ring = false; + m_cursor = p; + } else if (m_num_points.value() == 0) { + if (p != m_first_point) { + throw geometry_exception{"Last point in a ring must be the same as the first point."}; + } + // spec 4.3.3.3 "A ClosePath command MUST have a command count of 1" + m_pbf_geometry.add_element(detail::command_close_path()); + } else { + if (p == m_cursor) { + throw geometry_exception{"Zero-length segments in linestrings are not allowed."}; + } + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.x - m_cursor.x)); + m_pbf_geometry.add_element(protozero::encode_zigzag32(p.y - m_cursor.y)); + m_cursor = p; + } + } + + /** + * Set a point in the ring opened with add_ring(). + * + * @param x X coordinate of the point to set. + * @param y Y coordinate of the point to set. + * + * @throws geometry_exception if the point set is the same as the + * previous point. This would create zero-length segments + * which are not allowed according to the vector tile spec. + * This exception is also thrown when the last point in the + * ring is not equal to the first point, because this would + * not create a closed ring. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_ring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + void set_point(const int32_t x, const int32_t y) { + set_point(point{x, y}); + } + + /** + * Set a point in the ring opened with add_ring(). + * + * @tparam TPoint A type that can be converted to vtzero::point using + * the create_vtzero_point function. + * @param p The point to add. + * + * @throws geometry_exception if the point set is the same as the + * previous point. This would create zero-length segments + * which are not allowed according to the vector tile spec. + * This exception is also thrown when the last point in the + * ring is not equal to the first point, because this would + * not create a closed ring. + * + * @pre There must have been less than *count* calls to set_point() + * already after a call to add_ring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void set_point(TPoint&& p) { + set_point(create_vtzero_point(std::forward(p))); + } + + /** + * Close a ring opened with add_ring(). This can be called for the + * last point (which will be equal to the first point) in the ring + * instead of calling set_point(). + * + * @pre There must have been *count* - 1 calls to set_point() + * already after a call to add_ring(count). + * + * @pre You must not have any calls to add_property() before calling + * this method. 
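+ *
+ * A minimal sketch of a triangle ring (the builder `fb` is the same
+ * hypothetical name used in the class-level example above):
+ *
+ * @code
+ * fb.add_ring(4);
+ * fb.set_point(10, 10);
+ * fb.set_point(20, 10);
+ * fb.set_point(20, 20);
+ * fb.close_ring(); // instead of fb.set_point(10, 10);
+ * @endcode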
+ */ + void close_ring() { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(m_pbf_geometry.valid() && + "Call add_ring() before you can call close_ring()"); + vtzero_assert(!m_pbf_tags.valid() && + "close_ring() has to be called before properties are added"); + vtzero_assert(m_num_points.value() == 1 && + "wrong number of points in ring"); + m_pbf_geometry.add_element(detail::command_close_path()); + m_num_points.decrement(); + } + + /** + * Add the points from the specified container as a ring to this + * feature. + * + * @tparam TContainer The container type. Must support the size() + * method, be iterable using a range for loop, and contain + * objects of type vtzero::point or something convertible to + * it. + * @param container The container to read the points from. + * + * @throws geometry_exception If there are more than 2^32-1 members in + * the container or if two consecutive points in the container + * are identical or if the last point is not the same as the + * first point. + * + * @pre You must not have any calls to add_property() before calling + * this method. + */ + template + void add_ring_from_container(const TContainer& container) { + add_ring(check_num_points(container.size())); + for (const auto& element : container) { + set_point(element); + } + } + + }; // class polygon_feature_builder + + /** + * Used for adding a feature to a layer using an existing geometry. After + * creating an object of this class you can add data to the feature in a + * specific order: + * + * * Optionally add the ID using set_id(). + * * Add the geometry using set_geometry(). + * * Optionally add any number of properties using add_property(). + * + * @code + * auto geom = ... // get geometry from a feature you are reading + * ... + * vtzero::tile_builder tb; + * vtzero::layer_builder lb{tb}; + * vtzero::geometry_feature_builder fb{lb}; + * fb.set_id(123); // optionally set ID + * fb.set_geometry(geom) // add geometry + * fb.add_property("foo", "bar"); // add property + * @endcode + */ + class geometry_feature_builder : public detail::feature_builder_base { + + public: + + /** + * Constructor + * + * @param layer The layer we want to create this feature in. + */ + explicit geometry_feature_builder(layer_builder layer) : + feature_builder_base(&layer.get_layer_impl()) { + } + + /** + * If the feature was not committed, the destructor will roll back all + * the changes. + */ + ~geometry_feature_builder() noexcept { + try { + rollback(); + } catch (...) { + // ignore exceptions + } + } + + /// Feature builders can not be copied. + geometry_feature_builder(const geometry_feature_builder&) = delete; + + /// Feature builders can not be copied. + geometry_feature_builder& operator=(const geometry_feature_builder&) = delete; + + /// Feature builders can be moved. + geometry_feature_builder(geometry_feature_builder&&) noexcept = default; + + /// Feature builders can be moved. + geometry_feature_builder& operator=(geometry_feature_builder&&) noexcept = default; + + /** + * Set the ID of this feature. + * + * You can only call this function once and it must be before calling + * set_geometry(). + * + * @param id The ID. + */ + void set_id(uint64_t id) { + vtzero_assert(m_feature_writer.valid() && + "Can not call set_id() after commit() or rollback()"); + vtzero_assert(!m_pbf_tags.valid()); + set_id_impl(id); + } + + /** + * Copy the ID of an existing feature to this feature. If the + * feature doesn't have an ID, no ID is set. 
+ * + * You can only call this function once and it must be before calling + * set_geometry(). + * + * @param feature The feature to copy the ID from. + */ + void copy_id(const feature& feature) { + vtzero_assert(m_feature_writer.valid() && + "Can not call copy_id() after commit() or rollback()"); + vtzero_assert(!m_pbf_tags.valid()); + if (feature.has_id()) { + set_id_impl(feature.id()); + } + } + + /** + * Set the geometry of this feature. + * + * You can only call this method once and it must be before calling the + * add_property() method. + * + * @param geometry The geometry. + */ + void set_geometry(const geometry& geometry) { + vtzero_assert(m_feature_writer.valid() && + "Can not add geometry after commit() or rollback()"); + vtzero_assert(!m_pbf_tags.valid()); + m_feature_writer.add_enum(detail::pbf_feature::type, static_cast(geometry.type())); + m_feature_writer.add_string(detail::pbf_feature::geometry, geometry.data()); + m_pbf_tags = {m_feature_writer, detail::pbf_feature::tags}; + } + + /** + * Add a property to this feature. Can only be called after the + * set_geometry method. + * + * @tparam TProp Can be type index_value_pair or property. + * @param prop The property to add. + */ + template + void add_property(TProp&& prop) { + vtzero_assert(m_feature_writer.valid() && + "Can not call add_property() after commit() or rollback()"); + add_property_impl(std::forward(prop)); + } + + /** + * Add a property to this feature. Can only be called after the + * set_geometry method. + * + * @tparam TKey Can be type index_value or data_view or anything that + * converts to it. + * @tparam TValue Can be type index_value or property_value or + * encoded_property or anything that converts to it. + * @param key The key. + * @param value The value. + */ + template + void add_property(TKey&& key, TValue&& value) { + vtzero_assert(m_feature_writer.valid() && + "Can not call add_property() after commit() or rollback()"); + add_property_impl(std::forward(key), std::forward(value)); + } + + /** + * Copy all properties of an existing feature to the one being built. + * + * @param feature The feature to copy the properties from. + */ + void copy_properties(const feature& feature) { + vtzero_assert(m_feature_writer.valid() && + "Can not call copy_properties() after commit() or rollback()"); + feature.for_each_property([this](const property& prop) { + add_property_impl(prop); + return true; + }); + } + + /** + * Copy all properties of an existing feature to the one being built + * using a property_mapper. + * + * @tparam TMapper Must be the property_mapper class or something + * equivalent. + * @param feature The feature to copy the properties from. + * @param mapper Instance of the property_mapper class. + */ + template + void copy_properties(const feature& feature, TMapper& mapper) { + vtzero_assert(m_feature_writer.valid() && + "Can not call copy_properties() after commit() or rollback()"); + feature.for_each_property_indexes([this, &mapper](const index_value_pair& idxs) { + add_property_impl(mapper(idxs)); + return true; + }); + } + + /** + * Commit this feature. Call this after all the details of this + * feature have been added. If this is not called, the feature + * will be rolled back when the destructor of the feature_builder is + * called. + * + * Once a feature has been committed or rolled back, further calls + * to commit() or rollback() don't do anything. 
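+ *
+ * For illustration, the class-level example above completed with a
+ * commit() call (`lb` and `geom` are the same hypothetical names used
+ * there):
+ *
+ * @code
+ * vtzero::geometry_feature_builder fb{lb};
+ * fb.set_geometry(geom);
+ * fb.add_property("foo", "bar");
+ * fb.commit(); // without this the feature is rolled back on destruction
+ * @endcode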
+ */ + void commit() { + if (m_feature_writer.valid()) { + vtzero_assert(m_pbf_tags.valid() && + "Can not call commit before geometry was added"); + do_commit(); + } + } + + /** + * Rollback this feature. Removed all traces of this feature from + * the layer_builder. Useful when you started creating a feature + * but then find out that its geometry is invalid or something like + * it. This will also happen automatically when the feature_builder + * is destructed and commit() hasn't been called on it. + * + * Once a feature has been committed or rolled back, further calls + * to commit() or rollback() don't do anything. + */ + void rollback() { + if (m_feature_writer.valid()) { + do_rollback(); + } + } + + }; // class geometry_feature_builder + + inline void layer_builder::add_feature(const feature& feature) { + geometry_feature_builder feature_builder{*this}; + if (feature.has_id()) { + feature_builder.set_id(feature.id()); + } + feature_builder.set_geometry(feature.geometry()); + feature.for_each_property([&feature_builder](const property& p) { + feature_builder.add_property(p); + return true; + }); + feature_builder.commit(); + } + +} // namespace vtzero + +#endif // VTZERO_BUILDER_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/builder_impl.hpp b/contrib/vtzero/1.1.0/include/vtzero/builder_impl.hpp new file mode 100644 index 0000000..1b348df --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/builder_impl.hpp @@ -0,0 +1,267 @@ +#ifndef VTZERO_BUILDER_IMPL_HPP +#define VTZERO_BUILDER_IMPL_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file builder_impl.hpp + * + * @brief Contains classes internal to the builder. + */ + +#include "encoded_property_value.hpp" +#include "property_value.hpp" +#include "types.hpp" + +#include +#include + +#include +#include +#include +#include + +namespace vtzero { + + namespace detail { + + class layer_builder_impl { + + // If this layer is copied from an existing layer, this points + // to the data of the original layer. For newly built layers, + // this is empty. + data_view m_data_view{}; + + // Buffer containing the encoded layer metadata and features + std::string m_data; + + // Buffer containing the encoded keys table + std::string m_keys_data; + + // Buffer containing the encoded values table + std::string m_values_data; + + protozero::pbf_builder m_pbf_message_layer{}; + protozero::pbf_builder m_pbf_message_keys{}; + protozero::pbf_builder m_pbf_message_values{}; + + // The number of features in the layer + std::size_t m_num_features = 0; + + // Vector tile spec version + uint32_t m_version = 0; + + // The number of keys in the keys table + uint32_t m_num_keys = 0; + + // The number of values in the values table + uint32_t m_num_values = 0; + + // Below this value, no index will be used to find entries in the + // key/value tables. This number is based on some initial + // benchmarking but probably needs some tuning. 
+ // See also https://github.com/mapbox/vtzero/issues/30 + static constexpr const uint32_t max_entries_flat = 20; + + using map_type = std::unordered_map; + map_type m_keys_index; + map_type m_values_index; + + static index_value find_in_table(const data_view text, const std::string& data) { + uint32_t index = 0; + protozero::pbf_message pbf_table{data}; + + while (pbf_table.next()) { + const auto v = pbf_table.get_view(); + if (v == text) { + return index_value{index}; + } + ++index; + } + + return index_value{}; + } + + // Read the key or value table and populate an index from its + // entries. This is done once the table becomes too large to do + // linear search in it. + static void populate_index(const std::string& data, map_type& map) { + uint32_t index = 0; + protozero::pbf_message pbf_table{data}; + + while (pbf_table.next()) { + map[pbf_table.get_string()] = index++; + } + } + + index_value add_value_without_dup_check(const data_view text) { + m_pbf_message_values.add_string(detail::pbf_layer::values, text); + return m_num_values++; + } + + index_value add_value(const data_view text) { + const auto index = find_in_values_table(text); + if (index.valid()) { + return index; + } + return add_value_without_dup_check(text); + } + + index_value find_in_keys_table(const data_view text) { + if (m_num_keys < max_entries_flat) { + return find_in_table(text, m_keys_data); + } + + if (m_keys_index.empty()) { + populate_index(m_keys_data, m_keys_index); + } + + auto& v = m_keys_index[std::string(text)]; + if (!v.valid()) { + v = add_key_without_dup_check(text); + } + return v; + } + + index_value find_in_values_table(const data_view text) { + if (m_num_values < max_entries_flat) { + return find_in_table(text, m_values_data); + } + + if (m_values_index.empty()) { + populate_index(m_values_data, m_values_index); + } + + auto& v = m_values_index[std::string(text)]; + if (!v.valid()) { + v = add_value_without_dup_check(text); + } + return v; + } + + public: + + // This layer should be a copy of an existing layer + explicit layer_builder_impl(const data_view data) : + m_data_view(data) { + } + + // This layer is being created from scratch + template + layer_builder_impl(TString&& name, uint32_t version, uint32_t extent) : + m_pbf_message_layer(m_data), + m_pbf_message_keys(m_keys_data), + m_pbf_message_values(m_values_data), + m_version(version) { + m_pbf_message_layer.add_uint32(detail::pbf_layer::version, version); + m_pbf_message_layer.add_string(detail::pbf_layer::name, std::forward(name)); + m_pbf_message_layer.add_uint32(detail::pbf_layer::extent, extent); + } + + ~layer_builder_impl() noexcept = default; + + layer_builder_impl(const layer_builder_impl&) = delete; + layer_builder_impl& operator=(const layer_builder_impl&) = delete; + + layer_builder_impl(layer_builder_impl&&) = default; + layer_builder_impl& operator=(layer_builder_impl&&) = default; + + uint32_t version() const noexcept { + return m_version; + } + + index_value add_key_without_dup_check(const data_view text) { + m_pbf_message_keys.add_string(detail::pbf_layer::keys, text); + return m_num_keys++; + } + + index_value add_key(const data_view text) { + const auto index = find_in_keys_table(text); + if (index.valid()) { + return index; + } + return add_key_without_dup_check(text); + } + + index_value add_value_without_dup_check(const property_value value) { + return add_value_without_dup_check(value.data()); + } + + index_value add_value_without_dup_check(const encoded_property_value& value) { + return 
add_value_without_dup_check(value.data()); + } + + index_value add_value(const property_value value) { + return add_value(value.data()); + } + + index_value add_value(const encoded_property_value& value) { + return add_value(value.data()); + } + + const std::string& data() const noexcept { + return m_data; + } + + const std::string& keys_data() const noexcept { + return m_keys_data; + } + + const std::string& values_data() const noexcept { + return m_values_data; + } + + protozero::pbf_builder& message() noexcept { + return m_pbf_message_layer; + } + + void increment_feature_count() noexcept { + ++m_num_features; + } + + std::size_t estimated_size() const { + if (m_data_view.data()) { + // This is a layer created as copy from an existing layer + constexpr const std::size_t estimated_overhead_for_pbf_encoding = 8; + return m_data_view.size() + estimated_overhead_for_pbf_encoding; + } + + // This is a layer created from scratch + constexpr const std::size_t estimated_overhead_for_pbf_encoding = 8; + return data().size() + + keys_data().size() + + values_data().size() + + estimated_overhead_for_pbf_encoding; + } + + template + void build(protozero::basic_pbf_builder& pbf_tile_builder) const { + if (m_data_view.data()) { + // This is a layer created as copy from an existing layer + pbf_tile_builder.add_bytes(detail::pbf_tile::layers, m_data_view); + return; + } + + // This is a layer created from scratch + if (m_num_features > 0) { + pbf_tile_builder.add_bytes_vectored(detail::pbf_tile::layers, + data(), + keys_data(), + values_data()); + } + } + + }; // class layer_builder_impl + + } // namespace detail + +} // namespace vtzero + +#endif // VTZERO_BUILDER_IMPL_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/encoded_property_value.hpp b/contrib/vtzero/1.1.0/include/vtzero/encoded_property_value.hpp new file mode 100644 index 0000000..8a573f3 --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/encoded_property_value.hpp @@ -0,0 +1,244 @@ +#ifndef VTZERO_ENCODED_PROPERTY_VALUE_HPP +#define VTZERO_ENCODED_PROPERTY_VALUE_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file encoded_property_value.hpp + * + * @brief Contains the encoded_property_value class. + */ + +#include "types.hpp" + +#include + +#include +#include + +namespace vtzero { + + /** + * A property value encoded in the vector_tile internal format. Can be + * created from values of many different types and then later added to + * a layer/feature. + */ + class encoded_property_value { + + std::string m_data; + + public: + + /// Construct from vtzero::string_value_type. + explicit encoded_property_value(string_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_string(detail::pbf_value::string_value, value.value); + } + + /// Construct from const char*. + explicit encoded_property_value(const char* value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_string(detail::pbf_value::string_value, value); + } + + /// Construct from const char* and size_t. 
+ explicit encoded_property_value(const char* value, std::size_t size) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_string(detail::pbf_value::string_value, value, size); + } + + /// Construct from std::string. + explicit encoded_property_value(const std::string& value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_string(detail::pbf_value::string_value, value); + } + + /// Construct from vtzero::data_view. + explicit encoded_property_value(const data_view& value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_string(detail::pbf_value::string_value, value); + } + + // ------------------ + + /// Construct from vtzero::float_value_type. + explicit encoded_property_value(float_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_float(detail::pbf_value::float_value, value.value); + } + + /// Construct from float. + explicit encoded_property_value(float value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_float(detail::pbf_value::float_value, value); + } + + // ------------------ + + /// Construct from vtzero::double_value_type. + explicit encoded_property_value(double_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_double(detail::pbf_value::double_value, value.value); + } + + /// Construct from double. + explicit encoded_property_value(double value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_double(detail::pbf_value::double_value, value); + } + + // ------------------ + + /// Construct from vtzero::int_value_type. + explicit encoded_property_value(int_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_int64(detail::pbf_value::int_value, value.value); + } + + /// Construct from int64_t. + explicit encoded_property_value(int64_t value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_int64(detail::pbf_value::int_value, value); + } + + /// Construct from int32_t. + explicit encoded_property_value(int32_t value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_int64(detail::pbf_value::int_value, static_cast(value)); + } + + /// Construct from int16_t. + explicit encoded_property_value(int16_t value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_int64(detail::pbf_value::int_value, static_cast(value)); + } + + // ------------------ + + /// Construct from vtzero::uint_value_type. + explicit encoded_property_value(uint_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_uint64(detail::pbf_value::uint_value, value.value); + } + + /// Construct from uint64_t. + explicit encoded_property_value(uint64_t value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_uint64(detail::pbf_value::uint_value, value); + } + + /// Construct from uint32_t. + explicit encoded_property_value(uint32_t value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_uint64(detail::pbf_value::uint_value, static_cast(value)); + } + + /// Construct from uint16_t. + explicit encoded_property_value(uint16_t value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_uint64(detail::pbf_value::uint_value, static_cast(value)); + } + + // ------------------ + + /// Construct from vtzero::sint_value_type. 
+ explicit encoded_property_value(sint_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_sint64(detail::pbf_value::sint_value, value.value); + } + + // ------------------ + + /// Construct from vtzero::bool_value_type. + explicit encoded_property_value(bool_value_type value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_bool(detail::pbf_value::bool_value, value.value); + } + + /// Construct from bool. + explicit encoded_property_value(bool value) { + protozero::pbf_builder pbf_message_value{m_data}; + pbf_message_value.add_bool(detail::pbf_value::bool_value, value); + } + + // ------------------ + + /** + * Get view of the raw data stored inside. + */ + data_view data() const noexcept { + return {m_data.data(), m_data.size()}; + } + + /** + * Hash function compatible with std::hash. + */ + std::size_t hash() const noexcept { + return std::hash{}(m_data); + } + + }; // class encoded_property_value + + /// Encoded property values are equal if they contain the same data. + inline bool operator==(const encoded_property_value& lhs, const encoded_property_value& rhs) noexcept { + return lhs.data() == rhs.data(); + } + + /// Encoded property values are unequal if they are not equal. + inline bool operator!=(const encoded_property_value& lhs, const encoded_property_value& rhs) noexcept { + return !(lhs == rhs); + } + + /// Arbitrary ordering based on internal data. + inline bool operator<(const encoded_property_value& lhs, const encoded_property_value& rhs) noexcept { + return lhs.data() < rhs.data(); + } + + /// Arbitrary ordering based on internal data. + inline bool operator<=(const encoded_property_value& lhs, const encoded_property_value& rhs) noexcept { + return lhs.data() <= rhs.data(); + } + + /// Arbitrary ordering based on internal data. + inline bool operator>(const encoded_property_value& lhs, const encoded_property_value& rhs) noexcept { + return lhs.data() > rhs.data(); + } + + /// Arbitrary ordering based on internal data. + inline bool operator>=(const encoded_property_value& lhs, const encoded_property_value& rhs) noexcept { + return lhs.data() >= rhs.data(); + } + +} // namespace vtzero + +namespace std { + + /** + * Specialization of std::hash for encoded_property_value. + */ + template <> + struct hash { + + /// key vtzero::encoded_property_value + using argument_type = vtzero::encoded_property_value; + + /// hash result: size_t + using result_type = std::size_t; + + /// calculate the hash of the argument + std::size_t operator()(const vtzero::encoded_property_value& value) const noexcept { + return value.hash(); + } + + }; // struct hash + +} // namespace std + +#endif // VTZERO_ENCODED_PROPERTY_VALUE_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/exception.hpp b/contrib/vtzero/1.1.0/include/vtzero/exception.hpp new file mode 100644 index 0000000..80b7259 --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/exception.hpp @@ -0,0 +1,134 @@ +#ifndef VTZERO_EXCEPTION_HPP +#define VTZERO_EXCEPTION_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file exception.hpp + * + * @brief Contains the exceptions used in the vtzero library. 
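+ *
+ * For illustration, a minimal error-handling sketch (the tile processing
+ * itself is only hinted at):
+ *
+ * @code
+ * try {
+ *     // ... read or build a tile with vtzero ...
+ * } catch (const vtzero::geometry_exception&) {
+ *     // invalid geometry encoding
+ * } catch (const vtzero::exception&) {
+ *     // any other error reported by the library
+ * }
+ * @endcode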
+ */ + +#include +#include +#include + +namespace vtzero { + + /** + * Base class for all exceptions directly thrown by the vtzero library. + */ + class exception : public std::runtime_error { + + public: + + /// Constructor + explicit exception(const char* message) : + std::runtime_error(message) { + } + + /// Constructor + explicit exception(const std::string& message) : + std::runtime_error(message) { + } + + }; // class exception + + /** + * This exception is thrown when vector tile encoding isn't valid according + * to the vector tile specification. + */ + class format_exception : public exception { + + public: + + /// Constructor + explicit format_exception(const char* message) : + exception(message) { + } + + /// Constructor + explicit format_exception(const std::string& message) : + exception(message) { + } + + }; // class format_exception + + /** + * This exception is thrown when a geometry encoding isn't valid according + * to the vector tile specification. + */ + class geometry_exception : public format_exception { + + public: + + /// Constructor + explicit geometry_exception(const char* message) : + format_exception(message) { + } + + /// Constructor + explicit geometry_exception(const std::string& message) : + format_exception(message) { + } + + }; // class geometry_exception + + /** + * This exception is thrown when a property value is accessed using the + * wrong type. + */ + class type_exception : public exception { + + public: + + /// Constructor + explicit type_exception() : + exception("wrong property value type") { + } + + }; // class type_exception + + /** + * This exception is thrown when an unknown version number is found in the + * layer. + */ + class version_exception : public exception { + + public: + + /// Constructor + explicit version_exception(const uint32_t version) : + exception(std::string{"unknown vector tile version: "} + + std::to_string(version)) { + } + + }; // version_exception + + /** + * This exception is thrown when an index into the key or value table + * in a layer is out of range. This can only happen if the tile data is + * invalid. + */ + class out_of_range_exception : public exception { + + public: + + /// Constructor + explicit out_of_range_exception(const uint32_t index) : + exception(std::string{"index out of range: "} + + std::to_string(index)) { + } + + }; // out_of_range_exception + +} // namespace vtzero + +#endif // VTZERO_EXCEPTION_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/feature.hpp b/contrib/vtzero/1.1.0/include/vtzero/feature.hpp new file mode 100644 index 0000000..745d49a --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/feature.hpp @@ -0,0 +1,315 @@ +#ifndef VTZERO_FEATURE_HPP +#define VTZERO_FEATURE_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file feature.hpp + * + * @brief Contains the feature class. + */ + +#include "exception.hpp" +#include "property.hpp" +#include "property_value.hpp" +#include "types.hpp" + +#include + +#include +#include +#include +#include + +namespace vtzero { + + class layer; + + /** + * A feature according to spec 4.2. + * + * Note that a feature will internally contain a pointer to the layer it + * came from. The layer has to stay valid as long as the feature is used. 
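+ *
+ * For illustration, a minimal sketch of inspecting a feature (`f` is a
+ * hypothetical valid feature obtained from a layer):
+ *
+ * @code
+ * if (f.has_id()) {
+ *     const uint64_t id = f.id();
+ * }
+ * f.for_each_property([](const vtzero::property& p) {
+ *     // p.key() and p.value() give access to the property data
+ *     return true; // continue the iteration
+ * });
+ * @endcode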
+ */ + class feature { + + using uint32_it_range = protozero::iterator_range; + + const layer* m_layer = nullptr; + uint64_t m_id = 0; // defaults to 0, see https://github.com/mapbox/vector-tile-spec/blob/master/2.1/vector_tile.proto#L32 + uint32_it_range m_properties{}; + protozero::pbf_reader::const_uint32_iterator m_property_iterator{}; + std::size_t m_num_properties = 0; + data_view m_geometry{}; + GeomType m_geometry_type = GeomType::UNKNOWN; // defaults to UNKNOWN, see https://github.com/mapbox/vector-tile-spec/blob/master/2.1/vector_tile.proto#L41 + bool m_has_id = false; + + public: + + /** + * Construct an invalid feature object. + */ + feature() = default; + + /** + * Construct a feature object. + * + * @throws format_exception if the layer data is ill-formed. + */ + feature(const layer* layer, const data_view data) : + m_layer(layer) { + vtzero_assert(layer); + vtzero_assert(data.data()); + + protozero::pbf_message reader{data}; + + while (reader.next()) { + switch (reader.tag_and_type()) { + case protozero::tag_and_type(detail::pbf_feature::id, protozero::pbf_wire_type::varint): + m_id = reader.get_uint64(); + m_has_id = true; + break; + case protozero::tag_and_type(detail::pbf_feature::tags, protozero::pbf_wire_type::length_delimited): + if (m_properties.begin() != protozero::pbf_reader::const_uint32_iterator{}) { + throw format_exception{"Feature has more than one tags field"}; + } + m_properties = reader.get_packed_uint32(); + m_property_iterator = m_properties.begin(); + break; + case protozero::tag_and_type(detail::pbf_feature::type, protozero::pbf_wire_type::varint): { + const auto type = reader.get_enum(); + // spec 4.3.4 "Geometry Types" + if (type < 0 || type > 3) { + throw format_exception{"Unknown geometry type (spec 4.3.4)"}; + } + m_geometry_type = static_cast(type); + } + break; + case protozero::tag_and_type(detail::pbf_feature::geometry, protozero::pbf_wire_type::length_delimited): + if (!m_geometry.empty()) { + throw format_exception{"Feature has more than one geometry field"}; + } + m_geometry = reader.get_view(); + break; + default: + reader.skip(); // ignore unknown fields + } + } + + // spec 4.2 "A feature MUST contain a geometry field." + if (m_geometry.empty()) { + throw format_exception{"Missing geometry field in feature (spec 4.2)"}; + } + + const auto size = m_properties.size(); + if (size % 2 != 0) { + throw format_exception{"unpaired property key/value indexes (spec 4.4)"}; + } + m_num_properties = size / 2; + } + + /** + * Is this a valid feature? Valid features are those not created from + * the default constructor. + * + * Complexity: Constant. + */ + bool valid() const noexcept { + return m_geometry.data() != nullptr; + } + + /** + * Is this a valid feature? Valid features are those not created from + * the default constructor. + * + * Complexity: Constant. + */ + explicit operator bool() const noexcept { + return valid(); + } + + /** + * The ID of this feature. According to the spec IDs should be unique + * in a layer if they are set (spec 4.2). + * + * Complexity: Constant. + * + * Always returns 0 for invalid features. + */ + uint64_t id() const noexcept { + return m_id; + } + + /** + * Does this feature have an ID? + * + * Complexity: Constant. + * + * Always returns false for invalid features. + */ + bool has_id() const noexcept { + return m_has_id; + } + + /** + * The geometry type of this feature. + * + * Complexity: Constant. + * + * Always returns GeomType::UNKNOWN for invalid features. 
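+ *
+ * For illustration, a minimal sketch of dispatching on the type (`f` is
+ * a hypothetical valid feature):
+ *
+ * @code
+ * switch (f.geometry_type()) {
+ *     case vtzero::GeomType::POINT:
+ *         // handle point geometry
+ *         break;
+ *     case vtzero::GeomType::LINESTRING:
+ *         // handle linestring geometry
+ *         break;
+ *     case vtzero::GeomType::POLYGON:
+ *         // handle polygon geometry
+ *         break;
+ *     default:
+ *         break; // GeomType::UNKNOWN
+ * }
+ * @endcode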
+ */ + GeomType geometry_type() const noexcept { + return m_geometry_type; + } + + /** + * Get the geometry of this feature. + * + * Complexity: Constant. + * + * @pre @code valid() @endcode + */ + vtzero::geometry geometry() const noexcept { + vtzero_assert_in_noexcept_function(valid()); + return {m_geometry, m_geometry_type}; + } + + /** + * Returns true if this feature doesn't have any properties. + * + * Complexity: Constant. + * + * Always returns true for invalid features. + */ + bool empty() const noexcept { + return m_num_properties == 0; + } + + /** + * Returns the number of properties in this feature. + * + * Complexity: Constant. + * + * Always returns 0 for invalid features. + */ + std::size_t num_properties() const noexcept { + return m_num_properties; + } + + /** + * Get the next property in this feature. + * + * Complexity: Constant. + * + * @returns The next property or the invalid property if there are no + * more properties. + * @throws format_exception if the feature data is ill-formed. + * @throws any protozero exception if the protobuf encoding is invalid. + * @pre @code valid() @endcode + */ + property next_property(); + + /** + * Get the indexes into the key/value table for the next property in + * this feature. + * + * Complexity: Constant. + * + * @returns The next index_value_pair or an invalid index_value_pair + * if there are no more properties. + * @throws format_exception if the feature data is ill-formed. + * @throws out_of_range_exception if the key or value index is not + * within the range of indexes in the layer key/value table. + * @throws any protozero exception if the protobuf encoding is invalid. + * @pre @code valid() @endcode + */ + index_value_pair next_property_indexes(); + + /** + * Reset the property iterator. The next time next_property() or + * next_property_indexes() is called, it will begin from the first + * property again. + * + * Complexity: Constant. + * + * @pre @code valid() @endcode + */ + void reset_property() noexcept { + vtzero_assert_in_noexcept_function(valid()); + m_property_iterator = m_properties.begin(); + } + + /** + * Call a function for each property of this feature. + * + * @tparam TFunc The type of the function. It must take a single + * argument of type property&& and return a bool. If the + * function returns false, the iteration will be stopped. + * @param func The function to call. + * @returns true if the iteration was completed and false otherwise. + * @pre @code valid() @endcode + */ + template + bool for_each_property(TFunc&& func) const; + + /** + * Call a function for each key/value index of this feature. + * + * @tparam TFunc The type of the function. It must take a single + * argument of type index_value_pair&& and return a bool. + * If the function returns false, the iteration will be stopped. + * @param func The function to call. + * @returns true if the iteration was completed and false otherwise. + * @pre @code valid() @endcode + */ + template + bool for_each_property_indexes(TFunc&& func) const; + + }; // class feature + + /** + * Create some kind of mapping from property keys to property values. + * + * This can be used to read all properties into a std::map or similar + * object. + * + * @tparam TMap Map type (std::map, std::unordered_map, ...) Must support + * the emplace() method. + * @tparam TKey Key type, usually the key of the map type. The data_view + * of the property key is converted to this type before + * adding it to the map. + * @tparam TValue Value type, usally the value of the map type. 
The + * property_value is converted to this type before + * adding it to the map. + * @tparam TMapping A struct derived from property_value_mapping with the + * mapping for vtzero property value types to TValue-constructing + * types. (See convert_property_value() for details.) + * @param feature The feature to get the properties from. + * @returns An object of type TMap with all the properties. + * @pre @code feature.valid() @endcode + */ + template + TMap create_properties_map(const vtzero::feature& feature) { + TMap map; + + feature.for_each_property([&map](const property& p) { + map.emplace(TKey(p.key()), convert_property_value(p.value())); + return true; + }); + + return map; + } + +} // namespace vtzero + +#endif // VTZERO_FEATURE_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/feature_builder_impl.hpp b/contrib/vtzero/1.1.0/include/vtzero/feature_builder_impl.hpp new file mode 100644 index 0000000..3067418 --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/feature_builder_impl.hpp @@ -0,0 +1,126 @@ +#ifndef VTZERO_FEATURE_BUILDER_IMPL_HPP +#define VTZERO_FEATURE_BUILDER_IMPL_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file feature_builder_impl.hpp + * + * @brief Contains classes internal to the builder. + */ + +#include "builder_impl.hpp" +#include "encoded_property_value.hpp" +#include "geometry.hpp" +#include "property.hpp" +#include "property_value.hpp" + +#include + +namespace vtzero { + + namespace detail { + + class feature_builder_base { + + layer_builder_impl* m_layer; + + void add_key_internal(index_value idx) { + vtzero_assert(idx.valid()); + m_pbf_tags.add_element(idx.value()); + } + + template + void add_key_internal(T&& key) { + add_key_internal(m_layer->add_key(data_view{std::forward(key)})); + } + + void add_value_internal(index_value idx) { + vtzero_assert(idx.valid()); + m_pbf_tags.add_element(idx.value()); + } + + void add_value_internal(property_value value) { + add_value_internal(m_layer->add_value(value)); + } + + template + void add_value_internal(T&& value) { + encoded_property_value v{std::forward(value)}; + add_value_internal(m_layer->add_value(v)); + } + + protected: + + protozero::pbf_builder m_feature_writer; + protozero::packed_field_uint32 m_pbf_tags; + + explicit feature_builder_base(layer_builder_impl* layer) : + m_layer(layer), + m_feature_writer(layer->message(), detail::pbf_layer::features) { + } + + ~feature_builder_base() noexcept = default; + + feature_builder_base(const feature_builder_base&) = delete; // NOLINT(hicpp-use-equals-delete, modernize-use-equals-delete) + + feature_builder_base& operator=(const feature_builder_base&) = delete; // NOLINT(hicpp-use-equals-delete, modernize-use-equals-delete) + // The check wants these functions to be public... 
+ + feature_builder_base(feature_builder_base&&) noexcept = default; + + feature_builder_base& operator=(feature_builder_base&&) noexcept = default; + + uint32_t version() const noexcept { + return m_layer->version(); + } + + void set_id_impl(uint64_t id) { + m_feature_writer.add_uint64(detail::pbf_feature::id, id); + } + + void add_property_impl(const property& property) { + add_key_internal(property.key()); + add_value_internal(property.value()); + } + + void add_property_impl(const index_value_pair idxs) { + add_key_internal(idxs.key()); + add_value_internal(idxs.value()); + } + + template + void add_property_impl(TKey&& key, TValue&& value) { + add_key_internal(std::forward(key)); + add_value_internal(std::forward(value)); + } + + void do_commit() { + if (m_pbf_tags.valid()) { + m_pbf_tags.commit(); + } + m_feature_writer.commit(); + m_layer->increment_feature_count(); + } + + void do_rollback() { + if (m_pbf_tags.valid()) { + m_pbf_tags.rollback(); + } + m_feature_writer.rollback(); + } + + }; // class feature_builder_base + + } // namespace detail + +} // namespace vtzero + +#endif // VTZERO_FEATURE_BUILDER_IMPL_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/geometry.hpp b/contrib/vtzero/1.1.0/include/vtzero/geometry.hpp new file mode 100644 index 0000000..62ee140 --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/geometry.hpp @@ -0,0 +1,444 @@ +#ifndef VTZERO_GEOMETRY_HPP +#define VTZERO_GEOMETRY_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file geometry.hpp + * + * @brief Contains classes and functions related to geometry handling. + */ + +#include "exception.hpp" +#include "types.hpp" + +#include + +#include +#include +#include + +namespace vtzero { + + /// A simple point class + struct point { + + /// X coordinate + int32_t x = 0; + + /// Y coordinate + int32_t y = 0; + + /// Default construct to 0 coordinates + constexpr point() noexcept = default; + + /// Constructor + constexpr point(int32_t x_, int32_t y_) noexcept : + x(x_), + y(y_) { + } + + }; // struct point + + /** + * Type of a polygon ring. This can either be "outer", "inner", or + * "invalid". Invalid is used when the area of the ring is 0. + */ + enum class ring_type { + outer = 0, + inner = 1, + invalid = 2 + }; // enum class ring_type + + /** + * Helper function to create a point from any type that has members x + * and y. + * + * If your point type doesn't have members x any y, you can overload this + * function for your type and it will be used by vtzero. 
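+ *
+ * A minimal sketch (the `my_point` type and its members are
+ * hypothetical; the overload is typically found via argument-dependent
+ * lookup, so keep it in the same namespace as your point type):
+ *
+ * @code
+ * struct my_point { int32_t lon; int32_t lat; };
+ *
+ * vtzero::point create_vtzero_point(const my_point& p) noexcept {
+ *     return {p.lon, p.lat};
+ * }
+ * @endcode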
+ */ + template + point create_vtzero_point(const TPoint& p) noexcept { + return {p.x, p.y}; + } + + /// Points are equal if their coordinates are + inline constexpr bool operator==(const point a, const point b) noexcept { + return a.x == b.x && a.y == b.y; + } + + /// Points are not equal if their coordinates aren't + inline constexpr bool operator!=(const point a, const point b) noexcept { + return !(a == b); + } + + namespace detail { + + /// The command id type as specified in the vector tile spec + enum class CommandId : uint32_t { + MOVE_TO = 1, + LINE_TO = 2, + CLOSE_PATH = 7 + }; + + inline constexpr uint32_t command_integer(CommandId id, const uint32_t count) noexcept { + return (static_cast(id) & 0x7U) | (count << 3U); + } + + inline constexpr uint32_t command_move_to(const uint32_t count) noexcept { + return command_integer(CommandId::MOVE_TO, count); + } + + inline constexpr uint32_t command_line_to(const uint32_t count) noexcept { + return command_integer(CommandId::LINE_TO, count); + } + + inline constexpr uint32_t command_close_path() noexcept { + return command_integer(CommandId::CLOSE_PATH, 1); + } + + inline constexpr uint32_t get_command_id(const uint32_t command_integer) noexcept { + return command_integer & 0x7U; + } + + inline constexpr uint32_t get_command_count(const uint32_t command_integer) noexcept { + return command_integer >> 3U; + } + + // The maximum value for the command count according to the spec. + inline constexpr uint32_t max_command_count() noexcept { + return get_command_count(std::numeric_limits::max()); + } + + inline constexpr int64_t det(const point a, const point b) noexcept { + return static_cast(a.x) * static_cast(b.y) - + static_cast(b.x) * static_cast(a.y); + } + + template + struct get_result { + + using type = void; + + template + void operator()(TGeomHandler&& /*geom_handler*/) const noexcept { + } + + }; + + template + struct get_result().result()), void>::value>::type> { + + using type = decltype(std::declval().result()); + + template + type operator()(TGeomHandler&& geom_handler) { + return std::forward(geom_handler).result(); + } + + }; + + /** + * Decode a geometry as specified in spec 4.3 from a sequence of 32 bit + * unsigned integers. This templated base class can be instantiated + * with a different iterator type for testing than for normal use. + */ + template + class geometry_decoder { + + public: + + using iterator_type = TIterator; + + private: + + iterator_type m_it; + iterator_type m_end; + + point m_cursor{0, 0}; + + // maximum value for m_count before we throw an exception + uint32_t m_max_count; + + /** + * The current count value is set from the CommandInteger and + * then counted down with each next_point() call. So it must be + * greater than 0 when next_point() is called and 0 when + * next_command() is called. 
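The command helpers defined above implement spec 4.3: a CommandInteger packs the command id into the low three bits and the repeat count into the remaining bits, and the parameters that follow are zigzag encoded. A small worked example: MoveTo with count 1 encodes to 9, and a relative move of (+25, +17) encodes to the parameters 50 and 34. The snippet below is illustrative only; the vtzero::detail helpers are internal, zigzag32() is a local helper defined here, and the vendored vtzero headers plus their protozero dependency are assumed to be on the include path.

    #include <cassert>
    #include <cstdint>
    #include <vtzero/geometry.hpp>

    // Local helper equivalent to the zigzag encoding described in spec 4.3.2.
    inline uint32_t zigzag32(int32_t value) noexcept {
        return (static_cast<uint32_t>(value) << 1U) ^ static_cast<uint32_t>(value >> 31);
    }

    int main() {
        using vtzero::detail::CommandId;

        const uint32_t cmd = vtzero::detail::command_integer(CommandId::MOVE_TO, 1);
        assert(cmd == 9);                                  // (1 & 0x7) | (1 << 3)
        assert(vtzero::detail::get_command_id(cmd) == 1);  // 1 == MOVE_TO
        assert(vtzero::detail::get_command_count(cmd) == 1);

        assert(zigzag32(25) == 50);  // ParameterIntegers for MoveTo(+25, +17)
        assert(zigzag32(17) == 34);
    }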
+ */ + uint32_t m_count = 0; + + public: + + geometry_decoder(iterator_type begin, iterator_type end, std::size_t max) : + m_it(begin), + m_end(end), + m_max_count(static_cast(max)) { + vtzero_assert(max <= detail::max_command_count()); + } + + uint32_t count() const noexcept { + return m_count; + } + + bool done() const noexcept { + return m_it == m_end; + } + + bool next_command(const CommandId expected_command_id) { + vtzero_assert(m_count == 0); + + if (m_it == m_end) { + return false; + } + + const auto command_id = get_command_id(*m_it); + if (command_id != static_cast(expected_command_id)) { + throw geometry_exception{std::string{"expected command "} + + std::to_string(static_cast(expected_command_id)) + + " but got " + + std::to_string(command_id)}; + } + + if (expected_command_id == CommandId::CLOSE_PATH) { + // spec 4.3.3.3 "A ClosePath command MUST have a command count of 1" + if (get_command_count(*m_it) != 1) { + throw geometry_exception{"ClosePath command count is not 1"}; + } + } else { + m_count = get_command_count(*m_it); + if (m_count > m_max_count) { + throw geometry_exception{"count too large"}; + } + } + + ++m_it; + + return true; + } + + point next_point() { + vtzero_assert(m_count > 0); + + if (m_it == m_end || std::next(m_it) == m_end) { + throw geometry_exception{"too few points in geometry"}; + } + + // spec 4.3.2 "A ParameterInteger is zigzag encoded" + int64_t x = protozero::decode_zigzag32(*m_it++); + int64_t y = protozero::decode_zigzag32(*m_it++); + + // x and y are int64_t so this addition can never overflow + x += m_cursor.x; + y += m_cursor.y; + + // The cast is okay, because a valid vector tile can never + // contain values that would overflow here and we don't care + // what happens to invalid tiles here. + m_cursor.x = static_cast(x); + m_cursor.y = static_cast(y); + + --m_count; + + return m_cursor; + } + + template + typename detail::get_result::type decode_point(TGeomHandler&& geom_handler) { + // spec 4.3.4.2 "MUST consist of a single MoveTo command" + if (!next_command(CommandId::MOVE_TO)) { + throw geometry_exception{"expected MoveTo command (spec 4.3.4.2)"}; + } + + // spec 4.3.4.2 "command count greater than 0" + if (count() == 0) { + throw geometry_exception{"MoveTo command count is zero (spec 4.3.4.2)"}; + } + + std::forward(geom_handler).points_begin(count()); + while (count() > 0) { + std::forward(geom_handler).points_point(next_point()); + } + + // spec 4.3.4.2 "MUST consist of of a single ... command" + if (!done()) { + throw geometry_exception{"additional data after end of geometry (spec 4.3.4.2)"}; + } + + std::forward(geom_handler).points_end(); + + return detail::get_result{}(std::forward(geom_handler)); + } + + template + typename detail::get_result::type decode_linestring(TGeomHandler&& geom_handler) { + // spec 4.3.4.3 "1. A MoveTo command" + while (next_command(CommandId::MOVE_TO)) { + // spec 4.3.4.3 "with a command count of 1" + if (count() != 1) { + throw geometry_exception{"MoveTo command count is not 1 (spec 4.3.4.3)"}; + } + + const auto first_point = next_point(); + + // spec 4.3.4.3 "2. 
A LineTo command" + if (!next_command(CommandId::LINE_TO)) { + throw geometry_exception{"expected LineTo command (spec 4.3.4.3)"}; + } + + // spec 4.3.4.3 "with a command count greater than 0" + if (count() == 0) { + throw geometry_exception{"LineTo command count is zero (spec 4.3.4.3)"}; + } + + std::forward(geom_handler).linestring_begin(count() + 1); + + std::forward(geom_handler).linestring_point(first_point); + while (count() > 0) { + std::forward(geom_handler).linestring_point(next_point()); + } + + std::forward(geom_handler).linestring_end(); + } + + return detail::get_result{}(std::forward(geom_handler)); + } + + template + typename detail::get_result::type decode_polygon(TGeomHandler&& geom_handler) { + // spec 4.3.4.4 "1. A MoveTo command" + while (next_command(CommandId::MOVE_TO)) { + // spec 4.3.4.4 "with a command count of 1" + if (count() != 1) { + throw geometry_exception{"MoveTo command count is not 1 (spec 4.3.4.4)"}; + } + + int64_t sum = 0; + const point start_point = next_point(); + point last_point = start_point; + + // spec 4.3.4.4 "2. A LineTo command" + if (!next_command(CommandId::LINE_TO)) { + throw geometry_exception{"expected LineTo command (spec 4.3.4.4)"}; + } + + std::forward(geom_handler).ring_begin(count() + 2); + + std::forward(geom_handler).ring_point(start_point); + + while (count() > 0) { + const point p = next_point(); + sum += detail::det(last_point, p); + last_point = p; + std::forward(geom_handler).ring_point(p); + } + + // spec 4.3.4.4 "3. A ClosePath command" + if (!next_command(CommandId::CLOSE_PATH)) { + throw geometry_exception{"expected ClosePath command (4.3.4.4)"}; + } + + sum += detail::det(last_point, start_point); + + std::forward(geom_handler).ring_point(start_point); + + std::forward(geom_handler).ring_end(sum > 0 ? ring_type::outer : + sum < 0 ? ring_type::inner : ring_type::invalid); + } + + return detail::get_result{}(std::forward(geom_handler)); + } + + }; // class geometry_decoder + + } // namespace detail + + /** + * Decode a point geometry. + * + * @tparam TGeomHandler Handler class. See tutorial for details. + * @param geometry The geometry as returned by feature.geometry(). + * @param geom_handler An object of TGeomHandler. + * @throws geometry_error If there is a problem with the geometry. + * @pre Geometry must be a point geometry. + */ + template + typename detail::get_result::type decode_point_geometry(const geometry& geometry, TGeomHandler&& geom_handler) { + vtzero_assert(geometry.type() == GeomType::POINT); + detail::geometry_decoder decoder{geometry.begin(), geometry.end(), geometry.data().size() / 2}; + return decoder.decode_point(std::forward(geom_handler)); + } + + /** + * Decode a linestring geometry. + * + * @tparam TGeomHandler Handler class. See tutorial for details. + * @param geometry The geometry as returned by feature.geometry(). + * @param geom_handler An object of TGeomHandler. + * @returns whatever geom_handler.result() returns if that function exists, + * void otherwise + * @throws geometry_error If there is a problem with the geometry. + * @pre Geometry must be a linestring geometry. + */ + template + typename detail::get_result::type decode_linestring_geometry(const geometry& geometry, TGeomHandler&& geom_handler) { + vtzero_assert(geometry.type() == GeomType::LINESTRING); + detail::geometry_decoder decoder{geometry.begin(), geometry.end(), geometry.data().size() / 2}; + return decoder.decode_linestring(std::forward(geom_handler)); + } + + /** + * Decode a polygon geometry. 
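The decode_*_geometry() functions above drive a user-supplied handler through the points_/linestring_/ring_ callbacks, and get_result<> makes them return whatever the handler's optional result() member returns. A minimal handler sketch follows; it is illustrative rather than part of the patch, assumes the vendored headers are on the include path, and assumes `feature` in the usage comment is a vtzero::feature with linestring geometry.

    #include <cstdint>
    #include <utility>
    #include <vector>
    #include <vtzero/geometry.hpp>

    // Collects every linestring of a feature into vectors of vtzero::point.
    struct linestring_collector {
        std::vector<std::vector<vtzero::point>> lines;

        void linestring_begin(uint32_t count) {
            lines.emplace_back();
            lines.back().reserve(count); // count is known up front from the decoder
        }
        void linestring_point(vtzero::point p) {
            lines.back().push_back(p);
        }
        void linestring_end() {
        }
        // Because result() exists, decode_linestring_geometry() returns its value.
        std::vector<std::vector<vtzero::point>> result() {
            return std::move(lines);
        }
    };

    // Usage:
    //   auto lines = vtzero::decode_linestring_geometry(feature.geometry(),
    //                                                   linestring_collector{});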
+ * + * @tparam TGeomHandler Handler class. See tutorial for details. + * @param geometry The geometry as returned by feature.geometry(). + * @param geom_handler An object of TGeomHandler. + * @returns whatever geom_handler.result() returns if that function exists, + * void otherwise + * @throws geometry_error If there is a problem with the geometry. + * @pre Geometry must be a polygon geometry. + */ + template + typename detail::get_result::type decode_polygon_geometry(const geometry& geometry, TGeomHandler&& geom_handler) { + vtzero_assert(geometry.type() == GeomType::POLYGON); + detail::geometry_decoder decoder{geometry.begin(), geometry.end(), geometry.data().size() / 2}; + return decoder.decode_polygon(std::forward(geom_handler)); + } + + /** + * Decode a geometry. + * + * @tparam TGeomHandler Handler class. See tutorial for details. + * @param geometry The geometry as returned by feature.geometry(). + * @param geom_handler An object of TGeomHandler. + * @returns whatever geom_handler.result() returns if that function exists, + * void otherwise + * @throws geometry_error If the geometry has type UNKNOWN of if there is + * a problem with the geometry. + */ + template + typename detail::get_result::type decode_geometry(const geometry& geometry, TGeomHandler&& geom_handler) { + detail::geometry_decoder decoder{geometry.begin(), geometry.end(), geometry.data().size() / 2}; + switch (geometry.type()) { + case GeomType::POINT: + return decoder.decode_point(std::forward(geom_handler)); + case GeomType::LINESTRING: + return decoder.decode_linestring(std::forward(geom_handler)); + case GeomType::POLYGON: + return decoder.decode_polygon(std::forward(geom_handler)); + default: + break; + } + throw geometry_exception{"unknown geometry type"}; + } + +} // namespace vtzero + +#endif // VTZERO_GEOMETRY_HPP diff --git a/contrib/vtzero/1.1.0/include/vtzero/index.hpp b/contrib/vtzero/1.1.0/include/vtzero/index.hpp new file mode 100644 index 0000000..75f4df7 --- /dev/null +++ b/contrib/vtzero/1.1.0/include/vtzero/index.hpp @@ -0,0 +1,264 @@ +#ifndef VTZERO_INDEX_HPP +#define VTZERO_INDEX_HPP + +/***************************************************************************** + +vtzero - Tiny and fast vector tile decoder and encoder in C++. + +This file is from https://github.com/mapbox/vtzero where you can find more +documentation. + +*****************************************************************************/ + +/** + * @file index.hpp + * + * @brief Contains classes for indexing the key/value tables inside layers. + */ + +#include "builder.hpp" +#include "types.hpp" + +#include +#include +#include + +namespace vtzero { + + /** + * Used to store the mapping between property keys and the index value + * in the table stored in a layer. + * + * @tparam TMap The map class to use (std::map, std::unordered_map or + * something compatible). + */ + template