From de7b23dc7b7f7f8ef57708c3b6110ea23bfec16c Mon Sep 17 00:00:00 2001
From: Sheng Zha
Date: Wed, 18 Mar 2020 15:31:13 -0700
Subject: [PATCH 01/49] clean slate for 1.x
---
CODEOWNERS | 12 -
CODE_OF_CONDUCT.md | 77 -
CONTRIBUTING.md | 1 -
LICENSE | 201 --
MANIFEST.in | 5 -
Makefile | 113 -
README.rst | 218 --
ci/batch/docker/Dockerfile | 27 -
ci/batch/docker/gluon_nlp_job.sh | 39 -
ci/batch/submit-job.py | 154 --
ci/batch/wait-job.py | 93 -
ci/codecov.sh | 1550 -----------
.../Jenkinsfile_py3-master_cpu_unittest | 69 -
ci/jenkins/Jenkinsfile_py3-master_gpu_doc | 168 --
.../Jenkinsfile_py3-master_gpu_integration | 53 -
.../Jenkinsfile_py3-master_gpu_unittest | 61 -
ci/jenkins/Jenkinsfile_py3_cpu_unittest | 69 -
ci/jenkins/Jenkinsfile_py3_gpu_integration | 53 -
ci/jenkins/Jenkinsfile_py3_gpu_unittest | 61 -
ci/jenkins/build_steps.groovy | 127 -
ci/jenkins/utils.groovy | 214 --
ci/prepare_clean_env.sh | 25 -
ci/rat/rat-excludes | 55 -
ci/upload_doc.sh | 6 -
codecov.yml | 30 -
conftest.py | 207 --
docs/.gitignore | 5 -
docs/.nojekyll | 0
docs/404.rst | 25 -
docs/Doxyfile | 2353 -----------------
docs/Makefile | 194 --
docs/README.txt | 3 -
docs/_static/404.jpg | Bin 145814 -> 0 bytes
docs/_static/custom.css | 28 -
docs/_static/gluon-logo.svg | 37 -
docs/_static/gluon.ico | Bin 1150 -> 0 bytes
docs/_static/gluon_black.png | Bin 7735 -> 0 bytes
docs/_static/google_analytics.js | 7 -
docs/_static/hidebib.js | 42 -
docs/_static/install-options.js | 90 -
docs/api/data.batchify.rst | 47 -
docs/api/data.rst | 298 ---
docs/api/embedding.rst | 60 -
docs/api/index.rst | 16 -
docs/api/initializer.rst | 32 -
docs/api/loss.rst | 51 -
docs/api/model.rst | 170 --
docs/api/model.train.rst | 39 -
docs/api/optimizer.rst | 23 -
docs/api/utils.rst | 53 -
docs/api/vocab.rst | 78 -
docs/community/contribute.rst | 127 -
docs/community/git.rst | 113 -
docs/community/index.rst | 57 -
docs/community/release.rst | 15 -
docs/conf.py | 264 --
docs/examples/index.rst | 158 --
docs/examples/language_model/cache_model.png | Bin 56716 -> 0 bytes
docs/examples/language_model/index.rst | 27 -
.../language_model/language_model_intro.png | Bin 148966 -> 0 bytes
.../language_model/train_language_model.md | 292 --
.../language_model/use_pretrained_lm.md | 217 --
.../machine_translation/dataprocessor.py | 167 --
docs/examples/machine_translation/gnmt.md | 531 ----
.../machine_translation/hyperparameters.py | 53 -
docs/examples/machine_translation/index.rst | 28 -
docs/examples/machine_translation/nmt | 1 -
.../machine_translation/transformer.md | 249 --
.../machine_translation/transformer.png | Bin 296782 -> 0 bytes
docs/examples/machine_translation/utils.py | 156 --
docs/examples/notes/data_api.rst | 286 --
.../images/fixed_bucket_strategy_ratio0.0.png | Bin 38249 -> 0 bytes
.../images/fixed_bucket_strategy_ratio0.7.png | Bin 37911 -> 0 bytes
.../notes/images/no_bucket_strategy.png | Bin 54444 -> 0 bytes
.../notes/images/sorted_bucket_strategy.png | Bin 42514 -> 0 bytes
docs/examples/notes/index.rst | 26 -
docs/examples/notes/vocab_emb.rst | 99 -
docs/examples/sentence_embedding/bert | 1 -
.../sentence_embedding/bert-embed.png | Bin 72606 -> 0 bytes
.../sentence_embedding/bert-sentence-pair.png | Bin 114561 -> 0 bytes
docs/examples/sentence_embedding/bert.md | 421 ---
docs/examples/sentence_embedding/bert.png | Bin 36389 -> 0 bytes
docs/examples/sentence_embedding/dev.tsv | 409 ---
.../elmo_sentence_representation.md | 165 --
.../sentence_embedding/sentences.json | 38 -
.../sentiment_analysis/Bi-LSTM-Rep.png | Bin 16784 -> 0 bytes
.../sentiment_analysis/attention-nlp.png | Bin 22156 -> 0 bytes
docs/examples/sentiment_analysis/index.rst | 27 -
.../sentiment_analysis/samodel-v3.png | Bin 20149 -> 0 bytes
.../self_attentive_sentence_embedding.md | 559 ----
.../sentiment_analysis/sentiment_analysis.md | 354 ---
docs/examples/sequence_sampling/index.rst | 21 -
.../sequence_sampling/sequence_sampling.md | 197 --
docs/examples/word_embedding/data.py | 1 -
docs/examples/word_embedding/index.rst | 42 -
docs/examples/word_embedding/model.py | 1 -
docs/examples/word_embedding/utils.py | 1 -
.../examples/word_embedding/word_embedding.md | 349 ---
.../word_embedding/word_embedding_training.md | 381 ---
docs/genindex.rst | 2 -
docs/index.rst | 83 -
docs/install.rst | 18 -
docs/install/install-include.rst | 146 -
docs/install/install-more.rst | 29 -
docs/md2ipynb.py | 41 -
docs/model_zoo | 1 -
docs/model_zoo.rst | 76 -
env/cpu/py3-master.yml | 43 -
env/cpu/py3.yml | 42 -
env/docker/py3.yml | 42 -
env/gpu/py3-master.yml | 44 -
env/gpu/py3.yml | 43 -
examples | 1 -
mms/README.rst | 17 -
mms/bert.py | 88 -
pytest.ini | 18 -
scripts/__init__.py | 19 -
scripts/bert/__init__.py | 20 -
scripts/bert/bert_qa_evaluate.py | 394 ---
scripts/bert/data/__init__.py | 21 -
scripts/bert/data/create_pretraining_data.py | 688 -----
scripts/bert/data/embedding.py | 51 -
scripts/bert/data/transform.py | 130 -
scripts/bert/embedding.py | 271 --
scripts/bert/export.py | 222 --
scripts/bert/finetune_classifier.py | 704 -----
scripts/bert/finetune_squad.py | 862 ------
scripts/bert/fp16_utils.py | 167 --
scripts/bert/index.rst | 369 ---
scripts/bert/model/__init__.py | 20 -
scripts/bert/model/qa.py | 112 -
scripts/bert/pretraining_utils.py | 526 ----
scripts/bert/run_pretraining.py | 479 ----
scripts/bert/sample_text.txt | 33 -
scripts/bert/utils.py | 90 -
.../conversion_tools/compare_gluon_ernie.py | 89 -
.../compare_tf_gluon_model.py | 189 --
.../conversion_tools/convert_fairseq_model.py | 213 --
.../convert_paddle_to_gluon.py | 254 --
.../conversion_tools/convert_pytorch_model.py | 177 --
.../convert_pytorch_transformers.py | 221 --
scripts/conversion_tools/convert_tf_model.py | 241 --
scripts/conversion_tools/index.rst | 27 -
...er_pytorch_gluon_parameter_name_mapping.py | 92 -
scripts/conversion_tools/input.txt | 1 -
scripts/conversion_tools/input_cn.txt | 1 -
scripts/conversion_tools/utils.py | 72 -
scripts/index.rst | 93 -
.../intent_cls_slot_labeling/finetune_icsl.py | 461 ----
scripts/intent_cls_slot_labeling/index.rst | 108 -
scripts/language_model/__init__.py | 19 -
.../language_model/cache_language_model.py | 211 --
...mpare_transformerxl_pytorch_gluon_model.py | 181 --
.../compare_xlnet_pytorch_gluon_model.py | 108 -
.../convert_transformer_xl.py | 272 --
.../conversion_utils/convert_xlnet.py | 194 --
.../language_model/conversion_utils/utils.py | 107 -
scripts/language_model/index.rst | 301 ---
.../large_word_language_model.py | 357 ---
.../language_model/model/XLNet_classifier.py | 90 -
scripts/language_model/model/qa.py | 345 ---
scripts/language_model/run_glue.py | 658 -----
scripts/language_model/run_squad.py | 721 -----
scripts/language_model/sampler.py | 109 -
.../language_model/transformer/__init__.py | 27 -
.../transformer/attention_cell.py | 394 ---
scripts/language_model/transformer/data.py | 111 -
.../language_model/transformer/embedding.py | 156 --
scripts/language_model/transformer/model.py | 300 ---
scripts/language_model/transformer/softmax.py | 360 ---
.../language_model/transformer/transformer.py | 755 ------
scripts/language_model/transformer_xl.py | 164 --
scripts/language_model/word_language_model.py | 474 ----
scripts/language_model/xlnet_qa_evaluate.py | 152 --
scripts/machine_translation/__init__.py | 21 -
scripts/machine_translation/_constants.py | 22 -
scripts/machine_translation/bleu.py | 352 ---
scripts/machine_translation/dataprocessor.py | 284 --
scripts/machine_translation/dataset.py | 67 -
scripts/machine_translation/gnmt.py | 512 ----
.../machine_translation/hyperparameters.py | 51 -
scripts/machine_translation/index.rst | 71 -
.../inference_transformer.py | 300 ---
scripts/machine_translation/train_gnmt.py | 285 --
.../machine_translation/train_transformer.py | 412 ---
scripts/machine_translation/translation.py | 80 -
scripts/machine_translation/utils.py | 69 -
scripts/natural_language_inference/dataset.py | 77 -
.../decomposable_attention.py | 164 --
scripts/natural_language_inference/esim.py | 115 -
scripts/natural_language_inference/index.rst | 53 -
scripts/natural_language_inference/main.py | 254 --
.../natural_language_inference/preprocess.py | 61 -
scripts/natural_language_inference/utils.py | 54 -
scripts/ner/data.py | 355 ---
scripts/ner/dataset_sample/test_sample.txt | 17 -
scripts/ner/dataset_sample/train_sample.txt | 14 -
.../ner/dataset_sample/validation_sample.txt | 16 -
scripts/ner/finetune_bert.py | 222 --
scripts/ner/index.rst | 34 -
scripts/ner/model.py | 100 -
scripts/ner/ner_utils.py | 108 -
scripts/ner/predict_ner.py | 130 -
scripts/parsing/__init__.py | 16 -
scripts/parsing/common/__init__.py | 16 -
scripts/parsing/common/config.py | 118 -
scripts/parsing/common/data.py | 474 ----
.../parsing/common/exponential_scheduler.py | 41 -
scripts/parsing/common/k_means.py | 183 --
scripts/parsing/common/savable.py | 56 -
scripts/parsing/common/tarjan.py | 95 -
scripts/parsing/common/utils.py | 526 ----
scripts/parsing/index.rst | 79 -
scripts/parsing/parser/__init__.py | 16 -
scripts/parsing/parser/biaffine_parser.py | 357 ---
scripts/parsing/parser/dep_parser.py | 310 ---
scripts/parsing/parser/evaluate/__init__.py | 19 -
scripts/parsing/parser/evaluate/evaluate.py | 136 -
scripts/question_answering/__init__.py | 19 -
scripts/question_answering/data_pipeline.py | 946 -------
scripts/question_answering/utils.py | 45 -
scripts/sentiment_analysis/__init__.py | 19 -
scripts/sentiment_analysis/finetune_lm.py | 344 ---
scripts/sentiment_analysis/index.rst | 247 --
scripts/sentiment_analysis/process_data.py | 142 -
.../sentiment_analysis_cnn.py | 208 --
scripts/sentiment_analysis/text_cnn.py | 78 -
scripts/tests/__init__.py | 19 -
scripts/tests/conftest.py | 40 -
scripts/tests/multi-bleu-detok.perl | 211 --
scripts/tests/multi-bleu.perl | 177 --
scripts/tests/test_bert_checkpoints.py | 45 -
scripts/tests/test_bert_dataset_transform.py | 72 -
scripts/tests/test_bert_embedding.py | 51 -
scripts/tests/test_bleu.py | 149 --
scripts/tests/test_dataprocessor.py | 86 -
scripts/tests/test_encoder_decoder.py | 236 --
scripts/tests/test_models.py | 56 -
scripts/tests/test_question_answering.py | 60 -
scripts/tests/test_references.txt | 100 -
scripts/tests/test_sampler.py | 34 -
scripts/tests/test_sanity.py | 38 -
scripts/tests/test_scripts.py | 423 ---
scripts/tests/test_transformer_xl.py | 141 -
scripts/tests/test_translations.txt | 100 -
scripts/tests/test_xlnet.py | 43 -
.../word_embeddings/glove/cooccurrences.npz | Bin 4949 -> 0 bytes
scripts/tests/word_embeddings/glove/vocab.txt | 27 -
.../fasttext_word_ngram.py | 422 ---
scripts/text_classification/index.rst | 73 -
scripts/text_generation/__init__.py | 20 -
scripts/text_generation/index.rst | 131 -
scripts/text_generation/model/__init__.py | 64 -
scripts/text_generation/model/gpt.py | 441 ---
scripts/text_generation/sequence_sampling.py | 190 --
scripts/word_embeddings/data.py | 561 ----
.../word_embeddings/evaluate_pretrained.py | 241 --
scripts/word_embeddings/evaluation.py | 302 ---
scripts/word_embeddings/executors.py | 93 -
scripts/word_embeddings/extract_vocab.py | 92 -
scripts/word_embeddings/index.rst | 139 -
scripts/word_embeddings/model.py | 194 --
scripts/word_embeddings/run_all.sh | 40 -
scripts/word_embeddings/tools/CMakeLists.txt | 61 -
scripts/word_embeddings/tools/cooccur.cc | 314 ---
scripts/word_embeddings/tools/extern/CLI11 | 1 -
scripts/word_embeddings/tools/extern/cnpy | 1 -
scripts/word_embeddings/tools/extern/sparsepp | 1 -
scripts/word_embeddings/tools/utils.h | 51 -
scripts/word_embeddings/tools/vocab_count.cc | 151 --
scripts/word_embeddings/train_glove.py | 426 ---
scripts/word_embeddings/train_sg_cbow.py | 307 ---
scripts/word_embeddings/utils.py | 46 -
setup.py | 97 -
src/gluonnlp/__init__.py | 49 -
src/gluonnlp/_constants.py | 1836 -------------
src/gluonnlp/base.py | 65 -
src/gluonnlp/calibration/__init__.py | 21 -
src/gluonnlp/calibration/collector.py | 60 -
src/gluonnlp/data/__init__.py | 171 --
src/gluonnlp/data/baidu_ernie_data.py | 187 --
src/gluonnlp/data/batchify/__init__.py | 26 -
src/gluonnlp/data/batchify/batchify.py | 544 ----
src/gluonnlp/data/batchify/embedding.py | 265 --
src/gluonnlp/data/batchify/language_model.py | 323 ---
src/gluonnlp/data/bert/__init__.py | 25 -
src/gluonnlp/data/bert/glue.py | 129 -
src/gluonnlp/data/bert/squad.py | 308 ---
src/gluonnlp/data/candidate_sampler.py | 120 -
src/gluonnlp/data/classification.py | 645 -----
src/gluonnlp/data/conll.py | 470 ----
src/gluonnlp/data/corpora/__init__.py | 28 -
.../data/corpora/google_billion_word.py | 146 -
.../large_text_compression_benchmark.py | 190 --
src/gluonnlp/data/corpora/wikitext.py | 416 ---
src/gluonnlp/data/dataloader.py | 248 --
src/gluonnlp/data/dataset.py | 337 ---
src/gluonnlp/data/datasetloader.py | 453 ----
src/gluonnlp/data/fast_bert_tokenizer.pyx | 266 --
src/gluonnlp/data/glue.py | 705 -----
src/gluonnlp/data/intent_slot.py | 202 --
src/gluonnlp/data/question_answering.py | 225 --
src/gluonnlp/data/registry.py | 144 -
src/gluonnlp/data/sampler.py | 567 ----
src/gluonnlp/data/sentiment.py | 472 ----
src/gluonnlp/data/stream.py | 385 ---
src/gluonnlp/data/super_glue.py | 577 ----
src/gluonnlp/data/transforms.py | 1257 ---------
src/gluonnlp/data/translation.py | 462 ----
src/gluonnlp/data/utils.py | 519 ----
.../data/word_embedding_evaluation.py | 916 -------
src/gluonnlp/data/xlnet/__init__.py | 24 -
src/gluonnlp/data/xlnet/squad.py | 138 -
src/gluonnlp/embedding/__init__.py | 24 -
src/gluonnlp/embedding/evaluation.py | 513 ----
src/gluonnlp/embedding/token_embedding.py | 1347 ----------
src/gluonnlp/initializer/__init__.py | 25 -
src/gluonnlp/initializer/initializer.py | 95 -
src/gluonnlp/loss/__init__.py | 27 -
src/gluonnlp/loss/activation_regularizer.py | 146 -
src/gluonnlp/loss/label_smoothing.py | 126 -
src/gluonnlp/loss/loss.py | 98 -
src/gluonnlp/metric/__init__.py | 27 -
src/gluonnlp/metric/length_normalized_loss.py | 89 -
src/gluonnlp/metric/masked_accuracy.py | 114 -
src/gluonnlp/model/__init__.py | 156 --
src/gluonnlp/model/attention_cell.py | 549 ----
src/gluonnlp/model/bert.py | 1647 ------------
src/gluonnlp/model/bilm_encoder.py | 206 --
src/gluonnlp/model/block.py | 127 -
src/gluonnlp/model/convolutional_encoder.py | 162 --
src/gluonnlp/model/elmo.py | 435 ---
src/gluonnlp/model/highway.py | 121 -
src/gluonnlp/model/info.py | 30 -
src/gluonnlp/model/language_model.py | 558 ----
src/gluonnlp/model/lstmpcellwithclip.py | 139 -
src/gluonnlp/model/parameter.py | 77 -
src/gluonnlp/model/sampled_block.py | 689 -----
src/gluonnlp/model/seq2seq_encoder_decoder.py | 168 --
src/gluonnlp/model/sequence_sampler.py | 812 ------
src/gluonnlp/model/train/__init__.py | 94 -
src/gluonnlp/model/train/cache.py | 195 --
src/gluonnlp/model/train/embedding.py | 432 ---
src/gluonnlp/model/train/language_model.py | 566 ----
src/gluonnlp/model/transformer.py | 1032 --------
src/gluonnlp/model/translation.py | 242 --
src/gluonnlp/model/utils.py | 307 ---
src/gluonnlp/optimizer/__init__.py | 25 -
src/gluonnlp/optimizer/bert_adam.py | 169 --
src/gluonnlp/utils/__init__.py | 28 -
src/gluonnlp/utils/files.py | 128 -
src/gluonnlp/utils/parallel.py | 146 -
src/gluonnlp/utils/parameter.py | 261 --
src/gluonnlp/utils/seed.py | 34 -
src/gluonnlp/utils/version.py | 51 -
src/gluonnlp/vocab/__init__.py | 27 -
src/gluonnlp/vocab/bert.py | 253 --
src/gluonnlp/vocab/elmo.py | 116 -
src/gluonnlp/vocab/subwords.py | 278 --
src/gluonnlp/vocab/vocab.py | 627 -----
.../vocab/backward_compat_0_7_corrupted_index | 1 -
tests/unittest/batchify/test_batchify.py | 196 --
.../batchify/test_batchify_embedding.py | 105 -
.../batchify/test_batchify_language_model.py | 127 -
tests/unittest/conftest.py | 111 -
tests/unittest/corpora/test_gbw.py | 38 -
.../test_large_text_compression_benchmark.py | 46 -
tests/unittest/corpora/test_wikitext.py | 135 -
tests/unittest/test_attention_cell.py | 101 -
tests/unittest/test_bertvocab.py | 133 -
tests/unittest/test_bilm_encoder.py | 75 -
tests/unittest/test_candidate_sampler.py | 17 -
tests/unittest/test_convolutional_encoder.py | 99 -
tests/unittest/test_datasets.py | 806 ------
tests/unittest/test_elmo.py | 135 -
tests/unittest/test_highway.py | 56 -
tests/unittest/test_info.py | 26 -
tests/unittest/test_initializer.py | 51 -
tests/unittest/test_lamb.py | 86 -
tests/unittest/test_loss.py | 63 -
tests/unittest/test_lstmpcellwithclip.py | 40 -
tests/unittest/test_metrics.py | 59 -
tests/unittest/test_model_weight_share.py | 70 -
tests/unittest/test_models.py | 685 -----
tests/unittest/test_optimizer.py | 220 --
tests/unittest/test_preprocess_utils.py | 30 -
tests/unittest/test_pytest.py | 20 -
tests/unittest/test_sampled_logits.py | 80 -
tests/unittest/test_sampler.py | 139 -
tests/unittest/test_sanity.py | 31 -
tests/unittest/test_sequence_sampler.py | 295 ---
tests/unittest/test_stream.py | 137 -
tests/unittest/test_token_embedding.py | 140 -
tests/unittest/test_transforms.py | 402 ---
tests/unittest/test_utils.py | 212 --
tests/unittest/test_vocab_embed.py | 1468 ----------
tests/unittest/train/test_dataloader.py | 64 -
tests/unittest/train/test_datasetloader.py | 69 -
tests/unittest/train/test_embedding.py | 104 -
.../train/test_embedding/lorem_ipsum.bin | Bin 277140 -> 0 bytes
.../train/test_embedding/lorem_ipsum.vec | 66 -
.../train/test_embedding/lorem_ipsum_w2v.bin | Bin 177326 -> 0 bytes
.../train/test_embedding/lorem_ipsum_w2v.vec | 436 ---
tools/diagnose.py | 196 --
tools/plot_bucketing_strategies.py | 261 --
405 files changed, 78316 deletions(-)
delete mode 100644 CODEOWNERS
delete mode 100644 CODE_OF_CONDUCT.md
delete mode 100644 CONTRIBUTING.md
delete mode 100644 LICENSE
delete mode 100644 MANIFEST.in
delete mode 100644 Makefile
delete mode 100644 README.rst
delete mode 100644 ci/batch/docker/Dockerfile
delete mode 100755 ci/batch/docker/gluon_nlp_job.sh
delete mode 100644 ci/batch/submit-job.py
delete mode 100644 ci/batch/wait-job.py
delete mode 100755 ci/codecov.sh
delete mode 100644 ci/jenkins/Jenkinsfile_py3-master_cpu_unittest
delete mode 100644 ci/jenkins/Jenkinsfile_py3-master_gpu_doc
delete mode 100644 ci/jenkins/Jenkinsfile_py3-master_gpu_integration
delete mode 100644 ci/jenkins/Jenkinsfile_py3-master_gpu_unittest
delete mode 100644 ci/jenkins/Jenkinsfile_py3_cpu_unittest
delete mode 100644 ci/jenkins/Jenkinsfile_py3_gpu_integration
delete mode 100644 ci/jenkins/Jenkinsfile_py3_gpu_unittest
delete mode 100644 ci/jenkins/build_steps.groovy
delete mode 100644 ci/jenkins/utils.groovy
delete mode 100755 ci/prepare_clean_env.sh
delete mode 100755 ci/rat/rat-excludes
delete mode 100755 ci/upload_doc.sh
delete mode 100644 codecov.yml
delete mode 100644 conftest.py
delete mode 100644 docs/.gitignore
delete mode 100644 docs/.nojekyll
delete mode 100644 docs/404.rst
delete mode 100644 docs/Doxyfile
delete mode 100644 docs/Makefile
delete mode 100644 docs/README.txt
delete mode 100644 docs/_static/404.jpg
delete mode 100644 docs/_static/custom.css
delete mode 100644 docs/_static/gluon-logo.svg
delete mode 100644 docs/_static/gluon.ico
delete mode 100644 docs/_static/gluon_black.png
delete mode 100644 docs/_static/google_analytics.js
delete mode 100644 docs/_static/hidebib.js
delete mode 100644 docs/_static/install-options.js
delete mode 100644 docs/api/data.batchify.rst
delete mode 100644 docs/api/data.rst
delete mode 100644 docs/api/embedding.rst
delete mode 100644 docs/api/index.rst
delete mode 100644 docs/api/initializer.rst
delete mode 100644 docs/api/loss.rst
delete mode 100644 docs/api/model.rst
delete mode 100644 docs/api/model.train.rst
delete mode 100644 docs/api/optimizer.rst
delete mode 100644 docs/api/utils.rst
delete mode 100644 docs/api/vocab.rst
delete mode 100644 docs/community/contribute.rst
delete mode 100644 docs/community/git.rst
delete mode 100644 docs/community/index.rst
delete mode 100644 docs/community/release.rst
delete mode 100644 docs/conf.py
delete mode 100644 docs/examples/index.rst
delete mode 100644 docs/examples/language_model/cache_model.png
delete mode 100644 docs/examples/language_model/index.rst
delete mode 100644 docs/examples/language_model/language_model_intro.png
delete mode 100644 docs/examples/language_model/train_language_model.md
delete mode 100644 docs/examples/language_model/use_pretrained_lm.md
delete mode 100644 docs/examples/machine_translation/dataprocessor.py
delete mode 100644 docs/examples/machine_translation/gnmt.md
delete mode 100644 docs/examples/machine_translation/hyperparameters.py
delete mode 100644 docs/examples/machine_translation/index.rst
delete mode 120000 docs/examples/machine_translation/nmt
delete mode 100644 docs/examples/machine_translation/transformer.md
delete mode 100644 docs/examples/machine_translation/transformer.png
delete mode 100644 docs/examples/machine_translation/utils.py
delete mode 100644 docs/examples/notes/data_api.rst
delete mode 100644 docs/examples/notes/images/fixed_bucket_strategy_ratio0.0.png
delete mode 100644 docs/examples/notes/images/fixed_bucket_strategy_ratio0.7.png
delete mode 100644 docs/examples/notes/images/no_bucket_strategy.png
delete mode 100644 docs/examples/notes/images/sorted_bucket_strategy.png
delete mode 100644 docs/examples/notes/index.rst
delete mode 100644 docs/examples/notes/vocab_emb.rst
delete mode 120000 docs/examples/sentence_embedding/bert
delete mode 100644 docs/examples/sentence_embedding/bert-embed.png
delete mode 100644 docs/examples/sentence_embedding/bert-sentence-pair.png
delete mode 100644 docs/examples/sentence_embedding/bert.md
delete mode 100644 docs/examples/sentence_embedding/bert.png
delete mode 100644 docs/examples/sentence_embedding/dev.tsv
delete mode 100644 docs/examples/sentence_embedding/elmo_sentence_representation.md
delete mode 100644 docs/examples/sentence_embedding/sentences.json
delete mode 100644 docs/examples/sentiment_analysis/Bi-LSTM-Rep.png
delete mode 100644 docs/examples/sentiment_analysis/attention-nlp.png
delete mode 100644 docs/examples/sentiment_analysis/index.rst
delete mode 100644 docs/examples/sentiment_analysis/samodel-v3.png
delete mode 100644 docs/examples/sentiment_analysis/self_attentive_sentence_embedding.md
delete mode 100644 docs/examples/sentiment_analysis/sentiment_analysis.md
delete mode 100644 docs/examples/sequence_sampling/index.rst
delete mode 100644 docs/examples/sequence_sampling/sequence_sampling.md
delete mode 120000 docs/examples/word_embedding/data.py
delete mode 100644 docs/examples/word_embedding/index.rst
delete mode 120000 docs/examples/word_embedding/model.py
delete mode 120000 docs/examples/word_embedding/utils.py
delete mode 100644 docs/examples/word_embedding/word_embedding.md
delete mode 100644 docs/examples/word_embedding/word_embedding_training.md
delete mode 100644 docs/genindex.rst
delete mode 100644 docs/index.rst
delete mode 100644 docs/install.rst
delete mode 100644 docs/install/install-include.rst
delete mode 100644 docs/install/install-more.rst
delete mode 100644 docs/md2ipynb.py
delete mode 120000 docs/model_zoo
delete mode 100644 docs/model_zoo.rst
delete mode 100644 env/cpu/py3-master.yml
delete mode 100644 env/cpu/py3.yml
delete mode 100644 env/docker/py3.yml
delete mode 100644 env/gpu/py3-master.yml
delete mode 100644 env/gpu/py3.yml
delete mode 120000 examples
delete mode 100644 mms/README.rst
delete mode 100644 mms/bert.py
delete mode 100644 pytest.ini
delete mode 100644 scripts/__init__.py
delete mode 100644 scripts/bert/__init__.py
delete mode 100644 scripts/bert/bert_qa_evaluate.py
delete mode 100644 scripts/bert/data/__init__.py
delete mode 100644 scripts/bert/data/create_pretraining_data.py
delete mode 100644 scripts/bert/data/embedding.py
delete mode 100644 scripts/bert/data/transform.py
delete mode 100644 scripts/bert/embedding.py
delete mode 100644 scripts/bert/export.py
delete mode 100644 scripts/bert/finetune_classifier.py
delete mode 100644 scripts/bert/finetune_squad.py
delete mode 100644 scripts/bert/fp16_utils.py
delete mode 100644 scripts/bert/index.rst
delete mode 100644 scripts/bert/model/__init__.py
delete mode 100644 scripts/bert/model/qa.py
delete mode 100644 scripts/bert/pretraining_utils.py
delete mode 100644 scripts/bert/run_pretraining.py
delete mode 100644 scripts/bert/sample_text.txt
delete mode 100644 scripts/bert/utils.py
delete mode 100644 scripts/conversion_tools/compare_gluon_ernie.py
delete mode 100644 scripts/conversion_tools/compare_tf_gluon_model.py
delete mode 100644 scripts/conversion_tools/convert_fairseq_model.py
delete mode 100644 scripts/conversion_tools/convert_paddle_to_gluon.py
delete mode 100644 scripts/conversion_tools/convert_pytorch_model.py
delete mode 100644 scripts/conversion_tools/convert_pytorch_transformers.py
delete mode 100644 scripts/conversion_tools/convert_tf_model.py
delete mode 100644 scripts/conversion_tools/index.rst
delete mode 100644 scripts/conversion_tools/infer_pytorch_gluon_parameter_name_mapping.py
delete mode 100644 scripts/conversion_tools/input.txt
delete mode 100644 scripts/conversion_tools/input_cn.txt
delete mode 100644 scripts/conversion_tools/utils.py
delete mode 100644 scripts/index.rst
delete mode 100644 scripts/intent_cls_slot_labeling/finetune_icsl.py
delete mode 100644 scripts/intent_cls_slot_labeling/index.rst
delete mode 100644 scripts/language_model/__init__.py
delete mode 100644 scripts/language_model/cache_language_model.py
delete mode 100644 scripts/language_model/conversion_utils/compare_transformerxl_pytorch_gluon_model.py
delete mode 100644 scripts/language_model/conversion_utils/compare_xlnet_pytorch_gluon_model.py
delete mode 100644 scripts/language_model/conversion_utils/convert_transformer_xl.py
delete mode 100644 scripts/language_model/conversion_utils/convert_xlnet.py
delete mode 100644 scripts/language_model/conversion_utils/utils.py
delete mode 100644 scripts/language_model/index.rst
delete mode 100644 scripts/language_model/large_word_language_model.py
delete mode 100644 scripts/language_model/model/XLNet_classifier.py
delete mode 100644 scripts/language_model/model/qa.py
delete mode 100644 scripts/language_model/run_glue.py
delete mode 100644 scripts/language_model/run_squad.py
delete mode 100644 scripts/language_model/sampler.py
delete mode 100644 scripts/language_model/transformer/__init__.py
delete mode 100644 scripts/language_model/transformer/attention_cell.py
delete mode 100644 scripts/language_model/transformer/data.py
delete mode 100644 scripts/language_model/transformer/embedding.py
delete mode 100644 scripts/language_model/transformer/model.py
delete mode 100644 scripts/language_model/transformer/softmax.py
delete mode 100644 scripts/language_model/transformer/transformer.py
delete mode 100644 scripts/language_model/transformer_xl.py
delete mode 100644 scripts/language_model/word_language_model.py
delete mode 100644 scripts/language_model/xlnet_qa_evaluate.py
delete mode 100644 scripts/machine_translation/__init__.py
delete mode 100644 scripts/machine_translation/_constants.py
delete mode 100644 scripts/machine_translation/bleu.py
delete mode 100644 scripts/machine_translation/dataprocessor.py
delete mode 100644 scripts/machine_translation/dataset.py
delete mode 100644 scripts/machine_translation/gnmt.py
delete mode 100644 scripts/machine_translation/hyperparameters.py
delete mode 100644 scripts/machine_translation/index.rst
delete mode 100644 scripts/machine_translation/inference_transformer.py
delete mode 100644 scripts/machine_translation/train_gnmt.py
delete mode 100644 scripts/machine_translation/train_transformer.py
delete mode 100644 scripts/machine_translation/translation.py
delete mode 100644 scripts/machine_translation/utils.py
delete mode 100644 scripts/natural_language_inference/dataset.py
delete mode 100644 scripts/natural_language_inference/decomposable_attention.py
delete mode 100644 scripts/natural_language_inference/esim.py
delete mode 100644 scripts/natural_language_inference/index.rst
delete mode 100644 scripts/natural_language_inference/main.py
delete mode 100644 scripts/natural_language_inference/preprocess.py
delete mode 100644 scripts/natural_language_inference/utils.py
delete mode 100644 scripts/ner/data.py
delete mode 100644 scripts/ner/dataset_sample/test_sample.txt
delete mode 100644 scripts/ner/dataset_sample/train_sample.txt
delete mode 100644 scripts/ner/dataset_sample/validation_sample.txt
delete mode 100644 scripts/ner/finetune_bert.py
delete mode 100644 scripts/ner/index.rst
delete mode 100644 scripts/ner/model.py
delete mode 100644 scripts/ner/ner_utils.py
delete mode 100644 scripts/ner/predict_ner.py
delete mode 100644 scripts/parsing/__init__.py
delete mode 100644 scripts/parsing/common/__init__.py
delete mode 100644 scripts/parsing/common/config.py
delete mode 100644 scripts/parsing/common/data.py
delete mode 100644 scripts/parsing/common/exponential_scheduler.py
delete mode 100755 scripts/parsing/common/k_means.py
delete mode 100644 scripts/parsing/common/savable.py
delete mode 100755 scripts/parsing/common/tarjan.py
delete mode 100644 scripts/parsing/common/utils.py
delete mode 100644 scripts/parsing/index.rst
delete mode 100644 scripts/parsing/parser/__init__.py
delete mode 100644 scripts/parsing/parser/biaffine_parser.py
delete mode 100644 scripts/parsing/parser/dep_parser.py
delete mode 100644 scripts/parsing/parser/evaluate/__init__.py
delete mode 100644 scripts/parsing/parser/evaluate/evaluate.py
delete mode 100644 scripts/question_answering/__init__.py
delete mode 100644 scripts/question_answering/data_pipeline.py
delete mode 100644 scripts/question_answering/utils.py
delete mode 100644 scripts/sentiment_analysis/__init__.py
delete mode 100644 scripts/sentiment_analysis/finetune_lm.py
delete mode 100644 scripts/sentiment_analysis/index.rst
delete mode 100644 scripts/sentiment_analysis/process_data.py
delete mode 100644 scripts/sentiment_analysis/sentiment_analysis_cnn.py
delete mode 100644 scripts/sentiment_analysis/text_cnn.py
delete mode 100644 scripts/tests/__init__.py
delete mode 100644 scripts/tests/conftest.py
delete mode 100644 scripts/tests/multi-bleu-detok.perl
delete mode 100644 scripts/tests/multi-bleu.perl
delete mode 100644 scripts/tests/test_bert_checkpoints.py
delete mode 100644 scripts/tests/test_bert_dataset_transform.py
delete mode 100644 scripts/tests/test_bert_embedding.py
delete mode 100644 scripts/tests/test_bleu.py
delete mode 100644 scripts/tests/test_dataprocessor.py
delete mode 100644 scripts/tests/test_encoder_decoder.py
delete mode 100644 scripts/tests/test_models.py
delete mode 100644 scripts/tests/test_question_answering.py
delete mode 100644 scripts/tests/test_references.txt
delete mode 100644 scripts/tests/test_sampler.py
delete mode 100644 scripts/tests/test_sanity.py
delete mode 100644 scripts/tests/test_scripts.py
delete mode 100644 scripts/tests/test_transformer_xl.py
delete mode 100644 scripts/tests/test_translations.txt
delete mode 100644 scripts/tests/test_xlnet.py
delete mode 100644 scripts/tests/word_embeddings/glove/cooccurrences.npz
delete mode 100644 scripts/tests/word_embeddings/glove/vocab.txt
delete mode 100644 scripts/text_classification/fasttext_word_ngram.py
delete mode 100644 scripts/text_classification/index.rst
delete mode 100644 scripts/text_generation/__init__.py
delete mode 100644 scripts/text_generation/index.rst
delete mode 100644 scripts/text_generation/model/__init__.py
delete mode 100644 scripts/text_generation/model/gpt.py
delete mode 100644 scripts/text_generation/sequence_sampling.py
delete mode 100644 scripts/word_embeddings/data.py
delete mode 100644 scripts/word_embeddings/evaluate_pretrained.py
delete mode 100644 scripts/word_embeddings/evaluation.py
delete mode 100644 scripts/word_embeddings/executors.py
delete mode 100644 scripts/word_embeddings/extract_vocab.py
delete mode 100644 scripts/word_embeddings/index.rst
delete mode 100644 scripts/word_embeddings/model.py
delete mode 100755 scripts/word_embeddings/run_all.sh
delete mode 100644 scripts/word_embeddings/tools/CMakeLists.txt
delete mode 100644 scripts/word_embeddings/tools/cooccur.cc
delete mode 160000 scripts/word_embeddings/tools/extern/CLI11
delete mode 160000 scripts/word_embeddings/tools/extern/cnpy
delete mode 160000 scripts/word_embeddings/tools/extern/sparsepp
delete mode 100644 scripts/word_embeddings/tools/utils.h
delete mode 100644 scripts/word_embeddings/tools/vocab_count.cc
delete mode 100644 scripts/word_embeddings/train_glove.py
delete mode 100644 scripts/word_embeddings/train_sg_cbow.py
delete mode 100644 scripts/word_embeddings/utils.py
delete mode 100644 setup.py
delete mode 100644 src/gluonnlp/__init__.py
delete mode 100644 src/gluonnlp/_constants.py
delete mode 100644 src/gluonnlp/base.py
delete mode 100644 src/gluonnlp/calibration/__init__.py
delete mode 100644 src/gluonnlp/calibration/collector.py
delete mode 100644 src/gluonnlp/data/__init__.py
delete mode 100644 src/gluonnlp/data/baidu_ernie_data.py
delete mode 100644 src/gluonnlp/data/batchify/__init__.py
delete mode 100644 src/gluonnlp/data/batchify/batchify.py
delete mode 100644 src/gluonnlp/data/batchify/embedding.py
delete mode 100644 src/gluonnlp/data/batchify/language_model.py
delete mode 100644 src/gluonnlp/data/bert/__init__.py
delete mode 100644 src/gluonnlp/data/bert/glue.py
delete mode 100644 src/gluonnlp/data/bert/squad.py
delete mode 100644 src/gluonnlp/data/candidate_sampler.py
delete mode 100644 src/gluonnlp/data/classification.py
delete mode 100644 src/gluonnlp/data/conll.py
delete mode 100644 src/gluonnlp/data/corpora/__init__.py
delete mode 100644 src/gluonnlp/data/corpora/google_billion_word.py
delete mode 100644 src/gluonnlp/data/corpora/large_text_compression_benchmark.py
delete mode 100644 src/gluonnlp/data/corpora/wikitext.py
delete mode 100644 src/gluonnlp/data/dataloader.py
delete mode 100644 src/gluonnlp/data/dataset.py
delete mode 100644 src/gluonnlp/data/datasetloader.py
delete mode 100644 src/gluonnlp/data/fast_bert_tokenizer.pyx
delete mode 100644 src/gluonnlp/data/glue.py
delete mode 100644 src/gluonnlp/data/intent_slot.py
delete mode 100644 src/gluonnlp/data/question_answering.py
delete mode 100644 src/gluonnlp/data/registry.py
delete mode 100644 src/gluonnlp/data/sampler.py
delete mode 100644 src/gluonnlp/data/sentiment.py
delete mode 100644 src/gluonnlp/data/stream.py
delete mode 100644 src/gluonnlp/data/super_glue.py
delete mode 100644 src/gluonnlp/data/transforms.py
delete mode 100644 src/gluonnlp/data/translation.py
delete mode 100644 src/gluonnlp/data/utils.py
delete mode 100644 src/gluonnlp/data/word_embedding_evaluation.py
delete mode 100644 src/gluonnlp/data/xlnet/__init__.py
delete mode 100644 src/gluonnlp/data/xlnet/squad.py
delete mode 100644 src/gluonnlp/embedding/__init__.py
delete mode 100644 src/gluonnlp/embedding/evaluation.py
delete mode 100644 src/gluonnlp/embedding/token_embedding.py
delete mode 100644 src/gluonnlp/initializer/__init__.py
delete mode 100644 src/gluonnlp/initializer/initializer.py
delete mode 100644 src/gluonnlp/loss/__init__.py
delete mode 100644 src/gluonnlp/loss/activation_regularizer.py
delete mode 100644 src/gluonnlp/loss/label_smoothing.py
delete mode 100644 src/gluonnlp/loss/loss.py
delete mode 100644 src/gluonnlp/metric/__init__.py
delete mode 100644 src/gluonnlp/metric/length_normalized_loss.py
delete mode 100644 src/gluonnlp/metric/masked_accuracy.py
delete mode 100644 src/gluonnlp/model/__init__.py
delete mode 100644 src/gluonnlp/model/attention_cell.py
delete mode 100644 src/gluonnlp/model/bert.py
delete mode 100644 src/gluonnlp/model/bilm_encoder.py
delete mode 100644 src/gluonnlp/model/block.py
delete mode 100644 src/gluonnlp/model/convolutional_encoder.py
delete mode 100644 src/gluonnlp/model/elmo.py
delete mode 100644 src/gluonnlp/model/highway.py
delete mode 100644 src/gluonnlp/model/info.py
delete mode 100644 src/gluonnlp/model/language_model.py
delete mode 100644 src/gluonnlp/model/lstmpcellwithclip.py
delete mode 100644 src/gluonnlp/model/parameter.py
delete mode 100644 src/gluonnlp/model/sampled_block.py
delete mode 100644 src/gluonnlp/model/seq2seq_encoder_decoder.py
delete mode 100644 src/gluonnlp/model/sequence_sampler.py
delete mode 100644 src/gluonnlp/model/train/__init__.py
delete mode 100644 src/gluonnlp/model/train/cache.py
delete mode 100644 src/gluonnlp/model/train/embedding.py
delete mode 100644 src/gluonnlp/model/train/language_model.py
delete mode 100644 src/gluonnlp/model/transformer.py
delete mode 100644 src/gluonnlp/model/translation.py
delete mode 100644 src/gluonnlp/model/utils.py
delete mode 100644 src/gluonnlp/optimizer/__init__.py
delete mode 100644 src/gluonnlp/optimizer/bert_adam.py
delete mode 100644 src/gluonnlp/utils/__init__.py
delete mode 100644 src/gluonnlp/utils/files.py
delete mode 100644 src/gluonnlp/utils/parallel.py
delete mode 100644 src/gluonnlp/utils/parameter.py
delete mode 100644 src/gluonnlp/utils/seed.py
delete mode 100644 src/gluonnlp/utils/version.py
delete mode 100644 src/gluonnlp/vocab/__init__.py
delete mode 100644 src/gluonnlp/vocab/bert.py
delete mode 100644 src/gluonnlp/vocab/elmo.py
delete mode 100644 src/gluonnlp/vocab/subwords.py
delete mode 100644 src/gluonnlp/vocab/vocab.py
delete mode 100644 tests/data/vocab/backward_compat_0_7_corrupted_index
delete mode 100644 tests/unittest/batchify/test_batchify.py
delete mode 100644 tests/unittest/batchify/test_batchify_embedding.py
delete mode 100644 tests/unittest/batchify/test_batchify_language_model.py
delete mode 100644 tests/unittest/conftest.py
delete mode 100644 tests/unittest/corpora/test_gbw.py
delete mode 100644 tests/unittest/corpora/test_large_text_compression_benchmark.py
delete mode 100644 tests/unittest/corpora/test_wikitext.py
delete mode 100644 tests/unittest/test_attention_cell.py
delete mode 100644 tests/unittest/test_bertvocab.py
delete mode 100644 tests/unittest/test_bilm_encoder.py
delete mode 100644 tests/unittest/test_candidate_sampler.py
delete mode 100644 tests/unittest/test_convolutional_encoder.py
delete mode 100644 tests/unittest/test_datasets.py
delete mode 100644 tests/unittest/test_elmo.py
delete mode 100644 tests/unittest/test_highway.py
delete mode 100644 tests/unittest/test_info.py
delete mode 100644 tests/unittest/test_initializer.py
delete mode 100644 tests/unittest/test_lamb.py
delete mode 100644 tests/unittest/test_loss.py
delete mode 100644 tests/unittest/test_lstmpcellwithclip.py
delete mode 100644 tests/unittest/test_metrics.py
delete mode 100644 tests/unittest/test_model_weight_share.py
delete mode 100644 tests/unittest/test_models.py
delete mode 100644 tests/unittest/test_optimizer.py
delete mode 100644 tests/unittest/test_preprocess_utils.py
delete mode 100644 tests/unittest/test_pytest.py
delete mode 100644 tests/unittest/test_sampled_logits.py
delete mode 100644 tests/unittest/test_sampler.py
delete mode 100644 tests/unittest/test_sanity.py
delete mode 100644 tests/unittest/test_sequence_sampler.py
delete mode 100644 tests/unittest/test_stream.py
delete mode 100644 tests/unittest/test_token_embedding.py
delete mode 100644 tests/unittest/test_transforms.py
delete mode 100644 tests/unittest/test_utils.py
delete mode 100644 tests/unittest/test_vocab_embed.py
delete mode 100644 tests/unittest/train/test_dataloader.py
delete mode 100644 tests/unittest/train/test_datasetloader.py
delete mode 100644 tests/unittest/train/test_embedding.py
delete mode 100644 tests/unittest/train/test_embedding/lorem_ipsum.bin
delete mode 100644 tests/unittest/train/test_embedding/lorem_ipsum.vec
delete mode 100644 tests/unittest/train/test_embedding/lorem_ipsum_w2v.bin
delete mode 100644 tests/unittest/train/test_embedding/lorem_ipsum_w2v.vec
delete mode 100644 tools/diagnose.py
delete mode 100644 tools/plot_bucketing_strategies.py
diff --git a/CODEOWNERS b/CODEOWNERS
deleted file mode 100644
index 11af321c0e..0000000000
--- a/CODEOWNERS
+++ /dev/null
@@ -1,12 +0,0 @@
-# Watchers and contributors to Apache MXNet repo directories/packages/files
-# Please see documentation of use of CODEOWNERS file at
-# https://help.github.com/articles/about-codeowners/ and
-# https://github.com/blog/2392-introducing-code-owners
-#
-# Anybody can add themselves or a team as additional watcher or contributor
-# to get notified about changes in a specific package.
-# See https://help.github.com/articles/about-teams how to setup teams.
-
-
-# Global owners
-* @dmlc/gluon-nlp-committers @dmlc/gluon-nlp-reviewers
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
deleted file mode 100644
index 81b284a9ef..0000000000
--- a/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,77 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age, body
-size, disability, ethnicity, sex characteristics, gender identity and expression,
-level of experience, education, socio-economic status, nationality, personal
-appearance, race, religion, or sexual identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an appointed
-representative at an online or offline event. Representation of a project may be
-further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team in GitHub issues/pull requests
-by mentioning @dmlc/gluon-nlp-committers. All
-complaints will be reviewed and investigated and will result in a response that
-is deemed necessary and appropriate to the circumstances. The project team is
-obligated to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
-available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
-
-[homepage]: https://www.contributor-covenant.org
-
-For answers to common questions about this code of conduct, see
-https://www.contributor-covenant.org/faq
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index abb8a2119f..0000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1 +0,0 @@
-Contribution guideline can be found at http://gluon-nlp.mxnet.io/community/contribute.html
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 261eeb9e9f..0000000000
--- a/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
deleted file mode 100644
index 5ebc05b4eb..0000000000
--- a/MANIFEST.in
+++ /dev/null
@@ -1,5 +0,0 @@
-recursive-include gluonnlp *.py
-include LICENSE
-include README.rst
-recursive-exclude tests *
-recursive-exclude scripts *
\ No newline at end of file
diff --git a/Makefile b/Makefile
deleted file mode 100644
index 90b1b01e19..0000000000
--- a/Makefile
+++ /dev/null
@@ -1,113 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-ROOTDIR = $(CURDIR)
-MD2IPYNB = $(ROOTDIR)/docs/md2ipynb.py
-
-flake8:
- flake8 --exclude conda,*tests*,test_*.py,scripts/word_embeddings/tools/extern --count --select=E9,F63,F7,F82 --show-source --statistics $(lintdir)
-
-pylint:
- pylint --rcfile=$(ROOTDIR)/.pylintrc $(lintdir)
-
-pytype:
- pytype --config=$(ROOTDIR)/.pytype.cfg
-
-restruc:
- python setup.py check --restructuredtext --strict
-
-lint:
- make lintdir=$(lintdir) flake8
- make lintdir=$(lintdir) pylint
- make pytype
- make lintdir=$(lintdir) ratcheck
- make restruc
-
-ci/rat/apache-rat.jar:
- mkdir -p build
- svn co http://svn.apache.org/repos/asf/creadur/rat/tags/apache-rat-project-0.13/ ci/rat/apache-rat; \
- cd ci/rat/apache-rat/apache-rat; \
- mvn -Dmaven.test.skip=true install;
- cp ci/rat/apache-rat/apache-rat/target/apache-rat-0.13.jar ci/rat/apache-rat.jar
-
-ratcheck: ci/rat/apache-rat.jar
- exec 5>&1; \
- RAT_JAR=ci/rat/apache-rat.jar; \
- OUTPUT=$(java -jar $(RAT_JAR) -E ci/rat/rat-excludes -d $(lintdir) | tee >(cat - >&5)); \
- ERROR_MESSAGE="Printing headers for text files without a valid license header"; \
- echo "-------Process The Output-------"; \
- if [[ $OUTPUT =~ $ERROR_MESSAGE ]]; then \
- echo "ERROR: RAT Check detected files with unknown licenses. Please fix and run test again!"; \
- exit 1; \
- else \
- echo "SUCCESS: There are no files with an Unknown License."; \
- fi
-
-docs: compile_notebooks distribute
- make -C docs html SPHINXOPTS=-W
- for f in $(shell find docs/examples -type f -name '*.md' -print) ; do \
- FILE=`echo $$f | sed 's/docs\///g'` ; \
- DIR=`dirname $$FILE` ; \
- BASENAME=`basename $$FILE` ; \
- HTML_BASENAME=`echo $$BASENAME | sed 's/md/html/'` ; \
- IPYNB_BASENAME=`echo $$BASENAME | sed 's/md/ipynb/'` ; \
- TARGET_HTML="docs/_build/html/$$DIR/$$HTML_BASENAME" ; \
- echo "processing" $$BASENAME ; \
- sed -i "s/$$IPYNB_BASENAME/$$BASENAME/g" $$TARGET_HTML; \
- done;
- for f in $(shell find docs/model_zoo -type f -name '*.rst' -print) ; do \
- DIR=`dirname $$f` ; \
- BASENAME=`basename $$f` ; \
- HTML_BASENAME=`echo $$BASENAME | sed 's/rst/html/'` ; \
- TARGET_HTML="docs/_build/html/$$DIR/$$HTML_BASENAME" ; \
- echo "processing" $$BASENAME ; \
- sed -i "s/docs\/model_zoo/scripts/g" $$TARGET_HTML; \
- done;
- sed -i.bak 's/33\,150\,243/23\,141\,201/g' docs/_build/html/_static/material-design-lite-1.3.0/material.blue-deep_orange.min.css;
- sed -i.bak 's/2196f3/178dc9/g' docs/_build/html/_static/sphinx_materialdesign_theme.css;
-
-clean:
- git clean -ff -d -x --exclude="$(ROOTDIR)/tests/data/*" --exclude="$(ROOTDIR)/conda/"
-
-compile_notebooks:
- for f in $(shell find docs/examples -type f -name '*.md' -print) ; do \
- DIR=$$(dirname $$f) ; \
- BASENAME=$$(basename $$f) ; \
- TARGETNAME=$${BASENAME%.md}.ipynb ; \
- echo $$DIR $$BASENAME $$TARGETNAME; \
- cd $$DIR ; \
- if [ -f $$TARGETNAME ]; then \
- echo $$TARGETNAME exists. Skipping compilation of $$BASENAME in Makefile. ; \
- else \
- python $(MD2IPYNB) $$BASENAME ; \
- fi ; \
- cd - ; \
- done;
-
-dist_scripts:
- cd scripts && \
- find * -type d -prune | grep -v 'tests\|__pycache__' | xargs -t -n 1 -I{} zip -r {}.zip {}
-
-dist_notebooks:
- cd docs/examples && \
- find * -type d -prune | grep -v 'tests\|__pycache__' | xargs -t -n 1 -I{} zip -r {}.zip {} -x "*.md" -x "__pycache__" -x "*.pyc" -x "*.txt" -x "*.log" -x "*.params" -x "*.npz" -x "*.json"
-
-test:
- py.test -v --capture=no --durations=0 tests/unittest scripts
-
-distribute: dist_scripts dist_notebooks
- python setup.py sdist
diff --git a/README.rst b/README.rst
deleted file mode 100644
index cf004dc838..0000000000
--- a/README.rst
+++ /dev/null
@@ -1,218 +0,0 @@
-.. raw:: html
-
-
-
-
-.. raw:: html
-
-
-
-GluonNLP: Your Choice of Deep Learning for NLP
-
-.. raw:: html
-
-
-
-.. raw:: html
-
-
-
-
-
-
-GluonNLP is a toolkit that enables easy text preprocessing, datasets
-loading and neural models building to help you speed up your Natural
-Language Processing (NLP) research.
-
-- `Quick Start Guide `__
-- `Resources `__
-
-News
-====
-
-- Tutorial proposal for GluonNLP is accepted at `EMNLP 2019 `__, Hong Kong.
-
-- GluonNLP was featured in:
-
- - **KDD 2019 Alaska**! Check out our tutorial: `From Shallow to Deep Language Representations: Pre-training, Fine-tuning, and Beyond `__.
- - **JSALT 2019 in Montreal, 2019-6-14**! Checkout **https://jsalt19.mxnet.io**.
- - **AWS re:invent 2018 in Las Vegas, 2018-11-28**! Checkout `details `_.
- - **PyData 2018 NYC, 2018-10-18**! Checkout the `awesome talk `__ by Sneha Jha.
- - **KDD 2018 London, 2018-08-21, Apache MXNet Gluon tutorial**! Check out **https://kdd18.mxnet.io**.
-
-Installation
-============
-
-Make sure you have Python 3.5 or newer and a recent version of MXNet (our CI
-server runs the testsuite with Python 3.5).
-
-You can install ``MXNet`` and ``GluonNLP`` using pip.
-
-``GluonNLP`` is based on the most recent version of ``MXNet``.
-
-
-In particular, if you want to install the most recent ``MXNet`` release:
-
-::
-
- pip install --upgrade mxnet>=1.6.0
-
-Else, if you want to install the most recent ``MXNet`` nightly build:
-
-::
-
- pip install --pre --upgrade mxnet
-
-Then, you can install ``GluonNLP``:
-
-::
-
- pip install gluonnlp
-
-Please check more `installation details `_.
-
-Docs 📖
-=======
-
-GluonNLP documentation is available at `our
-website `__.
-
-Community
-=========
-
-GluonNLP is a community that believes in sharing.
-
-For questions, comments, and bug reports, `Github issues `__ is the best way to reach us.
-
-We now have a new Slack channel `here `__.
-(`register `__).
-
-How to Contribute
-=================
-
-GluonNLP community welcomes contributions from anyone!
-
-There are lots of opportunities for you to become our `contributors `__:
-
-- Ask or answer questions on `GitHub issues `__.
-- Propose ideas, or review proposed design ideas on `GitHub issues `__.
-- Improve the `documentation `__.
-- Contribute bug reports `GitHub issues `__.
-- Write new `scripts `__ to reproduce
- state-of-the-art results.
-- Write new `examples `__ to explain
- key ideas in NLP methods and models.
-- Write new `public datasets `__
- (license permitting).
-- Most importantly, if you have an idea of how to contribute, then do it!
-
-For a list of open starter tasks, check `good first issues `__.
-
-Also see our `contributing
-guide `__ on simple how-tos,
-contribution guidelines and more.
-
-Resources
-=========
-
-Check out how to use GluonNLP for your own research or projects.
-
-If you are new to Gluon, please check out our `60-minute crash course
-`__.
-
-For getting started quickly, refer to notebook runnable examples at
-`Examples. `__
-
-For advanced examples, check out our
-`Scripts. `__
-
-For experienced users, check out our
-`API Notes `__.
-
-Quick Start Guide
-=================
-
-`Dataset Loading `__
--------------------------------------------------------------------------------
-
-Load the Wikitext-2 dataset, for example:
-
-.. code:: python
-
- >>> import gluonnlp as nlp
- >>> train = nlp.data.WikiText2(segment='train')
- >>> train[0:5]
- ['=', 'Valkyria', 'Chronicles', 'III', '=']
-
-`Vocabulary Construction `__
--------------------------------------------------------------------------------------
-
-Build vocabulary based on the above dataset, for example:
-
-.. code:: python
-
- >>> vocab = nlp.Vocab(counter=nlp.data.Counter(train))
- >>> vocab
- Vocab(size=33280, unk="", reserved="['', '', '']")
-
-`Neural Models Building `__
-------------------------------------------------------------------------------------
-
-From the models package, apply a Standard RNN language model to the
-above dataset:
-
-.. code:: python
-
- >>> model = nlp.model.language_model.StandardRNN('lstm', len(vocab),
- ... 200, 200, 2, 0.5, True)
- >>> model
- StandardRNN(
- (embedding): HybridSequential(
- (0): Embedding(33280 -> 200, float32)
- (1): Dropout(p = 0.5, axes=())
- )
- (encoder): LSTM(200 -> 200.0, TNC, num_layers=2, dropout=0.5)
- (decoder): HybridSequential(
- (0): Dense(200 -> 33280, linear)
- )
- )
-
-`Word Embeddings Loading `__
------------------------------------------------------------------------------------------
-
-For example, load a GloVe word embedding, one of the state-of-the-art
-English word embeddings:
-
-.. code:: python
-
- >>> glove = nlp.embedding.create('glove', source='glove.6B.50d')
- # Obtain vectors for 'baby' in the GloVe word embedding
- >>> type(glove['baby'])
-
- >>> glove['baby'].shape
- (50,)
-
-
-Reference Paper
-===============
-
-The bibtex entry for the `reference paper `__ of GluonNLP is:
-
-.. code::
-
- @article{gluoncvnlp2020,
- author = {Jian Guo and He He and Tong He and Leonard Lausen and Mu Li and Haibin Lin and Xingjian Shi and Chenguang Wang and Junyuan Xie and Sheng Zha and Aston Zhang and Hang Zhang and Zhi Zhang and Zhongyue Zhang and Shuai Zheng and Yi Zhu},
- title = {GluonCV and GluonNLP: Deep Learning in Computer Vision and Natural Language Processing},
- journal = {Journal of Machine Learning Research},
- year = {2020},
- volume = {21},
- number = {23},
- pages = {1-7},
- url = {http://jmlr.org/papers/v21/19-429.html}
- }
-
-
-New to Deep Learning or NLP?
-============================
-
-For background knowledge of deep learning or NLP, please refer to the open source book `Dive into Deep Learning `__.
diff --git a/ci/batch/docker/Dockerfile b/ci/batch/docker/Dockerfile
deleted file mode 100644
index 8cc64125b5..0000000000
--- a/ci/batch/docker/Dockerfile
+++ /dev/null
@@ -1,27 +0,0 @@
-FROM nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
-
- RUN apt-get update && apt-get install -y --no-install-recommends \
- build-essential \
- locales \
- cmake \
- git \
- curl \
- vim \
- unzip \
- sudo \
- ca-certificates \
- libjpeg-dev \
- libpng-dev \
- libfreetype6-dev \
- libxft-dev &&\
- rm -rf /var/lib/apt/lists/*
-
- RUN curl -o ~/miniconda.sh -O https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh && \
- chmod +x ~/miniconda.sh && \
- ~/miniconda.sh -b -p /opt/conda && \
- rm ~/miniconda.sh && \
- /opt/conda/bin/conda clean -ya
- ENV PATH /opt/conda/bin:$PATH
- RUN git clone https://github.com/dmlc/gluon-nlp
- WORKDIR gluon-nlp
- ADD gluon_nlp_job.sh .
diff --git a/ci/batch/docker/gluon_nlp_job.sh b/ci/batch/docker/gluon_nlp_job.sh
deleted file mode 100755
index 38be81db67..0000000000
--- a/ci/batch/docker/gluon_nlp_job.sh
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/bin/bash
-date
-echo "Args: $@"
-env
-echo "jobId: $AWS_BATCH_JOB_ID"
-echo "jobQueue: $AWS_BATCH_JQ_NAME"
-echo "computeEnvironment: $AWS_BATCH_CE_NAME"
-
-SOURCE_REF=$1
-CONDA_ENV=$2
-WORK_DIR=$3
-COMMAND=$4
-SAVED_OUTPUT=$5
-SAVE_PATH=$6
-REMOTE=$7
-
-if [ ! -z $REMOTE ]; then
- git remote set-url origin $REMOTE
-fi;
-
-git fetch origin $SOURCE_REF:working
-git checkout working
-conda env update --prune -p conda/$CONDA_ENV -f env/$CONDA_ENV.yml
-source activate ./conda/$CONDA_ENV
-pip install -v -e .
-python -m spacy download en
-python -m spacy download de
-python -m nltk.downloader all
-pip install awscli
-
-cd $WORK_DIR
-/bin/bash -o pipefail -c "$COMMAND"
-COMMAND_EXIT_CODE=$?
-if [[ -f $SAVED_OUTPUT ]]; then
- aws s3 cp $SAVED_OUTPUT s3://gluon-nlp-staging/$SAVE_PATH;
-elif [[ -d $SAVED_OUTPUT ]]; then
- aws s3 cp --recursive $SAVED_OUTPUT s3://gluon-nlp-staging/$SAVE_PATH;
-fi;
-exit $COMMAND_EXIT_CODE
diff --git a/ci/batch/submit-job.py b/ci/batch/submit-job.py
deleted file mode 100644
index ec99e44f47..0000000000
--- a/ci/batch/submit-job.py
+++ /dev/null
@@ -1,154 +0,0 @@
-import argparse
-import random
-import re
-import sys
-import time
-from datetime import datetime
-
-import boto3
-from botocore.compat import total_seconds
-
-parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
-parser.add_argument('--profile', help='profile name of aws account.', type=str,
- default=None)
-parser.add_argument('--region', help='Default region when creating new connections', type=str,
- default=None)
-parser.add_argument('--name', help='name of the job', type=str, default='dummy')
-parser.add_argument('--job-queue', help='name of the job queue to submit this job', type=str,
- default='gluon-nlp-jobs')
-parser.add_argument('--job-definition', help='name of the job job definition', type=str,
- default='gluon-nlp-jobs:8')
-parser.add_argument('--source-ref',
- help='ref in GluonNLP main github. e.g. master, refs/pull/500/head',
- type=str, default='master')
-parser.add_argument('--work-dir',
- help='working directory inside the repo. e.g. scripts/sentiment_analysis',
- type=str, default='scripts/bert')
-parser.add_argument('--saved-output',
- help='output to be saved, relative to working directory. '
- 'it can be either a single file or a directory',
- type=str, default='.')
-parser.add_argument('--save-path',
- help='s3 path where files are saved.',
- type=str, default='batch/temp/{}'.format(datetime.now().isoformat()))
-parser.add_argument('--conda-env',
- help='conda environment preset to use.',
- type=str, default='gpu/py3')
-parser.add_argument('--command', help='command to run', type=str,
- default='git rev-parse HEAD | tee stdout.log')
-parser.add_argument('--remote',
- help='git repo address. https://github.com/dmlc/gluon-nlp',
- type=str, default="https://github.com/dmlc/gluon-nlp")
-parser.add_argument('--wait', help='block wait until the job completes. '
- 'Non-zero exit code if job fails.', action='store_true')
-parser.add_argument('--timeout', help='job timeout in seconds', default=None, type=int)
-
-args = parser.parse_args()
-
-session = boto3.Session(profile_name=args.profile, region_name=args.region)
-batch, cloudwatch = [session.client(service_name=sn) for sn in ['batch', 'logs']]
-
-def printLogs(logGroupName, logStreamName, startTime):
- kwargs = {'logGroupName': logGroupName,
- 'logStreamName': logStreamName,
- 'startTime': startTime,
- 'startFromHead': True}
-
- lastTimestamp = 0
- while True:
- logEvents = cloudwatch.get_log_events(**kwargs)
-
- for event in logEvents['events']:
- lastTimestamp = event['timestamp']
- timestamp = datetime.utcfromtimestamp(lastTimestamp / 1000.0).isoformat()
- print('[{}] {}'.format((timestamp + '.000')[:23] + 'Z', event['message']))
-
- nextToken = logEvents['nextForwardToken']
- if nextToken and kwargs.get('nextToken') != nextToken:
- kwargs['nextToken'] = nextToken
- else:
- break
- return lastTimestamp
-
-
-def getLogStream(logGroupName, jobName, jobId):
- response = cloudwatch.describe_log_streams(
- logGroupName=logGroupName,
- logStreamNamePrefix=jobName + '/' + jobId
- )
- logStreams = response['logStreams']
- if not logStreams:
- return ''
- else:
- return logStreams[0]['logStreamName']
-
-def nowInMillis():
- endTime = long(total_seconds(datetime.utcnow() - datetime(1970, 1, 1))) * 1000
- return endTime
-
-
-def main():
- spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
- logGroupName = '/aws/batch/job'
-
- jobName = re.sub('[^A-Za-z0-9_\-]', '', args.name)[:128] # Enforce AWS Batch jobName rules
- jobQueue = args.job_queue
- jobDefinition = args.job_definition
- command = args.command.split()
- wait = args.wait
-
- parameters={
- 'SOURCE_REF': args.source_ref,
- 'WORK_DIR': args.work_dir,
- 'SAVED_OUTPUT': args.saved_output,
- 'SAVE_PATH': args.save_path,
- 'CONDA_ENV': args.conda_env,
- 'COMMAND': args.command,
- 'REMOTE': args.remote
- }
- kwargs = dict(
- jobName=jobName,
- jobQueue=jobQueue,
- jobDefinition=jobDefinition,
- parameters=parameters,
- )
- if args.timeout is not None:
- kwargs['timeout'] = {'attemptDurationSeconds': args.timeout}
- submitJobResponse = batch.submit_job(**kwargs)
-
- jobId = submitJobResponse['jobId']
- print('Submitted job [{} - {}] to the job queue [{}]'.format(jobName, jobId, jobQueue))
-
- spinner = 0
- running = False
- status_set = set()
- startTime = 0
-
- while wait:
- time.sleep(random.randint(5, 10))
- describeJobsResponse = batch.describe_jobs(jobs=[jobId])
- status = describeJobsResponse['jobs'][0]['status']
- if status == 'SUCCEEDED' or status == 'FAILED':
- print('=' * 80)
- print('Job [{} - {}] {}'.format(jobName, jobId, status))
-
- sys.exit(status == 'FAILED')
-
- elif status == 'RUNNING':
- logStreamName = getLogStream(logGroupName, jobName, jobId)
- if not running:
- running = True
- print('\rJob [{} - {}] is RUNNING.'.format(jobName, jobId))
- if logStreamName:
- print('Output [{}]:\n {}'.format(logStreamName, '=' * 80))
- if logStreamName:
- startTime = printLogs(logGroupName, logStreamName, startTime) + 1
- elif status not in status_set:
- status_set.add(status)
- print('\rJob [%s - %s] is %-9s... %s' % (jobName, jobId, status, spin[spinner % len(spin)]),)
- sys.stdout.flush()
- spinner += 1
-
-if __name__ == '__main__':
- main()
diff --git a/ci/batch/wait-job.py b/ci/batch/wait-job.py
deleted file mode 100644
index 87d8679255..0000000000
--- a/ci/batch/wait-job.py
+++ /dev/null
@@ -1,93 +0,0 @@
-import argparse
-from datetime import datetime
-import sys
-import time
-
-import boto3
-from botocore.compat import total_seconds
-
-parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
-
-parser.add_argument('--profile', help='profile name of aws account.', type=str,
- default=None)
-parser.add_argument('--job-id', help='job id to check status and wait.', type=str,
- default=None)
-
-args = parser.parse_args()
-
-session = boto3.Session(profile_name=args.profile)
-batch, cloudwatch = [session.client(service_name=sn) for sn in ['batch', 'logs']]
-
-def printLogs(logGroupName, logStreamName, startTime):
- kwargs = {'logGroupName': logGroupName,
- 'logStreamName': logStreamName,
- 'startTime': startTime,
- 'startFromHead': True}
-
- lastTimestamp = 0
- while True:
- logEvents = cloudwatch.get_log_events(**kwargs)
-
- for event in logEvents['events']:
- lastTimestamp = event['timestamp']
- timestamp = datetime.utcfromtimestamp(lastTimestamp / 1000.0).isoformat()
- print('[{}] {}'.format((timestamp + '.000')[:23] + 'Z', event['message']))
-
- nextToken = logEvents['nextForwardToken']
- if nextToken and kwargs.get('nextToken') != nextToken:
- kwargs['nextToken'] = nextToken
- else:
- break
- return lastTimestamp
-
-
-def getLogStream(logGroupName, jobName, jobId):
- response = cloudwatch.describe_log_streams(
- logGroupName=logGroupName,
- logStreamNamePrefix=jobName + '/' + jobId
- )
- logStreams = response['logStreams']
- if not logStreams:
- return ''
- else:
- return logStreams[0]['logStreamName']
-
-def nowInMillis():
- endTime = long(total_seconds(datetime.utcnow() - datetime(1970, 1, 1))) * 1000
- return endTime
-
-
-def main():
- spin = ['-', '/', '|', '\\', '-', '/', '|', '\\']
- logGroupName = '/aws/batch/job'
-
- jobId = args.job_id
-
- spinner = 0
- running = False
- startTime = 0
-
- while True:
- time.sleep(1)
- describeJobsResponse = batch.describe_jobs(jobs=[jobId])
- job = describeJobsResponse['jobs'][0]
- status, jobName = job['status'], job['jobName']
- if status == 'SUCCEEDED' or status == 'FAILED':
- print('=' * 80)
- print('Job [{} - {}] {}'.format(jobName, jobId, status))
- break
- elif status == 'RUNNING':
- logStreamName = getLogStream(logGroupName, jobName, jobId)
- if not running and logStreamName:
- running = True
- print('\rJob [{} - {}] is RUNNING.'.format(jobName, jobId))
- print('Output [{}]:\n {}'.format(logStreamName, '=' * 80))
- if logStreamName:
- startTime = printLogs(logGroupName, logStreamName, startTime) + 1
- else:
- print('\rJob [%s - %s] is %-9s... %s' % (jobName, jobId, status, spin[spinner % len(spin)]),)
- sys.stdout.flush()
- spinner += 1
-
-if __name__ == '__main__':
- main()
diff --git a/ci/codecov.sh b/ci/codecov.sh
deleted file mode 100755
index 1ef332b1b3..0000000000
--- a/ci/codecov.sh
+++ /dev/null
@@ -1,1550 +0,0 @@
-#!/usr/bin/env bash
-
-# Apache License Version 2.0, January 2004
-# https://github.com/codecov/codecov-bash/blob/master/LICENSE
-
-
-set -e +o pipefail
-
-VERSION="0b37652"
-
-url="https://codecov.io"
-env="$CODECOV_ENV"
-service=""
-token=""
-search_in=""
-flags=""
-exit_with=0
-curlargs=""
-curlawsargs=""
-dump="0"
-clean="0"
-curl_s="-s"
-name="$CODECOV_NAME"
-include_cov=""
-exclude_cov=""
-ddp="$(echo ~)/Library/Developer/Xcode/DerivedData"
-xp=""
-files=""
-cacert="$CODECOV_CA_BUNDLE"
-gcov_ignore="-not -path './bower_components/**' -not -path './node_modules/**' -not -path './vendor/**'"
-gcov_include=""
-
-ft_gcov="1"
-ft_coveragepy="1"
-ft_fix="1"
-ft_search="1"
-ft_s3="1"
-ft_network="1"
-ft_xcodellvm="1"
-ft_xcodeplist="0"
-
-_git_root=$(git rev-parse --show-toplevel 2>/dev/null || hg root 2>/dev/null || echo $PWD)
-git_root="$_git_root"
-codecov_yml=""
-remote_addr=""
-if [ "$git_root" = "$PWD" ];
-then
- git_root="."
-fi
-
-url_o=""
-pr_o=""
-build_o=""
-commit_o=""
-search_in_o=""
-tag_o=""
-branch_o=""
-slug_o=""
-prefix_o=""
-
-commit="$VCS_COMMIT_ID"
-branch="$VCS_BRANCH_NAME"
-pr="$VCS_PULL_REQUEST"
-slug="$VCS_SLUG"
-tag="$VCS_TAG"
-build_url="$CI_BUILD_URL"
-build="$CI_BUILD_ID"
-job="$CI_JOB_ID"
-
-beta_xcode_partials=""
-
-proj_root="$git_root"
-gcov_exe="gcov"
-gcov_arg=""
-
-b="\033[0;36m"
-g="\033[0;32m"
-r="\033[0;31m"
-e="\033[0;90m"
-x="\033[0m"
-
-show_help() {
-cat << EOF
-
- Codecov Bash $VERSION
-
- Global report uploading tool for Codecov
- Documentation at https://docs.codecov.io/docs
- Contribute at https://github.com/codecov/codecov-bash
-
-
- -h Display this help and exit
- -f FILE Target file(s) to upload
-
- -f "path/to/file" only upload this file
- skips searching unless provided patterns below
-
- -f '!*.bar' ignore all files at pattern *.bar
- -f '*.foo' include all files at pattern *.foo
- Must use single quotes.
- This is non-exclusive, use -s "*.foo" to match specific paths.
-
- -s DIR Directory to search for coverage reports.
- Already searches project root and artifact folders.
- -t TOKEN Set the private repository token
- (option) set environment variable CODECOV_TOKEN=:uuid
-
- -t @/path/to/token_file
- -t uuid
-
- -n NAME Custom defined name of the upload. Visible in Codecov UI
-
- -e ENV Specify environment variables to be included with this build
- Also accepting environment variables: CODECOV_ENV=VAR,VAR2
-
- -e VAR,VAR2
-
- -X feature Toggle functionalities
-
- -X gcov Disable gcov
- -X coveragepy Disable python coverage
- -X fix Disable report fixing
- -X search Disable searching for reports
- -X xcode Disable xcode processing
- -X network Disable uploading the file network
-
- -R root dir Used when not in git/hg project to identify project root directory
- -y conf file Used to specify the location of the .codecov.yml config file
- -F flag Flag the upload to group coverage metrics
-
- -F unittests This upload is only unittests
- -F integration This upload is only integration tests
- -F ui,chrome This upload is Chrome - UI tests
-
- -c Move discovered coverage reports to the trash
- -Z Exit with 1 if not successful. Default will Exit with 0
-
- -- xcode --
- -D Custom Derived Data Path for Coverage.profdata and gcov processing
- Default '~/Library/Developer/Xcode/DerivedData'
- -J Specify packages to build coverage.
- This can significantly reduces time to build coverage reports.
-
- -J 'MyAppName' Will match "MyAppName" and "MyAppNameTests"
- -J '^ExampleApp$' Will match only "ExampleApp" not "ExampleAppTests"
-
- -- gcov --
- -g GLOB Paths to ignore during gcov gathering
- -G GLOB Paths to include during gcov gathering
- -p dir Project root directory
- Also used when preparing gcov
- -k prefix Prefix filepaths to help resolve path fixing: https://github.com/codecov/support/issues/472
- -x gcovexe gcov executable to run. Defaults to 'gcov'
- -a gcovargs extra arguments to pass to gcov
-
- -- Override CI Environment Variables --
- These variables are automatically detected by popular CI providers
-
- -B branch Specify the branch name
- -C sha Specify the commit sha
- -P pr Specify the pull request number
- -b build Specify the build number
- -T tag Specify the git tag
-
- -- Enterprise --
- -u URL Set the target url for Enterprise customers
- Not required when retrieving the bash uploader from your CCE
- (option) Set environment variable CODECOV_URL=https://my-hosted-codecov.com
- -r SLUG owner/repo slug used instead of the private repo token in Enterprise
- (option) set environment variable CODECOV_SLUG=:owner/:repo
- (option) set in your codecov.yml "codecov.slug"
- -S PATH File path to your cacert.pem file used to verify ssl with Codecov Enterprise (optional)
- (option) Set environment variable: CODECOV_CA_BUNDLE="/path/to/ca.pem"
- -U curlargs Extra curl arguments to communicate with Codecov. e.g., -U "--proxy http://http-proxy"
- -A curlargs Extra curl arguments to communicate with AWS.
-
- -- Debugging --
- -d Don't upload, but dump upload file to stdout
- -K Remove color from the output
- -v Verbose mode
-
-EOF
-}
-
-
-say() {
- echo -e "$1"
-}
-
-
-urlencode() {
- echo "$1" | curl -Gso /dev/null -w %{url_effective} --data-urlencode @- "" | cut -c 3- | sed -e 's/%0A//'
-}
-
-
-swiftcov() {
- _dir=$(dirname "$1" | sed 's/\(Build\).*/\1/g')
- for _type in app framework xctest
- do
- find "$_dir" -name "*.$_type" | while read f
- do
- _proj=${f##*/}
- _proj=${_proj%."$_type"}
- if [ "$2" = "" ] || [ "$(echo "$_proj" | grep -i "$2")" != "" ];
- then
- say " $g+$x Building reports for $_proj $_type"
- dest=$([ -f "$f/$_proj" ] && echo "$f/$_proj" || echo "$f/Contents/MacOS/$_proj")
- _proj_name=$(echo "$_proj" | sed -e 's/[[:space:]]//g')
- xcrun llvm-cov show $beta_xcode_partials -instr-profile "$1" "$dest" > "$_proj_name.$_type.coverage.txt" \
- || say " ${r}x>${x} llvm-cov failed to produce results for $dest"
- fi
- done
- done
-}
-
-
-# Credits to: https://gist.github.com/pkuczynski/8665367
-parse_yaml() {
- local prefix=$2
- local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
- sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
- -e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
- awk -F$fs '{
- indent = length($1)/2;
- vname[indent] = $2;
- for (i in vname) {if (i > indent) {delete vname[i]}}
- if (length($3) > 0) {
- vn=""; if (indent > 0) {vn=(vn)(vname[0])("_")}
- printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
- }
- }'
-}
-
-
-if [ $# != 0 ];
-then
- while getopts "a:A:b:B:cC:dD:e:f:F:g:G:hJ:k:Kn:p:P:r:R:y:s:S:t:T:u:U:vx:X:Z" o
- do
- case "$o" in
- "a")
- gcov_arg=$OPTARG
- ;;
- "A")
- curlawsargs="$OPTARG"
- ;;
- "b")
- build_o="$OPTARG"
- ;;
- "B")
- branch_o="$OPTARG"
- ;;
- "c")
- clean="1"
- ;;
- "C")
- commit_o="$OPTARG"
- ;;
- "d")
- dump="1"
- ;;
- "D")
- ddp="$OPTARG"
- ;;
- "e")
- env="$env,$OPTARG"
- ;;
- "f")
- if [ "${OPTARG::1}" = "!" ];
- then
- exclude_cov="$exclude_cov -not -path '${OPTARG:1}'"
-
- elif [[ "$OPTARG" = *"*"* ]];
- then
- include_cov="$include_cov -or -name '$OPTARG'"
-
- else
- ft_search=0
- if [ "$files" = "" ];
- then
- files="$OPTARG"
- else
- files="$files
-$OPTARG"
- fi
- fi
- ;;
- "F")
- if [ "$flags" = "" ];
- then
- flags="$OPTARG"
- else
- flags="$flags,$OPTARG"
- fi
- ;;
- "g")
- gcov_ignore="$gcov_ignore -not -path '$OPTARG'"
- ;;
- "G")
- gcov_include="$gcov_include -path '$OPTARG'"
- ;;
- "h")
- show_help
- exit 0;
- ;;
- "J")
- ft_xcodellvm="1"
- ft_xcodeplist="0"
- if [ "$xp" = "" ];
- then
- xp="$OPTARG"
- else
- xp="$xp\|$OPTARG"
- fi
- ;;
- "k")
- prefix_o=$(echo "$OPTARG" | sed -e 's:^/*::' -e 's:/*$::')
- ;;
- "K")
- b=""
- g=""
- r=""
- e=""
- x=""
- ;;
- "n")
- name="$OPTARG"
- ;;
- "p")
- proj_root="$OPTARG"
- ;;
- "P")
- pr_o="$OPTARG"
- ;;
- "r")
- slug_o="$OPTARG"
- ;;
- "R")
- git_root="$OPTARG"
- ;;
- "s")
- if [ "$search_in_o" = "" ];
- then
- search_in_o="$OPTARG"
- else
- search_in_o="$search_in_o $OPTARG"
- fi
- ;;
- "S")
- cacert="--cacert \"$OPTARG\""
- ;;
- "t")
- if [ "${OPTARG::1}" = "@" ];
- then
- token=$(cat "${OPTARG:1}" | tr -d ' \n')
- else
- token="$OPTARG"
- fi
- ;;
- "T")
- tag_o="$OPTARG"
- ;;
- "u")
- url_o=$(echo "$OPTARG" | sed -e 's/\/$//')
- ;;
- "U")
- curlargs="$OPTARG"
- ;;
- "v")
- set -x
- curl_s=""
- ;;
- "x")
- gcov_exe=$OPTARG
- ;;
- "X")
- if [ "$OPTARG" = "gcov" ];
- then
- ft_gcov="0"
- elif [ "$OPTARG" = "coveragepy" ] || [ "$OPTARG" = "py" ];
- then
- ft_coveragepy="0"
- elif [ "$OPTARG" = "xcodellvm" ];
- then
- ft_xcodellvm="1"
- ft_xcodeplist="0"
- elif [ "$OPTARG" = "fix" ] || [ "$OPTARG" = "fixes" ];
- then
- ft_fix="0"
- elif [ "$OPTARG" = "xcode" ];
- then
- ft_xcodellvm="0"
- ft_xcodeplist="0"
- elif [ "$OPTARG" = "search" ];
- then
- ft_search="0"
- elif [ "$OPTARG" = "xcodepartials" ];
- then
- beta_xcode_partials="-use-color"
- elif [ "$OPTARG" = "network" ];
- then
- ft_network="0"
- elif [ "$OPTARG" = "s3" ];
- then
- ft_s3="0"
- fi
- ;;
- "y")
- codecov_yml="$OPTARG"
- ;;
- "Z")
- exit_with=1
- ;;
- esac
- done
-fi
-
-say "
- _____ _
- / ____| | |
-| | ___ __| | ___ ___ _____ __
-| | / _ \\ / _\` |/ _ \\/ __/ _ \\ \\ / /
-| |___| (_) | (_| | __/ (_| (_) \\ V /
- \\_____\\___/ \\__,_|\\___|\\___\\___/ \\_/
- Bash-$VERSION
-
-"
-
-search_in="$proj_root"
-
-if [ "$JENKINS_URL" != "" ];
-then
- say "$e==>$x Jenkins CI detected."
- # https://wiki.jenkins-ci.org/display/JENKINS/Building+a+software+project
- # https://wiki.jenkins-ci.org/display/JENKINS/GitHub+pull+request+builder+plugin#GitHubpullrequestbuilderplugin-EnvironmentVariables
- service="jenkins"
-
- if [ "$ghprbSourceBranch" != "" ];
- then
- branch="$ghprbSourceBranch"
- elif [ "$GIT_BRANCH" != "" ];
- then
- branch="$GIT_BRANCH"
- elif [ "$BRANCH_NAME" != "" ];
- then
- branch="$BRANCH_NAME"
- fi
-
- if [ "$ghprbActualCommit" != "" ];
- then
- commit="$ghprbActualCommit"
- elif [ "$GIT_COMMIT" != "" ];
- then
- commit="$GIT_COMMIT"
- fi
-
- if [ "$ghprbPullId" != "" ];
- then
- pr="$ghprbPullId"
- elif [ "$CHANGE_ID" != "" ];
- then
- pr="$CHANGE_ID"
- fi
-
- build="$BUILD_NUMBER"
- build_url=$(urlencode "$BUILD_URL")
-
-elif [ "$CI" = "true" ] && [ "$TRAVIS" = "true" ] && [ "$SHIPPABLE" != "true" ];
-then
- say "$e==>$x Travis CI detected."
- # https://docs.travis-ci.com/user/environment-variables/
- service="travis"
- commit="${TRAVIS_PULL_REQUEST_SHA:-$TRAVIS_COMMIT}"
- build="$TRAVIS_JOB_NUMBER"
- pr="$TRAVIS_PULL_REQUEST"
- job="$TRAVIS_JOB_ID"
- slug="$TRAVIS_REPO_SLUG"
- env="$env,TRAVIS_OS_NAME"
- tag="$TRAVIS_TAG"
- if [ "$TRAVIS_BRANCH" != "$TRAVIS_TAG" ];
- then
- branch="$TRAVIS_BRANCH"
- fi
-
- language=$(printenv | grep "TRAVIS_.*_VERSION" | head -1)
- if [ "$language" != "" ];
- then
- env="$env,${language%=*}"
- fi
-
-elif [ "$DOCKER_REPO" != "" ];
-then
- say "$e==>$x Docker detected."
- # https://docs.docker.com/docker-cloud/builds/advanced/
- service="docker"
- branch="$SOURCE_BRANCH"
- commit="$SOURCE_COMMIT"
- slug="$DOCKER_REPO"
- tag="$CACHE_TAG"
- env="$env,IMAGE_NAME"
-
-elif [ "$CI" = "true" ] && [ "$CI_NAME" = "codeship" ];
-then
- say "$e==>$x Codeship CI detected."
- # https://www.codeship.io/documentation/continuous-integration/set-environment-variables/
- service="codeship"
- branch="$CI_BRANCH"
- build="$CI_BUILD_NUMBER"
- build_url=$(urlencode "$CI_BUILD_URL")
- commit="$CI_COMMIT_ID"
-
-elif [ ! -z "$CF_BUILD_URL" ] && [ ! -z "$CF_BUILD_ID" ];
-then
- say "$e==>$x Codefresh CI detected."
- # https://docs.codefresh.io/v1.0/docs/variables
- service="codefresh"
- branch="$CF_BRANCH"
- build="$CF_BUILD_ID"
- build_url=$(urlencode "$CF_BUILD_URL")
- commit="$CF_REVISION"
-
-elif [ "$TEAMCITY_VERSION" != "" ];
-then
- say "$e==>$x TeamCity CI detected."
- # https://confluence.jetbrains.com/display/TCD8/Predefined+Build+Parameters
- # https://confluence.jetbrains.com/plugins/servlet/mobile#content/view/74847298
- if [ "$TEAMCITY_BUILD_BRANCH" = '' ];
- then
- echo " Teamcity does not automatically make build parameters available as environment variables."
- echo " Add the following environment parameters to the build configuration"
- echo " env.TEAMCITY_BUILD_BRANCH = %teamcity.build.branch%"
- echo " env.TEAMCITY_BUILD_ID = %teamcity.build.id%"
- echo " env.TEAMCITY_BUILD_URL = %teamcity.serverUrl%/viewLog.html?buildId=%teamcity.build.id%"
- echo " env.TEAMCITY_BUILD_COMMIT = %system.build.vcs.number%"
- echo " env.TEAMCITY_BUILD_REPOSITORY = %vcsroot..url%"
- fi
- service="teamcity"
- branch="$TEAMCITY_BUILD_BRANCH"
- build="$TEAMCITY_BUILD_ID"
- build_url=$(urlencode "$TEAMCITY_BUILD_URL")
- if [ "$TEAMCITY_BUILD_COMMIT" != "" ];
- then
- commit="$TEAMCITY_BUILD_COMMIT"
- else
- commit="$BUILD_VCS_NUMBER"
- fi
- remote_addr="$TEAMCITY_BUILD_REPOSITORY"
-
-elif [ "$CI" = "true" ] && [ "$CIRCLECI" = "true" ];
-then
- say "$e==>$x Circle CI detected."
- # https://circleci.com/docs/environment-variables
- service="circleci"
- branch="$CIRCLE_BRANCH"
- build="$CIRCLE_BUILD_NUM"
- job="$CIRCLE_NODE_INDEX"
- if [ "$CIRCLE_PROJECT_REPONAME" != "" ];
- then
- slug="$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME"
- else
- # git@github.com:owner/repo.git
- slug="${CIRCLE_REPOSITORY_URL##*:}"
- # owner/repo.git
- slug="${slug%%.git}"
- fi
- pr="$CIRCLE_PR_NUMBER"
- commit="$CIRCLE_SHA1"
- search_in="$search_in $CIRCLE_ARTIFACTS $CIRCLE_TEST_REPORTS"
-
-elif [ "$BUDDYBUILD_BRANCH" != "" ];
-then
- say "$e==>$x buddybuild detected"
- # http://docs.buddybuild.com/v6/docs/custom-prebuild-and-postbuild-steps
- service="buddybuild"
- branch="$BUDDYBUILD_BRANCH"
- build="$BUDDYBUILD_BUILD_NUMBER"
- build_url="https://dashboard.buddybuild.com/public/apps/$BUDDYBUILD_APP_ID/build/$BUDDYBUILD_BUILD_ID"
- # BUDDYBUILD_TRIGGERED_BY
- if [ "$ddp" = "$(echo ~)/Library/Developer/Xcode/DerivedData" ];
- then
- ddp="/private/tmp/sandbox/${BUDDYBUILD_APP_ID}/bbtest"
- fi
-
-elif [ "${bamboo_planRepository_revision}" != "" ];
-then
- say "$e==>$x Bamboo detected"
- # https://confluence.atlassian.com/bamboo/bamboo-variables-289277087.html#Bamboovariables-Build-specificvariables
- service="bamboo"
- commit="${bamboo_planRepository_revision}"
- branch="${bamboo_planRepository_branch}"
- build="${bamboo_buildNumber}"
- build_url="${bamboo_buildResultsUrl}"
- remote_addr="${bamboo_planRepository_repositoryUrl}"
-
-elif [ "$CI" = "true" ] && [ "$BITRISE_IO" = "true" ];
-then
- # http://devcenter.bitrise.io/faq/available-environment-variables/
- say "$e==>$x Bitrise CI detected."
- service="bitrise"
- branch="$BITRISE_GIT_BRANCH"
- build="$BITRISE_BUILD_NUMBER"
- build_url=$(urlencode "$BITRISE_BUILD_URL")
- pr="$BITRISE_PULL_REQUEST"
- if [ "$GIT_CLONE_COMMIT_HASH" != "" ];
- then
- commit="$GIT_CLONE_COMMIT_HASH"
- fi
-
-elif [ "$CI" = "true" ] && [ "$SEMAPHORE" = "true" ];
-then
- say "$e==>$x Semaphore CI detected."
- # https://semaphoreapp.com/docs/available-environment-variables.html
- service="semaphore"
- branch="$BRANCH_NAME"
- build="$SEMAPHORE_BUILD_NUMBER"
- job="$SEMAPHORE_CURRENT_THREAD"
- pr="$PULL_REQUEST_NUMBER"
- slug="$SEMAPHORE_REPO_SLUG"
- commit="$REVISION"
- env="$env,SEMAPHORE_TRIGGER_SOURCE"
-
-elif [ "$CI" = "true" ] && [ "$BUILDKITE" = "true" ];
-then
- say "$e==>$x Buildkite CI detected."
- # https://buildkite.com/docs/guides/environment-variables
- service="buildkite"
- branch="$BUILDKITE_BRANCH"
- build="$BUILDKITE_BUILD_NUMBER"
- job="$BUILDKITE_JOB_ID"
- build_url=$(urlencode "$BUILDKITE_BUILD_URL")
- slug="$BUILDKITE_PROJECT_SLUG"
- commit="$BUILDKITE_COMMIT"
- if [[ "$BUILDKITE_PULL_REQUEST" != "false" ]]; then
- pr="$BUILDKITE_PULL_REQUEST"
- fi
- tag="$BUILDKITE_TAG"
-
-elif [ "$CI" = "drone" ] || [ "$DRONE" = "true" ];
-then
- say "$e==>$x Drone CI detected."
- # http://docs.drone.io/env.html
- # drone commits are not full shas
- service="drone.io"
- branch="$DRONE_BRANCH"
- build="$DRONE_BUILD_NUMBER"
- build_url=$(urlencode "${DRONE_BUILD_LINK}")
- pr="$DRONE_PULL_REQUEST"
- job="$DRONE_JOB_NUMBER"
- tag="$DRONE_TAG"
-
-elif [ "$HEROKU_TEST_RUN_BRANCH" != "" ];
-then
- say "$e==>$x Heroku CI detected."
- # https://devcenter.heroku.com/articles/heroku-ci#environment-variables
- service="heroku"
- branch="$HEROKU_TEST_RUN_BRANCH"
- build="$HEROKU_TEST_RUN_ID"
-
-elif [ "$CI" = "True" ] && [ "$APPVEYOR" = "True" ];
-then
- say "$e==>$x Appveyor CI detected."
- # http://www.appveyor.com/docs/environment-variables
- service="appveyor"
- branch="$APPVEYOR_REPO_BRANCH"
- build=$(urlencode "$APPVEYOR_JOB_ID")
- pr="$APPVEYOR_PULL_REQUEST_NUMBER"
- job="$APPVEYOR_ACCOUNT_NAME%2F$APPVEYOR_PROJECT_SLUG%2F$APPVEYOR_BUILD_VERSION"
- slug="$APPVEYOR_REPO_NAME"
- commit="$APPVEYOR_REPO_COMMIT"
-
-elif [ "$CI" = "true" ] && [ "$WERCKER_GIT_BRANCH" != "" ];
-then
- say "$e==>$x Wercker CI detected."
- # http://devcenter.wercker.com/articles/steps/variables.html
- service="wercker"
- branch="$WERCKER_GIT_BRANCH"
- build="$WERCKER_MAIN_PIPELINE_STARTED"
- slug="$WERCKER_GIT_OWNER/$WERCKER_GIT_REPOSITORY"
- commit="$WERCKER_GIT_COMMIT"
-
-elif [ "$CI" = "true" ] && [ "$MAGNUM" = "true" ];
-then
- say "$e==>$x Magnum CI detected."
- # https://magnum-ci.com/docs/environment
- service="magnum"
- branch="$CI_BRANCH"
- build="$CI_BUILD_NUMBER"
- commit="$CI_COMMIT"
-
-elif [ "$SHIPPABLE" = "true" ];
-then
- say "$e==>$x Shippable CI detected."
- # http://docs.shippable.com/ci_configure/
- service="shippable"
- branch=$([ "$HEAD_BRANCH" != "" ] && echo "$HEAD_BRANCH" || echo "$BRANCH")
- build="$BUILD_NUMBER"
- build_url=$(urlencode "$BUILD_URL")
- pr="$PULL_REQUEST"
- slug="$REPO_FULL_NAME"
- commit="$COMMIT"
-
-elif [ "$TDDIUM" = "true" ];
-then
- say "Solano CI detected."
- # http://docs.solanolabs.com/Setup/tddium-set-environment-variables/
- service="solano"
- commit="$TDDIUM_CURRENT_COMMIT"
- branch="$TDDIUM_CURRENT_BRANCH"
- build="$TDDIUM_TID"
- pr="$TDDIUM_PR_ID"
-
-elif [ "$GREENHOUSE" = "true" ];
-then
- say "$e==>$x Greenhouse CI detected."
- # http://docs.greenhouseci.com/docs/environment-variables-files
- service="greenhouse"
- branch="$GREENHOUSE_BRANCH"
- build="$GREENHOUSE_BUILD_NUMBER"
- build_url=$(urlencode "$GREENHOUSE_BUILD_URL")
- pr="$GREENHOUSE_PULL_REQUEST"
- commit="$GREENHOUSE_COMMIT"
- search_in="$search_in $GREENHOUSE_EXPORT_DIR"
-
-elif [ "$GITLAB_CI" != "" ];
-then
- say "$e==>$x GitLab CI detected."
- # http://doc.gitlab.com/ce/ci/variables/README.html
- service="gitlab"
- branch="${CI_BUILD_REF_NAME:-$CI_COMMIT_REF_NAME}"
- build="${CI_BUILD_ID:-$CI_JOB_ID}"
- remote_addr="${CI_BUILD_REPO:-$CI_REPOSITORY_URL}"
- commit="${CI_BUILD_REF:-$CI_COMMIT_SHA}"
-
-else
- say "${r}x>${x} No CI provider detected."
- say " Testing inside Docker? ${b}http://docs.codecov.io/docs/testing-with-docker${x}"
- say " Testing with Tox? ${b}https://docs.codecov.io/docs/python#section-testing-with-tox${x}"
-
-fi
-
-say " ${e}project root:${x} $git_root"
-
-# find branch, commit, repo from git command
-if [ "$GIT_BRANCH" != "" ];
-then
- branch="$GIT_BRANCH"
-
-elif [ "$branch" = "" ];
-then
- branch=$(git rev-parse --abbrev-ref HEAD 2>/dev/null || hg branch 2>/dev/null || echo "")
- if [ "$branch" = "HEAD" ];
- then
- branch=""
- fi
-fi
-
-if [ "$commit_o" = "" ];
-then
- # merge commit -> actual commit
- mc=
- if [ -n "$pr" ] && [ "$pr" != false ];
- then
- mc=$(git show --no-patch --format="%P" 2>/dev/null || echo "")
- fi
- if [[ "$mc" =~ ^[a-z0-9]{40}[[:space:]][a-z0-9]{40}$ ]];
- then
- say " Fixing merge commit SHA"
- commit=$(echo "$mc" | cut -d' ' -f2)
- elif [ "$GIT_COMMIT" != "" ];
- then
- commit="$GIT_COMMIT"
- elif [ "$commit" = "" ];
- then
- commit=$(git log -1 --format="%H" 2>/dev/null || hg id -i --debug 2>/dev/null | tr -d '+' || echo "")
- fi
-else
- commit="$commit_o"
-fi
-
-if [ "$CODECOV_TOKEN" != "" ] && [ "$token" = "" ];
-then
- say "${e}-->${x} token set from env"
- token="$CODECOV_TOKEN"
-fi
-
-if [ "$CODECOV_URL" != "" ] && [ "$url_o" = "" ];
-then
- say "${e}-->${x} url set from env"
- url_o=$(echo "$CODECOV_URL" | sed -e 's/\/$//')
-fi
-
-if [ "$CODECOV_SLUG" != "" ];
-then
- say "${e}-->${x} slug set from env"
- slug_o="$CODECOV_SLUG"
-
-elif [ "$slug" = "" ];
-then
- if [ "$remote_addr" = "" ];
- then
- remote_addr=$(git config --get remote.origin.url || hg paths default || echo '')
- fi
- if [ "$remote_addr" != "" ];
- then
- if echo "$remote_addr" | grep -q "//"; then
- # https
- slug=$(echo "$remote_addr" | cut -d / -f 4,5 | sed -e 's/\.git$//')
- else
- # ssh
- slug=$(echo "$remote_addr" | cut -d : -f 2 | sed -e 's/\.git$//')
- fi
- fi
- if [ "$slug" = "/" ];
- then
- slug=""
- fi
-fi
-
-yaml=$(test -n "$codecov_yml" && echo "$codecov_yml" \
- || cd "$git_root" && \
- git ls-files "*codecov.yml" "*codecov.yaml" 2>/dev/null \
- || hg locate "*codecov.yml" "*codecov.yaml" 2>/dev/null \
- || cd $proj_root && find . -type f -name '*codecov.y*ml' -depth 1 2>/dev/null \
- || echo '')
-yaml=$(echo "$yaml" | head -1)
-
-if [ "$yaml" != "" ];
-then
- say " ${e}Yaml found at:${x} $yaml"
- config=$(parse_yaml "$git_root/$yaml" || echo '')
-
- # TODO validate the yaml here
-
- if [ "$(echo "$config" | grep 'codecov_token="')" != "" ] && [ "$token" = "" ];
- then
- say "${e}-->${x} token set from yaml"
- token="$(echo "$config" | grep 'codecov_token="' | sed -e 's/codecov_token="//' | sed -e 's/"\.*//')"
- fi
-
- if [ "$(echo "$config" | grep 'codecov_url="')" != "" ] && [ "$url_o" = "" ];
- then
- say "${e}-->${x} url set from yaml"
- url_o="$(echo "$config" | grep 'codecov_url="' | sed -e 's/codecov_url="//' | sed -e 's/"\.*//')"
- fi
-
- if [ "$(echo "$config" | grep 'codecov_slug="')" != "" ] && [ "$slug_o" = "" ];
- then
- say "${e}-->${x} slug set from yaml"
- slug_o="$(echo "$config" | grep 'codecov_slug="' | sed -e 's/codecov_slug="//' | sed -e 's/"\.*//')"
- fi
-else
- say " ${g}Yaml not found, that's ok! Learn more at${x} ${b}http://docs.codecov.io/docs/codecov-yaml${x}"
-
-fi
-
-if [ "$branch_o" != "" ];
-then
- branch=$(urlencode "$branch_o")
-else
- branch=$(urlencode "$branch")
-fi
-
-query="branch=$branch\
- &commit=$commit\
- &build=$([ "$build_o" = "" ] && echo "$build" || echo "$build_o")\
- &build_url=$build_url\
- &name=$(urlencode "$name")\
- &tag=$([ "$tag_o" = "" ] && echo "$tag" || echo "$tag_o")\
- &slug=$([ "$slug_o" = "" ] && urlencode "$slug" || urlencode "$slug_o")\
- &service=$service\
- &flags=$flags\
- &pr=$([ "$pr_o" = "" ] && echo "${pr##\#}" || echo "${pr_o##\#}")\
- &job=$job"
-
-if [ "$ft_search" = "1" ];
-then
- # detect bower comoponents location
- bower_components="bower_components"
- bower_rc=$(cd "$git_root" && cat .bowerrc 2>/dev/null || echo "")
- if [ "$bower_rc" != "" ];
- then
- bower_components=$(echo "$bower_rc" | tr -d '\n' | grep '"directory"' | cut -d'"' -f4 | sed -e 's/\/$//')
- if [ "$bower_components" = "" ];
- then
- bower_components="bower_components"
- fi
- fi
-
- # Swift Coverage
- if [ "$ft_xcodellvm" = "1" ] && [ -d "$ddp" ];
- then
- say "${e}==>${x} Processing Xcode reports via llvm-cov"
- say " DerivedData folder: $ddp"
- profdata_files=$(find "$ddp" -name '*.profdata' 2>/dev/null || echo '')
- if [ "$profdata_files" != "" ];
- then
- # xcode via profdata
- if [ "$xp" = "" ];
- then
- # xp=$(xcodebuild -showBuildSettings 2>/dev/null | grep -i "^\s*PRODUCT_NAME" | sed -e 's/.*= \(.*\)/\1/')
- # say " ${e}->${x} Speed up Xcode processing by adding ${e}-J '$xp'${x}"
- say " ${g}hint${x} Speed up Swift processing by using use ${g}-J 'AppName'${x} (regexp accepted)"
- say " ${g}hint${x} This will remove Pods/ from your report. Also ${b}https://docs.codecov.io/docs/ignoring-paths${x}"
- fi
- while read -r profdata;
- do
- if [ "$profdata" != "" ];
- then
- swiftcov "$profdata" "$xp"
- fi
- done <<< "$profdata_files"
- else
- say " ${e}->${x} No Swift coverage found"
- fi
-
- # Obj-C Gcov Coverage
- if [ "$ft_gcov" = "1" ];
- then
- say " ${e}->${x} Running $gcov_exe for Obj-C"
- bash -c "find $ddp -type f -name '*.gcda' $gcov_include $gcov_ignore -exec $gcov_exe -p $gcov_arg {} +" || true
- fi
- fi
-
- if [ "$ft_xcodeplist" = "1" ] && [ -d "$ddp" ];
- then
- say "${e}==>${x} Processing Xcode plists"
- plists_files=$(find "$ddp" -name '*.xccoverage' 2>/dev/null || echo '')
- if [ "$plists_files" != "" ];
- then
- while read -r plist;
- do
- if [ "$plist" != "" ];
- then
- say " ${g}Found${x} plist file at $plist"
- plutil -convert xml1 -o "$(basename "$plist").plist" -- $plist
- fi
- done <<< "$plists_files"
- fi
- fi
-
- # Gcov Coverage
- if [ "$ft_gcov" = "1" ];
- then
- say "${e}==>${x} Running gcov in $proj_root ${e}(disable via -X gcov)${x}"
- bash -c "find $proj_root -type f -name '*.gcno' $gcov_include $gcov_ignore -exec $gcov_exe -pb $gcov_arg {} +" || true
- else
- say "${e}==>${x} gcov disabled"
- fi
-
- # Python Coverage
- if [ "$ft_coveragepy" = "1" ];
- then
- if [ ! -f coverage.xml ];
- then
- if which coverage >/dev/null 2>&1;
- then
- say "${e}==>${x} Python coveragepy exists ${e}disable via -X coveragepy${x}"
-
- dotcoverage=$(find "$git_root" -name '.coverage' -or -name '.coverage.*' | head -1 || echo '')
- if [ "$dotcoverage" != "" ];
- then
- cd "$(dirname "$dotcoverage")"
- if [ ! -f .coverage ];
- then
- say " ${e}->${x} Running coverage combine"
- coverage combine -a
- fi
- say " ${e}->${x} Running coverage xml"
- if [ "$(coverage xml -i)" != "No data to report." ];
- then
- files="$files
-$PWD/coverage.xml"
- else
- say " ${r}No data to report.${x}"
- fi
- cd "$proj_root"
- else
- say " ${r}No .coverage file found.${x}"
- fi
- else
- say "${e}==>${x} Python coveragepy not found"
- fi
- fi
- else
- say "${e}==>${x} Python coveragepy disabled"
- fi
-
- if [ "$search_in_o" != "" ];
- then
- # location override
- search_in="$search_in_o"
- fi
-
- say "$e==>$x Searching for coverage reports in:"
- for _path in $search_in
- do
- say " ${g}+${x} $_path"
- done
-
- patterns="find $search_in \( \
- -name vendor \
- -or -name htmlcov \
- -or -name virtualenv \
- -or -name js/generated/coverage \
- -or -name .virtualenv \
- -or -name virtualenvs \
- -or -name .virtualenvs \
- -or -name .env \
- -or -name .envs \
- -or -name env \
- -or -name .yarn-cache \
- -or -name envs \
- -or -name .venv \
- -or -name .venvs \
- -or -name venv \
- -or -name venvs \
- -or -name .git \
- -or -name .hg \
- -or -name .tox \
- -or -name __pycache__ \
- -or -name '.egg-info*' \
- -or -name '$bower_components' \
- -or -name node_modules \
- -or -name 'conftest_*.c.gcov' \
- \) -prune -or \
- -type f \( -name '*coverage*.*' \
- -or -name 'nosetests.xml' \
- -or -name 'jacoco*.xml' \
- -or -name 'clover.xml' \
- -or -name 'report.xml' \
- -or -name '*.codecov.*' \
- -or -name 'codecov.*' \
- -or -name 'cobertura.xml' \
- -or -name 'excoveralls.json' \
- -or -name 'luacov.report.out' \
- -or -name 'coverage-final.json' \
- -or -name 'naxsi.info' \
- -or -name 'lcov.info' \
- -or -name 'lcov.dat' \
- -or -name '*.lcov' \
- -or -name '*.clover' \
- -or -name 'cover.out' \
- -or -name 'gcov.info' \
- -or -name '*.gcov' \
- -or -name '*.lst' \
- $include_cov \) \
- $exclude_cov \
- -not -name '*.profdata' \
- -not -name 'coverage-summary.json' \
- -not -name 'phpunit-code-coverage.xml' \
- -not -name '*/classycle/report.xml' \
- -not -name 'remapInstanbul.coverage*.json' \
- -not -name 'phpunit-coverage.xml' \
- -not -name '*codecov.yml' \
- -not -name '*.serialized' \
- -not -name '.coverage*' \
- -not -name '.*coveragerc' \
- -not -name '*.sh' \
- -not -name '*.bat' \
- -not -name '*.ps1' \
- -not -name '*.env' \
- -not -name '*.cmake' \
- -not -name '*.dox' \
- -not -name '*.ec' \
- -not -name '*.rst' \
- -not -name '*.h' \
- -not -name '*.scss' \
- -not -name '*.o' \
- -not -name '*.proto' \
- -not -name '*.sbt' \
- -not -name '*.xcoverage.*' \
- -not -name '*.gz' \
- -not -name '*.conf' \
- -not -name '*.p12' \
- -not -name '*.csv' \
- -not -name '*.rsp' \
- -not -name '*.m4' \
- -not -name '*.pem' \
- -not -name '*~' \
- -not -name '*.exe' \
- -not -name '*.am' \
- -not -name '*.template' \
- -not -name '*.cp' \
- -not -name '*.bw' \
- -not -name '*.crt' \
- -not -name '*.log' \
- -not -name '*.cmake' \
- -not -name '*.pth' \
- -not -name '*.in' \
- -not -name '*.jar*' \
- -not -name '*.pom*' \
- -not -name '*.png' \
- -not -name '*.jpg' \
- -not -name '*.sql' \
- -not -name '*.jpeg' \
- -not -name '*.svg' \
- -not -name '*.gif' \
- -not -name '*.csv' \
- -not -name '*.snapshot' \
- -not -name '*.mak*' \
- -not -name '*.bash' \
- -not -name '*.data' \
- -not -name '*.py' \
- -not -name '*.class' \
- -not -name '*.xcconfig' \
- -not -name '*.ec' \
- -not -name '*.coverage' \
- -not -name '*.pyc' \
- -not -name '*.cfg' \
- -not -name '*.egg' \
- -not -name '*.ru' \
- -not -name '*.css' \
- -not -name '*.less' \
- -not -name '*.pyo' \
- -not -name '*.whl' \
- -not -name '*.html' \
- -not -name '*.ftl' \
- -not -name '*.erb' \
- -not -name '*.rb' \
- -not -name '*.js' \
- -not -name '*.jade' \
- -not -name '*.db' \
- -not -name '*.md' \
- -not -name '*.cpp' \
- -not -name '*.gradle' \
- -not -name '*.tar.tz' \
- -not -name '*.scss' \
- -not -name 'include.lst' \
- -not -name 'fullLocaleNames.lst' \
- -not -name 'inputFiles.lst' \
- -not -name 'createdFiles.lst' \
- -not -name 'scoverage.measurements.*' \
- -not -name 'test_*_coverage.txt' \
- -not -name 'testrunner-coverage*' \
- -print 2>/dev/null"
- files=$(eval "$patterns" || echo '')
-
-elif [ "$include_cov" != "" ];
-then
- files=$(eval "find $search_in -type f \( ${include_cov:5} \)$exclude_cov 2>/dev/null" || echo '')
-fi
-
-num_of_files=$(echo "$files" | wc -l | tr -d ' ')
-if [ "$num_of_files" != '' ] && [ "$files" != '' ];
-then
- say " ${e}->${x} Found $num_of_files reports"
-fi
-
-# no files found
-if [ "$files" = "" ];
-then
- say "${r}-->${x} No coverage report found."
- say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}"
- exit ${exit_with};
-fi
-
-if [ "$ft_network" == "1" ];
-then
- say "${e}==>${x} Detecting git/mercurial file structure"
- network=$(cd "$git_root" && git ls-files 2>/dev/null || hg locate 2>/dev/null || echo "")
- if [ "$network" = "" ];
- then
- network=$(find "$git_root" \( \
- -name virtualenv \
- -name .virtualenv \
- -name virtualenvs \
- -name .virtualenvs \
- -name '*.png' \
- -name '*.gif' \
- -name '*.jpg' \
- -name '*.jpeg' \
- -name '*.md' \
- -name .env \
- -name .envs \
- -name env \
- -name envs \
- -name .venv \
- -name .venvs \
- -name venv \
- -name venvs \
- -name .git \
- -name .egg-info \
- -name shunit2-2.1.6 \
- -name vendor \
- -name __pycache__ \
- -name node_modules \
- -path '*/$bower_components/*' \
- -path '*/target/delombok/*' \
- -path '*/build/lib/*' \
- -path '*/js/generated/coverage/*' \
- \) -prune -or \
- -type f -print 2>/dev/null || echo '')
- fi
-
- if [ "$prefix_o" != "" ];
- then
- network=$(echo "$network" | awk "{print \"$prefix_o/\"\$0}")
- fi
-fi
-
-upload_file=`mktemp /tmp/codecov.XXXXXX`
-adjustments_file=`mktemp /tmp/codecov.adjustments.XXXXXX`
-
-cleanup() {
- rm -f $upload_file $adjustments_file $upload_file.gz
-}
-
-trap cleanup INT ABRT TERM
-
-if [ "$env" != "" ];
-then
- inc_env=""
- say "${e}==>${x} Appending build variables"
- for varname in $(echo "$env" | tr ',' ' ')
- do
- if [ "$varname" != "" ];
- then
- say " ${g}+${x} $varname"
- inc_env="${inc_env}${varname}=$(eval echo "\$${varname}")
-"
- fi
- done
-
-echo "$inc_env<<<<<< ENV" >> $upload_file
-fi
-
-# Append git file list
-# write discovered yaml location
-echo "$yaml" >> $upload_file
-if [ "$ft_network" == "1" ];
-then
- i="woff|eot|otf" # fonts
- i="$i|gif|png|jpg|jpeg|psd" # images
- i="$i|ptt|pptx|numbers|pages|md|txt|xlsx|docx|doc|pdf|html|csv" # docs
- i="$i|yml|yaml|.gitignore" # supporting docs
- echo "$network" | grep -vwE "($i)$" >> $upload_file
-fi
-echo "<<<<<< network" >> $upload_file
-
-fr=0
-say "${e}==>${x} Reading reports"
-while IFS='' read -r file;
-do
- # read the coverage file
- if [ "$(echo "$file" | tr -d ' ')" != '' ];
- then
- if [ -f "$file" ];
- then
- report_len=$(wc -c < "$file")
- if [ "$report_len" -ne 0 ];
- then
- say " ${g}+${x} $file ${e}bytes=$(echo "$report_len" | tr -d ' ')${x}"
- # append to to upload
- _filename=$(basename "$file")
- if [ "${_filename##*.}" = 'gcov' ];
- then
- echo "# path=$(echo "$file.reduced" | sed "s|^$git_root/||")" >> $upload_file
- # get file name
- head -1 $file >> $upload_file
- # 1. remove source code
- # 2. remove ending bracket lines
- # 3. remove whitespace
- # 4. remove contextual lines
- # 5. remove function names
- awk -F': *' '{print $1":"$2":"}' $file \
- | sed '\/: *} *$/d' \
- | sed 's/^ *//' \
- | sed '/^-/d' \
- | sed 's/^function.*/func/' >> $upload_file
- else
- echo "# path=$(echo "$file" | sed "s|^$git_root/||")" >> $upload_file
- cat "$file" >> $upload_file
- fi
- echo "<<<<<< EOF" >> $upload_file
- fr=1
- if [ "$clean" = "1" ];
- then
- rm "$file"
- fi
- else
- say " ${r}-${x} Skipping empty file $file"
- fi
- else
- say " ${r}-${x} file not found at $file"
- fi
- fi
-done <<< "$(echo -e "$files")"
-
-if [ "$fr" = "0" ];
-then
- say "${r}-->${x} No coverage data found."
- say " Please visit ${b}http://docs.codecov.io/docs/supported-languages${x}"
- say " search for your projects language to learn how to collect reports."
- exit ${exit_with};
-fi
-
-if [ "$ft_fix" = "1" ];
-then
- say "${e}==>${x} Appending adjustments"
- say " ${b}http://docs.codecov.io/docs/fixing-reports${x}"
-
- empty_line='^[[:space:]]*$'
- # //
- syntax_comment='^[[:space:]]*//.*'
- # /* or */
- syntax_comment_block='^[[:space:]]*(\/\*|\*\/)[[:space:]]*$'
- # { or }
- syntax_bracket='^[[:space:]]*[\{\}][[:space:]]*(//.*)?$'
- # [ or ]
- syntax_list='^[[:space:]]*[][][[:space:]]*(//.*)?$'
-
- skip_dirs="-not -path '*/$bower_components/*' \
- -not -path '*/node_modules/*'"
-
- cut_and_join() {
- awk 'BEGIN { FS=":" }
- $3 ~ /\/\*/ || $3 ~ /\*\// { print $0 ; next }
- $1!=key { if (key!="") print out ; key=$1 ; out=$1":"$2 ; next }
- { out=out","$2 }
- END { print out }' 2>/dev/null
- }
-
- if echo "$network" | grep -m1 '.kt$' 1>/dev/null;
- then
- # skip brackets and comments
- find "$git_root" -type f \
- -name '*.kt' \
- -exec \
- grep -nIHE -e $syntax_bracket \
- -e $syntax_comment_block {} \; \
- | cut_and_join \
- >> $adjustments_file \
- || echo ''
-
- # last line in file
- find "$git_root" -type f \
- -name '*.kt' -exec \
- wc -l {} \; \
- | while read l; do echo "EOF: $l"; done \
- 2>/dev/null \
- >> $adjustments_file \
- || echo ''
-
- fi
-
- if echo "$network" | grep -m1 '.go$' 1>/dev/null;
- then
- # skip empty lines, comments, and brackets
- find "$git_root" -not -path '*/vendor/*' \
- -type f \
- -name '*.go' \
- -exec \
- grep -nIHE \
- -e $empty_line \
- -e $syntax_comment \
- -e $syntax_comment_block \
- -e $syntax_bracket \
- {} \; \
- | cut_and_join \
- >> $adjustments_file \
- || echo ''
- fi
-
- if echo "$network" | grep -m1 '.dart$' 1>/dev/null;
- then
- # skip brackets
- find "$git_root" -type f \
- -name '*.dart' \
- -exec \
- grep -nIHE \
- -e $syntax_bracket \
- {} \; \
- | cut_and_join \
- >> $adjustments_file \
- || echo ''
- fi
-
- if echo "$network" | grep -m1 '.php$' 1>/dev/null;
- then
- # skip empty lines, comments, and brackets
- find "$git_root" -not -path "*/vendor/*" \
- -type f \
- -name '*.php' \
- -exec \
- grep -nIHE \
- -e $syntax_list \
- -e $syntax_bracket \
- -e '^[[:space:]]*\);[[:space:]]*(//.*)?$' \
- {} \; \
- | cut_and_join \
- >> $adjustments_file \
- || echo ''
- fi
-
- if echo "$network" | grep -m1 '\(.cpp\|.h\|.cxx\|.c\|.hpp\|.m\)$' 1>/dev/null;
- then
- # skip brackets
- find "$git_root" -type f \
- $skip_dirs \
- \( \
- -name '*.h' \
- -or -name '*.cpp' \
- -or -name '*.cxx' \
- -or -name '*.m' \
- -or -name '*.c' \
- -or -name '*.hpp' \
- \) -exec \
- grep -nIHE \
- -e $empty_line \
- -e $syntax_bracket \
- -e '// LCOV_EXCL' \
- {} \; \
- | cut_and_join \
- >> $adjustments_file \
- || echo ''
-
- # skip brackets
- find "$git_root" -type f \
- $skip_dirs \
- \( \
- -name '*.h' \
- -or -name '*.cpp' \
- -or -name '*.cxx' \
- -or -name '*.m' \
- -or -name '*.c' \
- -or -name '*.hpp' \
- \) -exec \
- grep -nIH '// LCOV_EXCL' \
- {} \; \
- >> $adjustments_file \
- || echo ''
-
- fi
-
- found=$(cat $adjustments_file | tr -d ' ')
-
- if [ "$found" != "" ];
- then
- say " ${g}+${x} Found adjustments"
- echo "# path=fixes" >> $upload_file
- cat $adjustments_file >> $upload_file
- echo "<<<<<< EOF" >> $upload_file
- rm -rf $adjustments_file
- else
- say " ${e}->${x} No adjustments found"
- fi
-fi
-
-if [ "$url_o" != "" ];
-then
- url="$url_o"
-fi
-
-if [ "$dump" != "0" ];
-then
- # trim whitespace from query
- say " ${e}->${x} Dumping upload file (no upload)"
- echo "$url/upload/v4?$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ')"
- cat $upload_file
-else
-
- say "${e}==>${x} Gzipping contents"
- gzip -nf9 $upload_file
-
- query=$(echo "${query}" | tr -d ' ')
- say "${e}==>${x} Uploading reports"
- say " ${e}url:${x} $url"
- say " ${e}query:${x} $query"
-
- # now add token to query
- query=$(echo "package=bash-$VERSION&token=$token&$query" | tr -d ' ')
-
- if [ "$ft_s3" = "1" ];
- then
- i="0"
- while [ $i -lt 4 ]
- do
- i=$[$i+1]
- say " ${e}->${x} Pinging Codecov"
- res=$(curl $curl_s -X POST $curlargs $cacert \
- -H 'X-Reduced-Redundancy: false' \
- -H 'X-Content-Type: application/x-gzip' \
- "$url/upload/v4?$query" || true)
- # a good replay is "https://codecov.io" + "\n" + "https://codecov.s3.amazonaws.com/..."
- status=$(echo "$res" | head -1 | grep 'HTTP ' | cut -d' ' -f2)
- if [ "$status" = "" ];
- then
- s3target=$(echo "$res" | sed -n 2p)
- say " ${e}->${x} Uploading"
- s3=$(curl $curl_s -fiX PUT $curlawsargs \
- --data-binary @$upload_file.gz \
- -H 'Content-Type: application/x-gzip' \
- -H 'Content-Encoding: gzip' \
- -H 'x-amz-acl: public-read' \
- "$s3target" || true)
- if [ "$s3" != "" ];
- then
- say " ${g}->${x} View reports at ${b}$(echo "$res" | sed -n 1p)${x}"
- exit 0
- else
- say " ${r}X>${x} Failed to upload"
- fi
- elif [ "$status" = "400" ];
- then
- # 400 Error
- say "${g}${res}${x}"
- exit ${exit_with}
- fi
- say " ${e}->${x} Sleeping for 30s and trying again..."
- sleep 30
- done
- fi
-
- say " ${e}->${x} Uploading to Codecov"
- i="0"
- while [ $i -lt 4 ]
- do
- i=$[$i+1]
-
- res=$(curl $curl_s -X POST $curlargs $cacert \
- --data-binary @$upload_file.gz \
- -H 'Content-Type: text/plain' \
- -H 'Content-Encoding: gzip' \
- -H 'X-Content-Encoding: gzip' \
- -H 'Accept: text/plain' \
- "$url/upload/v2?$query" || echo 'HTTP 500')
- # HTTP 200
- # http://....
- status=$(echo "$res" | head -1 | cut -d' ' -f2)
- if [ "$status" = "" ];
- then
- say " View reports at ${b}$(echo "$res" | head -2 | tail -1)${x}"
- exit 0
-
- elif [ "${status:0:1}" = "5" ];
- then
- say " ${e}->${x} Sleeping for 30s and trying again..."
- sleep 30
-
- else
- say " ${g}${res}${x}"
- exit 0
- exit ${exit_with}
- fi
-
- done
-
- say " ${r}X> Failed to upload coverage reports${x}"
-fi
-
-exit ${exit_with}
diff --git a/ci/jenkins/Jenkinsfile_py3-master_cpu_unittest b/ci/jenkins/Jenkinsfile_py3-master_cpu_unittest
deleted file mode 100644
index fb87760de6..0000000000
--- a/ci/jenkins/Jenkinsfile_py3-master_cpu_unittest
+++ /dev/null
@@ -1,69 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Sanity', [
- build_steps.sanity_lint('gluon-nlp-cpu-py3-master', 'cpu/py3-master', 'src/gluonnlp')
- ])
-
- utils.parallel_stage('Tests', [
- build_steps.test_unittest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'tests/unittest', 'src/gluonnlp',
- 'not (gpu or serial or skip_master)',
- 4, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'tests/unittest', 'src/gluonnlp',
- 'not (gpu or skip_master) and serial',
- 0, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'not (gpu or serial or integration or skip_master)',
- 4, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- '(not (gpu or integration or skip_master)) and serial',
- 0, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'not (gpu or serial or skip_master) and integration',
- 4, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'not (gpu or skip_master) and serial and integration',
- 0, false, false)
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/Jenkinsfile_py3-master_gpu_doc b/ci/jenkins/Jenkinsfile_py3-master_gpu_doc
deleted file mode 100644
index 82d6cc5fee..0000000000
--- a/ci/jenkins/Jenkinsfile_py3-master_gpu_doc
+++ /dev/null
@@ -1,168 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Doc Test', [
- build_steps.test_doctest('gluon-nlp-cpu-py3-master', 'cpu/py3-master',
- 'src/gluonnlp', 'src/gluonnlp', 4)
- ])
-
- // Compile example notebooks, Doctest & Create Website
- node { // Single node parallelism
- ws('gluon-nlp-cpu-py3-master') {
- stage("Prepare conda environment for website") {
- utils.init_git()
- // Require a full environment here due to sphinx build step
- // after compiling and downloading the notebooks
- sh 'source ci/prepare_clean_env.sh cpu/py3-master'
- }
-
- stage("Create Website") {
- def tests = [:]
- for (f in findFiles(glob: '**/docs/examples/*/*.md')) {
- def md_file = f.toString() // Convert FileWrapper to String
- def short_name = md_file["docs/examples/".length()..-1]
- tests[short_name] = { ->
- def base_name = md_file[0..-4] + ''
- def ipynb_file = base_name + '.ipynb'
- def stdout_file = base_name + '.stdout.log'
- def stderr_file = base_name + '.stderr.log'
- stage(short_name) { // remove common path from name
- // Submit AWS Batch jobs for each example notebook
- // The converted notebooks and the conversion logs are
- // saved to S3 and retrieved on the CI server once the jobs
- // finished.
-
- if (env.BRANCH_NAME.startsWith('PR-')){
- sh """
- set +e
- conda activate ./conda/cpu/py3-master
-
- python3 ci/batch/submit-job.py --region us-east-1 --wait \
- --timeout 1800 --saved-output ./docs/examples --conda-env docker/py3 \
- --name GluonNLP-${env.BRANCH_NAME}-${env.BUILD_NUMBER} \
- --save-path batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/docs/examples \
- --work-dir . --source-ref refs/pull/${env.CHANGE_ID}/head \
- --command \"(python3 docs/md2ipynb.py ${md_file} | tee ${stdout_file}) 3>&1 1>&2 2>&3 | tee ${stderr_file} \"
- BATCH_EXIT_CODE=\$?
-
- aws s3api wait object-exists --bucket gluon-nlp-staging \
- --key batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stderr_file}
- aws s3 cp s3://gluon-nlp-staging/batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stderr_file} ${stderr_file}
- cat ${stderr_file}
-
- aws s3api wait object-exists --bucket gluon-nlp-staging \
- --key batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stdout_file}
- aws s3 cp s3://gluon-nlp-staging/batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stdout_file} ${stdout_file}
- cat ${stdout_file}
-
- if [ \$BATCH_EXIT_CODE -ne 0 ]; then
- echo AWS Batch Task Failed
- else
- aws s3api wait object-exists --bucket gluon-nlp-staging \
- --key batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${ipynb_file}
- aws s3 cp s3://gluon-nlp-staging/batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${ipynb_file} ${ipynb_file}
- fi
-
- exit \$BATCH_EXIT_CODE
- """
- } else {
- sh """
- set +e
- conda activate ./conda/cpu/py3-master
-
- python3 ci/batch/submit-job.py --region us-east-1 --wait \
- --timeout 1800 --saved-output ./docs/examples --conda-env docker/py3 \
- --name GluonNLP-${env.BRANCH_NAME}-${env.BUILD_NUMBER} \
- --save-path batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/docs/examples \
- --work-dir . --source-ref ${env.BRANCH_NAME} \
- --command \"(python3 docs/md2ipynb.py ${md_file} | tee ${stdout_file}) 3>&1 1>&2 2>&3 | tee ${stderr_file} \"
- BATCH_EXIT_CODE=\$?
-
- aws s3api wait object-exists --bucket gluon-nlp-staging \
- --key batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stderr_file}
- aws s3 cp s3://gluon-nlp-staging/batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stderr_file} ${stderr_file}
- cat ${stderr_file}
-
- aws s3api wait object-exists --bucket gluon-nlp-staging \
- --key batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stdout_file}
- aws s3 cp s3://gluon-nlp-staging/batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${stdout_file} ${stdout_file}
- cat ${stdout_file}
-
- if [ \$BATCH_EXIT_CODE -ne 0 ]; then
- echo AWS Batch Task Failed
- else
- aws s3api wait object-exists --bucket gluon-nlp-staging \
- --key batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${ipynb_file}
- aws s3 cp s3://gluon-nlp-staging/batch/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/${ipynb_file} ${ipynb_file}
- fi
-
- exit \$BATCH_EXIT_CODE
- """
- }
- }
- }
- }
-
- parallel tests
- }
-
- stage("Upload Website") {
- if (env.BRANCH_NAME.startsWith('PR-')){
- bucket = 'gluon-nlp-staging'
- path = env.BRANCH_NAME+'/'+env.BUILD_NUMBER
- } else {
- bucket = 'gluon-nlp'
- path = env.BRANCH_NAME
- }
- sh """
- conda activate ./conda/cpu/py3-master
- make docs
- ci/upload_doc.sh ${bucket} ${path}
- """
- }
- }
- }
-
- utils.parallel_stage('Documentation', [
- build_steps.website_linkcheck('gluon-nlp-cpu-py3-master', 'cpu/py3-master')
- ])
-
- utils.parallel_stage('Deploy', [
- build_steps.post_website_link()
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/Jenkinsfile_py3-master_gpu_integration b/ci/jenkins/Jenkinsfile_py3-master_gpu_integration
deleted file mode 100644
index 31002e4bdd..0000000000
--- a/ci/jenkins/Jenkinsfile_py3-master_gpu_integration
+++ /dev/null
@@ -1,53 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Sanity', [
- build_steps.sanity_lint('gluon-nlp-gpu-py3-master', 'gpu/py3-master', 'scripts')
- ])
-
- utils.parallel_stage('Scripts', [
- build_steps.test_unittest('gluon-nlp-gpu-py3-master', 'gpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and (not (serial or skip_master)) and integration',
- 4, true, true),
- build_steps.test_unittest('gluon-nlp-gpu-py3-master', 'gpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and serial and integration and (not skip_master)',
- 0, true, true)
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/Jenkinsfile_py3-master_gpu_unittest b/ci/jenkins/Jenkinsfile_py3-master_gpu_unittest
deleted file mode 100644
index 6275e40d58..0000000000
--- a/ci/jenkins/Jenkinsfile_py3-master_gpu_unittest
+++ /dev/null
@@ -1,61 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Sanity', [
- build_steps.sanity_lint('gluon-nlp-gpu-py3-master', 'gpu/py3-master', 'src/gluonnlp')
- ])
-
- utils.parallel_stage('Tests', [
- build_steps.test_unittest('gluon-nlp-gpu-py3-master', 'gpu/py3-master',
- 'tests/unittest', 'src/gluonnlp',
- 'gpu and (not (serial or skip_master))',
- 4, true, false),
- build_steps.test_unittest('gluon-nlp-gpu-py3-master', 'gpu/py3-master',
- 'tests/unittest', 'src/gluonnlp',
- 'gpu and serial and not skip_master',
- 0, true, false),
- build_steps.test_unittest('gluon-nlp-gpu-py3-master', 'gpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and (not (serial or skip_master or integration))',
- 4, true, false),
- build_steps.test_unittest('gluon-nlp-gpu-py3-master', 'gpu/py3-master',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and serial and not (skip_master or integration)',
- 0, true, false)
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/Jenkinsfile_py3_cpu_unittest b/ci/jenkins/Jenkinsfile_py3_cpu_unittest
deleted file mode 100644
index 6d518fdbfd..0000000000
--- a/ci/jenkins/Jenkinsfile_py3_cpu_unittest
+++ /dev/null
@@ -1,69 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Sanity', [
- build_steps.sanity_lint('gluon-nlp-cpu-py3', 'cpu/py3', 'src/gluonnlp')
- ])
-
- utils.parallel_stage('Tests', [
- build_steps.test_unittest('gluon-nlp-cpu-py3', 'cpu/py3',
- 'tests/unittest', 'src/gluonnlp',
- 'not (gpu or serial)',
- 4, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3', 'cpu/py3',
- 'tests/unittest', 'src/gluonnlp',
- '(not gpu) and serial',
- 0, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3', 'cpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- 'not (gpu or serial or integration)',
- 4, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3', 'cpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- '(not (gpu or integration)) and serial',
- 0, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3', 'cpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- 'not (gpu or serial) and integration',
- 4, false, false),
- build_steps.test_unittest('gluon-nlp-cpu-py3', 'cpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- '(not gpu) and serial and integration',
- 0, false, false)
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/Jenkinsfile_py3_gpu_integration b/ci/jenkins/Jenkinsfile_py3_gpu_integration
deleted file mode 100644
index e683f5f14d..0000000000
--- a/ci/jenkins/Jenkinsfile_py3_gpu_integration
+++ /dev/null
@@ -1,53 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Sanity', [
- build_steps.sanity_lint('gluon-nlp-gpu-py3', 'gpu/py3', 'scripts')
- ])
-
- utils.parallel_stage('Scripts', [
- build_steps.test_unittest('gluon-nlp-gpu-py3', 'gpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and (not serial) and integration',
- 4, true, true),
- build_steps.test_unittest('gluon-nlp-gpu-py3', 'gpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and serial and integration',
- 0, true, true)
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/Jenkinsfile_py3_gpu_unittest b/ci/jenkins/Jenkinsfile_py3_gpu_unittest
deleted file mode 100644
index 8430ca0a36..0000000000
--- a/ci/jenkins/Jenkinsfile_py3_gpu_unittest
+++ /dev/null
@@ -1,61 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// Jenkins pipeline
-// See documents at https://jenkins.io/doc/book/pipeline/jenkinsfile/
-
-// timeout in minutes
-max_time = 120
-
-node {
- // Loading the utilities requires a node context unfortunately
- checkout scm
- utils = load('ci/jenkins/utils.groovy')
- build_steps = load('ci/jenkins/build_steps.groovy')
-}
-utils.assign_node_labels(linux_gpu: 'linux-gpu', linux_cpu: 'linux-cpu')
-
-utils.main_wrapper(
-core_logic: {
- utils.parallel_stage('Sanity', [
- build_steps.sanity_lint('gluon-nlp-gpu-py3', 'gpu/py3', 'src/gluonnlp')
- ])
-
- utils.parallel_stage('Tests', [
- build_steps.test_unittest('gluon-nlp-gpu-py3', 'gpu/py3',
- 'tests/unittest', 'src/gluonnlp',
- 'gpu and not serial',
- 4, true, false),
- build_steps.test_unittest('gluon-nlp-gpu-py3', 'gpu/py3',
- 'tests/unittest', 'src/gluonnlp',
- 'gpu and serial',
- 0, true, false),
- build_steps.test_unittest('gluon-nlp-gpu-py3', 'gpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and not (serial or integration)',
- 4, true, false),
- build_steps.test_unittest('gluon-nlp-gpu-py3', 'gpu/py3',
- 'scripts/tests', 'src/gluonnlp',
- 'gpu and serial and not integration',
- 0, true, false)
- ])
-}
-,
-failure_handler: {}
-)
diff --git a/ci/jenkins/build_steps.groovy b/ci/jenkins/build_steps.groovy
deleted file mode 100644
index 63bd59e81d..0000000000
--- a/ci/jenkins/build_steps.groovy
+++ /dev/null
@@ -1,127 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-//
-// This file contains the steps that will be used in the
-// Jenkins pipelines
-
-utils = load('ci/jenkins/utils.groovy')
-
-def sanity_lint(workspace_name, conda_env_name, path) {
- return ['Lint': {
- node {
- ws(workspace_name) {
- timeout(time: max_time, unit: 'MINUTES') {
- utils.init_git()
- sh """
- set -ex
- source ci/prepare_clean_env.sh ${conda_env_name}
- make lintdir=${path} lint
- set +ex
- """
- }
- }
- }
- }]
-}
-
-def test_unittest(workspace_name, conda_env_name,
- test_path, cov_path,
- mark,
- threads, gpu, skip_report) {
- capture_flag = env.BRANCH_NAME.startsWith('PR-')?'':'--capture=no'
- node_type = gpu?NODE_LINUX_GPU:NODE_LINUX_CPU
- return ["${conda_env_name}: ${test_path} -m '${mark}'": {
- node(node_type) {
- ws(workspace_name) {
- timeout(time: max_time, unit: 'MINUTES') {
- utils.init_git()
- sh """
- set -ex
- source ci/prepare_clean_env.sh ${conda_env_name}
- pytest -v ${capture_flag} -n ${threads} -m '${mark}' --durations=30 --cov ${cov_path} --cov-report=term --cov-report xml ${test_path}
- set +ex
- """
- if (!skip_report) utils.publish_test_coverage('GluonNLPCodeCov')
- }
- }
- }
- }]
-}
-
-def test_doctest(workspace_name, conda_env_name,
- test_path, cov_path, threads) {
- capture_flag = env.BRANCH_NAME.startsWith('PR-')?'':'--capture=no'
- return ["${conda_env_name}: doctest ${test_path}": {
- node(NODE_LINUX_CPU) {
- ws(workspace_name) {
- timeout(time: max_time, unit: 'MINUTES') {
- utils.init_git()
- sh """
- set -ex
- source ci/prepare_clean_env.sh ${conda_env_name}
- pytest -v ${capture_flag} -n ${threads} --durations=30 --cov ${cov_path} --cov-report=term --cov-report xml --doctest-modules ${test_path}
- set +ex
- """
- utils.publish_test_coverage('GluonNLPCodeCov')
- }
- }
- }
- }]
-}
-
-def website_linkcheck(workspace_name, conda_env_name) {
- return ["${conda_env_name}: website link check": {
- node(NODE_LINUX_CPU) {
- ws(workspace_name) {
- timeout(time: max_time, unit: 'MINUTES') {
- utils.init_git()
- sh """
- set -ex
- source ci/prepare_clean_env.sh ${conda_env_name}
- make distribute
- set +ex
- """
- linkcheck_errors = sh returnStdout: true, script: """
- conda activate ./conda/${conda_env_name}
- """
- linkcheck_errors = linkcheck_errors.split('\n').findAll {it ==~ '/^(line *[0-9]*) broken.*$/'}
- linkcheck_errors = linkcheck_errors.join('\n')
- linkcheck_errors = linkcheck_errors.trim()
- if (linkcheck_errors && env.BRANCH_NAME.startsWith("PR-")) {
- pullRequest.comment("Found link check problems in job ${env.BRANCH_NAME}/${env.BUILD_NUMBER}:\n"+linkcheck_errors)
- }
- }
- }
- }
- }]
-}
-
-def post_website_link() {
- return ["Deploy: ": {
- node {
- timeout(time: max_time, unit: 'MINUTES') {
- if (env.BRANCH_NAME.startsWith("PR-")) {
- pullRequest.comment("Job ${env.BRANCH_NAME}/${env.BUILD_NUMBER} is complete. \nDocs are uploaded to http://gluon-nlp-staging.s3-accelerate.dualstack.amazonaws.com/${env.BRANCH_NAME}/${env.BUILD_NUMBER}/index.html")
- }
- }
- }
- }]
-}
-
-return this
diff --git a/ci/jenkins/utils.groovy b/ci/jenkins/utils.groovy
deleted file mode 100644
index ddbde419d5..0000000000
--- a/ci/jenkins/utils.groovy
+++ /dev/null
@@ -1,214 +0,0 @@
-// -*- mode: groovy -*-
-
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-// initialize source codes
-def init_git() {
- deleteDir()
- retry(5) {
- try {
- // Make sure wait long enough for api.github.com request quota. Important: Don't increase the amount of
- // retries as this will increase the amount of requests and worsen the throttling
- timeout(time: 15, unit: 'MINUTES') {
- checkout scm
- sh 'git clean -xdff'
- sh 'git reset --hard'
- sh 'git submodule update --init --recursive'
- sh 'git submodule foreach --recursive git clean -ffxd'
- sh 'git submodule foreach --recursive git reset --hard'
- }
- } catch (exc) {
- deleteDir()
- error "Failed to fetch source codes with ${exc}"
- sleep 2
- }
- }
-}
-
-
-def get_git_commit_hash() {
- lastCommitMessage = sh (script: "git log -1 --pretty=%B", returnStdout: true)
- lastCommitMessage = lastCommitMessage.trim()
- if (lastCommitMessage.startsWith("Merge commit '") && lastCommitMessage.endsWith("' into HEAD")) {
- // Merge commit applied by Jenkins, skip that commit
- git_commit_hash = sh (script: "git rev-parse @~", returnStdout: true)
- } else {
- git_commit_hash = sh (script: "git rev-parse @", returnStdout: true)
- }
- return git_commit_hash.trim()
-}
-
-def publish_test_coverage(codecov_credential) {
- // CodeCovs auto detection has trouble with our CIs PR validation due the merging strategy
- git_commit_hash = get_git_commit_hash()
-
- if (env.CHANGE_ID) {
- // PR execution
- codecovArgs = "-B ${env.CHANGE_TARGET} -C ${git_commit_hash} -P ${env.CHANGE_ID}"
- } else {
- // Branch execution
- codecovArgs = "-B ${env.BRANCH_NAME} -C ${git_commit_hash}"
- }
-
- // To make sure we never fail because test coverage reporting is not available
- // Fall back to our own copy of the bash helper if it failed to download the public version
- withCredentials([string(credentialsId: codecov_credential, variable: 'CODECOV_TOKEN')]) {
- sh "(curl --retry 10 -s https://codecov.io/bash | bash -s - ${codecovArgs}) || (curl --retry 10 -s https://s3-us-west-2.amazonaws.com/mxnet-ci-prod-slave-data/codecov-bash.txt | bash -s - ${codecovArgs}) || true"
- }
-}
-
-// Allow publishing to GitHub with a custom context (the status shown under a PR)
-// Credit to https://plugins.jenkins.io/github
-def get_repo_url() {
- checkout scm
- return sh(returnStdout: true, script: "git config --get remote.origin.url").trim()
-}
-
-def update_github_commit_status(state, message) {
- node {
- // NOTE: https://issues.jenkins-ci.org/browse/JENKINS-39482
- //The GitHubCommitStatusSetter requires that the Git Server is defined under
- //*Manage Jenkins > Configure System > GitHub > GitHub Servers*.
- //Otherwise the GitHubCommitStatusSetter is not able to resolve the repository name
- //properly and you would see an empty list of repos:
- //[Set GitHub commit status (universal)] PENDING on repos [] (sha:xxxxxxx) with context:test/mycontext
- //See https://cwiki.apache.org/confluence/display/MXNET/Troubleshooting#Troubleshooting-GitHubcommit/PRstatusdoesnotgetpublished
-
- echo "Publishing commit status..."
-
- repoUrl = get_repo_url()
- echo "repoUrl=${repoUrl}"
-
- commitSha = get_git_commit_hash()
- echo "commitSha=${commitSha}"
-
- context = get_github_context()
- echo "context=${context}"
-
- // a few attempts need to be made: https://github.com/apache/incubator-mxnet/issues/11654
- for (int attempt = 1; attempt <= 3; attempt++) {
- echo "Sending GitHub status attempt ${attempt}..."
-
- step([
- $class: 'GitHubCommitStatusSetter',
- reposSource: [$class: "ManuallyEnteredRepositorySource", url: repoUrl],
- contextSource: [$class: "ManuallyEnteredCommitContextSource", context: context],
- commitShaSource: [$class: "ManuallyEnteredShaSource", sha: commitSha],
- statusBackrefSource: [$class: "ManuallyEnteredBackrefSource", backref: "${env.RUN_DISPLAY_URL}"],
- errorHandlers: [[$class: 'ShallowAnyErrorHandler']],
- statusResultSource: [
- $class: 'ConditionalStatusResultSource',
- results: [[$class: "AnyBuildResult", message: message, state: state]]
- ]
- ])
-
- if (attempt <= 2) {
- sleep 1
- }
- }
-
- echo "Publishing commit status done."
-
- }
-}
-
-def get_github_context() {
- // Since we use multi-branch pipelines, Jenkins appends the branch name to the job name
- if (env.BRANCH_NAME) {
- short_job_name = JOB_NAME.substring(0, JOB_NAME.lastIndexOf('/'))
- } else {
- short_job_name = JOB_NAME
- }
-
- return "ci/jenkins/${short_job_name}"
-}
-
-def parallel_stage(stage_name, steps) {
- // Allow to pass an array of steps that will be executed in parallel in a stage
- new_map = [:]
-
- for (def step in steps) {
- new_map = new_map << step
- }
-
- stage(stage_name) {
- parallel new_map
- }
-}
-
-def assign_node_labels(args) {
- // This function allows to assign instance labels to the generalized placeholders.
- // This serves two purposes:
- // 1. Allow generalized placeholders (e.g. NODE_WINDOWS_CPU) in the job definition
- // in order to abstract away the underlying node label. This allows to schedule a job
- // onto a different node for testing or security reasons. This could be, for example,
- // when you want to test a new set of slaves on separate labels or when a job should
- // only be run on restricted slaves
- // 2. Restrict the allowed job types within a Jenkinsfile. For example, a UNIX-CPU-only
- // Jenkinsfile should not allowed access to Windows or GPU instances. This prevents
- // users from just copy&pasting something into an existing Jenkinsfile without
- // knowing about the limitations.
- NODE_LINUX_GPU = args.linux_gpu
- NODE_LINUX_CPU = args.linux_cpu
-}
-
-def main_wrapper(args) {
- // Main Jenkinsfile pipeline wrapper handler that allows to wrap core logic into a format
- // that supports proper failure handling
- // args:
- // - core_logic: Jenkins pipeline containing core execution logic
- // - failure_handler: Failure handler
-
- // assign any caught errors here
- err = null
- try {
- update_github_commit_status('PENDING', 'Job has been enqueued')
-
- timestamps {
- args['core_logic']()
- }
-
- // set build status to success at the end
- currentBuild.result = "SUCCESS"
- update_github_commit_status('SUCCESS', 'Job succeeded')
- } catch (caughtError) {
- node {
- sh "echo caught ${caughtError}"
- err = caughtError
- currentBuild.result = "FAILURE"
- update_github_commit_status('FAILURE', 'Job failed')
- }
- } finally {
- timestamps {
- node {
- // Call failure handler
- args['failure_handler']()
-
- // Clean workspace to reduce space requirements
- cleanWs()
-
- // Remember to rethrow so the build is marked as failing
- if (err) {
- throw err
- }
- }
- }
- }
-}
-
-return this
diff --git a/ci/prepare_clean_env.sh b/ci/prepare_clean_env.sh
deleted file mode 100755
index 1a224c418a..0000000000
--- a/ci/prepare_clean_env.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/bin/bash
-env_name=$1
-
-echo Preparing clean environment on $(hostname) in $(ls -id $(pwd))
-
-export LD_LIBRARY_PATH=/usr/local/cuda-10.0/lib64
-export CUDA_VISIBLE_DEVICES=$EXECUTOR_NUMBER
-export CONDA_ENVS_PATH=$PWD/conda
-export CONDA_PKGS_DIRS=$PWD/conda/pkgs
-export MXNET_HOME=$PWD/tests/data
-export HOROVOD_WITHOUT_TENSORFLOW=1
-export HOROVOD_WITHOUT_PYTORCH=1
-export HOROVOD_WITH_MXNET=1
-
-make clean
-conda env update --prune -p conda/${env_name} -f env/${env_name}.yml
-conda activate ./conda/${env_name}
-conda list
-printenv
-
-pip install -v -e .
-pip install horovod --no-cache-dir -U
-python -m spacy download en
-python -m spacy download de
-python -m nltk.downloader all
diff --git a/ci/rat/rat-excludes b/ci/rat/rat-excludes
deleted file mode 100755
index 3d6d00f7e8..0000000000
--- a/ci/rat/rat-excludes
+++ /dev/null
@@ -1,55 +0,0 @@
-\..*
-.*css
-\\.*
-.*ipynb
-.*html
-.*json
-.*txt
-3rdparty/*
-R-package/*
-trunk/*
-.*\\.m
-.*\\.mk
-.*\\.R
-.*svg
-.*cfg
-.*config
-.*rst
-__init__.py
-build/*
-.*\\.t
-MANIFEST
-Changes
-.*csv
-.*names
-CODEOWNERS
-snap.python
-bbox.pyx
-cpu_nms.pyx
-gpu_nms.pyx
-nms_kernel.cu
-_mask.pyx
-coco.py
-base.pyi
-special_functions-inl.h
-erfinv-inl.h
-im2col.cuh
-im2col.h
-pool.h
-dataset.cPickle
-image-classification/*
-rat-excludes
-apache-rat-tasks/*
-moderngpu/*
-deformable_im2col.cuh
-deformable_im2col.h
-REQUIRE
-Project.toml
-include/*
-.*.iml
-.*.json.ref
-searchtools_custom.js
-theme.conf
-LICENSE.binary.dependencies
-multi-bleu-detok.perl
-multi-bleu.perl
diff --git a/ci/upload_doc.sh b/ci/upload_doc.sh
deleted file mode 100755
index efa5e5d904..0000000000
--- a/ci/upload_doc.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-bucket=$1
-path=$2
-echo "Uploading doc to s3://${bucket}/${path}/"
-aws s3 sync --delete docs/_build/html/ s3://${bucket}/${path}/ --acl public-read
-echo "Uploaded doc to http://${bucket}.s3-accelerate.dualstack.amazonaws.com/${path}/index.html"
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index fcc1c6dece..0000000000
--- a/codecov.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-codecov:
- notify:
- require_ci_to_pass: yes
- ci:
- - ci.mxnet.io
-
-coverage:
- precision: 2
- round: down
- range: "70...100"
-
- status:
- project: yes
- patch: yes
- changes: no
-
-parsers:
- gcov:
- branch_detection:
- conditional: yes
- loop: yes
- method: no
- macro: no
-
-comment:
- layout: "header, reach, diff, files"
- behavior: default
- require_changes: no
- require_base: no
- require_head: no
diff --git a/conftest.py b/conftest.py
deleted file mode 100644
index 8c9e442716..0000000000
--- a/conftest.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-"""conftest.py contains configuration for pytest.
-
-Configuration file for tests in tests/ and scripts/ folders.
-
-Note that fixtures of higher-scoped fixtures (such as ``session``) are
-instantiated before lower-scoped fixtures (such as ``function``).
-
-"""
-
-import logging
-import os
-import random
-
-import numpy as np
-import mxnet as mx
-import gluonnlp
-import pytest
-
-
-def pytest_sessionfinish(session, exitstatus):
- if exitstatus == 5: # Don't fail if no tests were run
- session.exitstatus = 0
-
-
-# * Random seed setup
-def pytest_configure():
- """Pytest configuration hook to help reproduce test segfaults
-
- Sets and outputs rng seeds.
-
- The segfault-debug procedure on a module called test_module.py is:
-
- 1. run "pytest --verbose test_module.py". A seg-faulting output might be:
-
- [INFO] np, mx and python random seeds = 4018804151
- test_module.test1 ... ok
- test_module.test2 ... Illegal instruction (core dumped)
-
- 2. Copy the module-starting seed into the next command, then run:
-
- MXNET_MODULE_SEED=4018804151 pytest --log-level=DEBUG --verbose test_module.py
-
- Output might be:
-
- [WARNING] **** module-level seed is set: all tests running deterministically ****
- [INFO] np, mx and python random seeds = 4018804151
- test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
- ok
- test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
- Illegal instruction (core dumped)
-
- 3. Copy the segfaulting-test seed into the command:
- MXNET_TEST_SEED=1435005594 pytest --log-level=DEBUG --verbose test_module.py:test2
- Output might be:
-
- [INFO] np, mx and python random seeds = 2481884723
- test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
- Illegal instruction (core dumped)
-
- 3. Finally reproduce the segfault directly under gdb (might need additional os packages)
- by editing the bottom of test_module.py to be
-
- if __name__ == '__main__':
- logging.getLogger().setLevel(logging.DEBUG)
- test2()
-
- MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
-
- 4. When finished debugging the segfault, remember to unset any exported MXNET_ seed
- variables in the environment to return to non-deterministic testing (a good thing).
- """
-
- module_seed_str = os.getenv('MXNET_MODULE_SEED')
- if module_seed_str is None:
- seed = np.random.randint(0, np.iinfo(np.int32).max)
- else:
- seed = int(module_seed_str)
- logging.warning('*** module-level seed is set: '
- 'all tests running deterministically ***')
- print('Setting module np/mx/python random seeds, '
- 'use MXNET_MODULE_SEED={} to reproduce.'.format(seed))
-
- np.random.seed(seed)
- mx.random.seed(seed)
- random.seed(seed)
-
- # The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with
- # the 'with_seed()' decoration. Inform the user of this once here at the module level.
- if os.getenv('MXNET_TEST_SEED') is not None:
- logging.warning('*** test-level seed set: all "@with_seed()" '
- 'tests run deterministically ***')
-
-
-@pytest.hookimpl(tryfirst=True, hookwrapper=True)
-def pytest_runtest_makereport(item, call):
- """Make test outcome available to fixture.
-
- https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
- """
- # execute all other hooks to obtain the report object
- outcome = yield
- rep = outcome.get_result()
-
- # set a report attribute for each phase of a call, which can
- # be "setup", "call", "teardown"
- setattr(item, "rep_" + rep.when, rep)
-
-
-@pytest.fixture(scope='function', autouse=True)
-def function_scope_seed(request):
- """A function scope fixture that manages rng seeds.
-
- This fixture automatically initializes the python, numpy and mxnet random
- number generators randomly on every test run.
-
- def test_ok_with_random_data():
- ...
-
- To fix the seed used for a test case mark the test function with the
- desired seed:
-
- @pytest.mark.seed(1)
- def test_not_ok_with_random_data():
- '''This testcase actually works.'''
- assert 17 == random.randint(0, 100)
-
- When a test fails, the fixture outputs the seed used. The user can then set
- the environment variable MXNET_TEST_SEED to the value reported, then rerun
- the test with:
-
- pytest --verbose -s -k
-
- To run a test repeatedly, install pytest-repeat and add the --count argument:
-
- pip install pytest-repeat
- pytest --verbose -s -k --count 1000
-
- """
-
- seed = request.node.get_closest_marker('seed')
- env_seed_str = os.getenv('MXNET_TEST_SEED')
-
- if seed is not None:
- seed = seed.args[0]
- assert isinstance(seed, int)
- elif env_seed_str is not None:
- seed = int(env_seed_str)
- else:
- seed = np.random.randint(0, np.iinfo(np.int32).max)
-
- post_test_state = np.random.get_state()
- np.random.seed(seed)
- mx.random.seed(seed)
- random.seed(seed)
-
- seed_message = ('np/mx/python random seeds are set to '
- '{}, use MXNET_TEST_SEED={} to reproduce.')
- seed_message = seed_message.format(seed, seed)
-
- # Always log seed on DEBUG log level. This makes sure we can find out the
- # value of the seed even if the test case causes a segfault and subsequent
- # teardown code is not run.
- logging.debug(seed_message)
-
- yield # run the test
-
- if request.node.rep_setup.failed:
- logging.info("Setting up a test failed: {}", request.node.nodeid)
- elif request.node.rep_call.outcome == 'failed':
- # Either request.node.rep_setup.failed or request.node.rep_setup.passed
- # should be True
- assert request.node.rep_setup.passed
- # On failure also log seed on INFO log level
- logging.info(seed_message)
-
- np.random.set_state(post_test_state)
-
-
-# * Shared test fixtures
-@pytest.fixture(params=[True, False])
-def hybridize(request):
- return request.param
-
-@pytest.fixture(autouse=True)
-def doctest(doctest_namespace):
- doctest_namespace['np'] = np
- doctest_namespace['gluonnlp'] = gluonnlp
- doctest_namespace['mx'] = mx
- doctest_namespace['gluon'] = mx.gluon
- import doctest
- doctest.ELLIPSIS_MARKER = '-etc-'
diff --git a/docs/.gitignore b/docs/.gitignore
deleted file mode 100644
index 8d834878f5..0000000000
--- a/docs/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-doxygen
-_build
-gen_modules
-tutorials
-doctrees
diff --git a/docs/.nojekyll b/docs/.nojekyll
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/docs/404.rst b/docs/404.rst
deleted file mode 100644
index 4b9385eae7..0000000000
--- a/docs/404.rst
+++ /dev/null
@@ -1,25 +0,0 @@
-:orphan:
-
-Page Not Found
---------------
-
-You stumbled upon a page that's making us scratch our brains right now. Before any of us panics,
-we'd like you to know that you are being redirected to a better known and cozy place, in just a few seconds.
-
-.. image:: _static/404.jpg
- :alt: Page Not Found
- :width: 60%
- :align: center
- :target: ./index.html
-
-.. raw:: html
-
-
diff --git a/docs/Doxyfile b/docs/Doxyfile
deleted file mode 100644
index 0a53e8d9e6..0000000000
--- a/docs/Doxyfile
+++ /dev/null
@@ -1,2353 +0,0 @@
-# Doxyfile 1.8.8
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a double hash (##) is considered a comment and is placed in
-# front of the TAG it is preceding.
-#
-# All text after a single hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists, items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (\" \").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all text
-# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
-# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
-# for the list of possible encodings.
-# The default value is: UTF-8.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
-# double-quotes, unless you are using Doxywizard) that should identify the
-# project for which the documentation is generated. This name is used in the
-# title of most generated pages and in a few other places.
-# The default value is: My Project.
-
-PROJECT_NAME = "gluonvision"
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
-# could be handy for archiving the generated documentation or if some version
-# control system is used.
-
-PROJECT_NUMBER =
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give viewer a
-# quick idea about the purpose of the project. Keep the description short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify an logo or icon that is included in
-# the documentation. The maximum height of the logo should not exceed 55 pixels
-# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo
-# to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
-# into which the generated documentation will be written. If a relative path is
-# entered, it will be relative to the location where doxygen was started. If
-# left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = docs/doxygen
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
-# directories (in 2 levels) under the output directory of each output format and
-# will distribute the generated files over these directories. Enabling this
-# option can be useful when feeding doxygen a huge amount of source files, where
-# putting all generated files in the same directory would otherwise causes
-# performance problems for the file system.
-# The default value is: NO.
-
-CREATE_SUBDIRS = NO
-
-# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
-# characters to appear in the names of generated files. If set to NO, non-ASCII
-# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
-# U+3044.
-# The default value is: NO.
-
-#ALLOW_UNICODE_NAMES = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
-# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
-# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
-# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
-# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
-# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
-# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
-# Ukrainian and Vietnamese.
-# The default value is: English.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member
-# descriptions after the members that are listed in the file and class
-# documentation (similar to Javadoc). Set to NO to disable this.
-# The default value is: YES.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief
-# description of a member or function before the detailed description
-#
-# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-# The default value is: YES.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator that is
-# used to form the text in various listings. Each string in this list, if found
-# as the leading text of the brief description, will be stripped from the text
-# and the result, after processing the whole list, is used as the annotated
-# text. Otherwise, the brief description is used as-is. If left blank, the
-# following values are used ($name is automatically replaced with the name of
-# the entity):The $name class, The $name widget, The $name file, is, provides,
-# specifies, contains, represents, a, an and the.
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# doxygen will generate a detailed section even if there is only a brief
-# description.
-# The default value is: NO.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-# The default value is: NO.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path
-# before files name in the file list and in the header files. If set to NO the
-# shortest path that makes the file name unique will be used
-# The default value is: YES.
-
-FULL_PATH_NAMES = YES
-
-# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
-# Stripping is only done if one of the specified strings matches the left-hand
-# part of the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the path to
-# strip.
-#
-# Note that you can specify absolute paths here, but also relative paths, which
-# will be relative from the directory where doxygen is started.
-# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
-# path mentioned in the documentation of a class, which tells the reader which
-# header file to include in order to use a class. If left blank only the name of
-# the header file containing the class definition is used. Otherwise one should
-# specify the list of include paths that are normally passed to the compiler
-# using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
-# less readable) file names. This can be useful is your file systems doesn't
-# support long names like on DOS, Mac, or CD-ROM.
-# The default value is: NO.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
-# first line (until the first dot) of a Javadoc-style comment as the brief
-# description. If set to NO, the Javadoc-style will behave just like regular Qt-
-# style comments (thus requiring an explicit @brief command for a brief
-# description.)
-# The default value is: NO.
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
-# line (until the first dot) of a Qt-style comment as the brief description. If
-# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
-# requiring an explicit \brief command for a brief description.)
-# The default value is: NO.
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
-# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
-# a brief description. This used to be the default behavior. The new default is
-# to treat a multi-line C++ comment block as a detailed description. Set this
-# tag to YES if you prefer the old behavior instead.
-#
-# Note that setting this tag to YES also means that rational rose comments are
-# not recognized any more.
-# The default value is: NO.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
-# documentation from any documented member that it re-implements.
-# The default value is: YES.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a
-# new page for each member. If set to NO, the documentation of a member will be
-# part of the file/class/namespace that contains it.
-# The default value is: NO.
-
-SEPARATE_MEMBER_PAGES = NO
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
-# uses this value to replace tabs by spaces in code fragments.
-# Minimum value: 1, maximum value: 16, default value: 4.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that act as commands in
-# the documentation. An alias has the form:
-# name=value
-# For example adding
-# "sideeffect=@par Side Effects:\n"
-# will allow you to put the command \sideeffect (or @sideeffect) in the
-# documentation, which will result in a user-defined paragraph with heading
-# "Side Effects:". You can put \n's in the value part of an alias to insert
-# newlines.
-
-ALIASES =
-
-# This tag can be used to specify a number of word-keyword mappings (TCL only).
-# A mapping has the form "name=value". For example adding "class=itcl::class"
-# will allow you to use the command class in the itcl::class meaning.
-
-TCL_SUBST =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
-# only. Doxygen will then generate output that is more tailored for C. For
-# instance, some of the names that are used will be different. The list of all
-# members will be omitted, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_FOR_C = NO
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
-# Python sources only. Doxygen will then generate output that is more tailored
-# for that language. For instance, namespaces will be presented as packages,
-# qualified scopes will look different, etc.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources. Doxygen will then generate output that is tailored for Fortran.
-# The default value is: NO.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for VHDL.
-# The default value is: NO.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given
-# extension. Doxygen has a built-in mapping, but you can override or extend it
-# using this tag. The format is ext=language, where ext is a file extension, and
-# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
-# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
-# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
-# Fortran. In the later case the parser tries to guess whether the code is fixed
-# or free formatted code, this is the default for Fortran type files), VHDL. For
-# instance to make doxygen treat .inc files as Fortran files (default is PHP),
-# and .f files as C (default is Fortran), use: inc=Fortran f=C.
-#
-# Note For files without extension you can use no_extension as a placeholder.
-#
-# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
-# the files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
-# according to the Markdown format, which allows for more readable
-# documentation. See http://daringfireball.net/projects/markdown/ for details.
-# The output of markdown processing is further processed by doxygen, so you can
-# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
-# case of backward compatibilities issues.
-# The default value is: YES.
-
-#MARKDOWN_SUPPORT = YES
-
-# When enabled doxygen tries to link words that correspond to documented
-# classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
-# or globally by setting AUTOLINK_SUPPORT to NO.
-# The default value is: YES.
-
-#AUTOLINK_SUPPORT = YES
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should set this
-# tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string);
-# versus func(std::string) {}). This also make the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-# The default value is: NO.
-
-BUILTIN_STL_SUPPORT = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-# The default value is: NO.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
-# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
-# will parse them like normal C++ but will assume all classes use public instead
-# of private inheritance when no explicit protection keyword is present.
-# The default value is: NO.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate
-# getter and setter methods for a property. Setting this option to YES will make
-# doxygen to replace the get and set methods by a property in the documentation.
-# This will only work if the methods are indeed getting or setting a simple
-# type. If this is not the case, or you want to show the methods anyway, you
-# should set this option to NO.
-# The default value is: YES.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-# The default value is: NO.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES to allow class member groups of the same type
-# (for instance a group of public functions) to be put as a subgroup of that
-# type (e.g. under the Public Functions section). Set it to NO to prevent
-# subgrouping. Alternatively, this can be done per class using the
-# \nosubgrouping command.
-# The default value is: YES.
-
-SUBGROUPING = YES
-
-# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
-# are shown inside the group in which they are included (e.g. using \ingroup)
-# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
-# and RTF).
-#
-# Note that this feature does not work in combination with
-# SEPARATE_MEMBER_PAGES.
-# The default value is: NO.
-
-INLINE_GROUPED_CLASSES = NO
-
-# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
-# with only public data fields or simple typedef fields will be shown inline in
-# the documentation of the scope in which they are defined (i.e. file,
-# namespace, or group documentation), provided this scope is documented. If set
-# to NO, structs, classes, and unions are shown on a separate page (for HTML and
-# Man pages) or section (for LaTeX and RTF).
-# The default value is: NO.
-
-INLINE_SIMPLE_STRUCTS = NO
-
-# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
-# enum is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically be
-# useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-# The default value is: NO.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
-# cache is used to resolve symbols given their name and scope. Since this can be
-# an expensive process and often the same symbol appears multiple times in the
-# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
-# doxygen will become slower. If the cache is too large, memory is wasted. The
-# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
-# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
-# symbols. At the end of a run doxygen will report the cache usage and suggest
-# the optimal cache size from a speed point of view.
-# Minimum value: 0, maximum value: 9, default value: 0.
-
-LOOKUP_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available. Private
-# class members and static file members will be hidden unless the
-# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
-# Note: This will also disable the warnings about undocumented members that are
-# normally produced when WARNINGS is set to YES.
-# The default value is: NO.
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will
-# be included in the documentation.
-# The default value is: NO.
-
-EXTRACT_PRIVATE = NO
-
-# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal
-# scope will be included in the documentation.
-# The default value is: NO.
-
-#EXTRACT_PACKAGE = NO
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file will be
-# included in the documentation.
-# The default value is: NO.
-
-EXTRACT_STATIC = NO
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined
-# locally in source files will be included in the documentation. If set to NO
-# only classes defined in header files are included. Does not have any effect
-# for Java sources.
-# The default value is: YES.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local methods,
-# which are defined in the implementation section but not in the interface are
-# included in the documentation. If set to NO only methods in the interface are
-# included.
-# The default value is: NO.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base name of
-# the file that contains the anonymous namespace. By default anonymous namespace
-# are hidden.
-# The default value is: NO.
-
-EXTRACT_ANON_NSPACES = NO
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
-# undocumented members inside documented classes or files. If set to NO these
-# members will be included in the various overviews, but no documentation
-# section is generated. This option has no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy. If set
-# to NO these classes will be included in the various overviews. This option has
-# no effect if EXTRACT_ALL is enabled.
-# The default value is: NO.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
-# (class|struct|union) declarations. If set to NO these declarations will be
-# included in the documentation.
-# The default value is: NO.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
-# documentation blocks found inside the body of a function. If set to NO these
-# blocks will be appended to the function's detailed documentation block.
-# The default value is: NO.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation that is typed after a
-# \internal command is included. If the tag is set to NO then the documentation
-# will be excluded. Set it to YES to include the internal documentation.
-# The default value is: NO.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
-# names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-# The default value is: system dependent.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
-# their full class and namespace scopes in the documentation. If set to YES the
-# scope will be hidden.
-# The default value is: NO.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
-# the files that are included by a file in the documentation of that file.
-# The default value is: YES.
-
-SHOW_INCLUDE_FILES = YES
-
-# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
-# grouped member an include statement to the documentation, telling the reader
-# which file to include in order to use the member.
-# The default value is: NO.
-
-#SHOW_GROUPED_MEMB_INC = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
-# files with double quotes in the documentation rather than with sharp brackets.
-# The default value is: NO.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
-# documentation for inline members.
-# The default value is: YES.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
-# (detailed) documentation of file and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order.
-# The default value is: YES.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
-# descriptions of file, namespace and class members alphabetically by member
-# name. If set to NO the members will appear in declaration order. Note that
-# this will also influence the order of the classes in the class list.
-# The default value is: NO.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
-# (brief and detailed) documentation of class members so that constructors and
-# destructors are listed first. If set to NO the constructors will appear in the
-# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
-# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
-# member documentation.
-# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
-# detailed member documentation.
-# The default value is: NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
-# of group names into alphabetical order. If set to NO the group names will
-# appear in their defined order.
-# The default value is: NO.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
-# fully-qualified names, including namespaces. If set to NO, the class list will
-# be sorted only by class name, not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the alphabetical
-# list.
-# The default value is: NO.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
-# type resolution of all parameters of a function it will reject a match between
-# the prototype and the implementation of a member function even if there is
-# only one candidate or it is obvious which candidate to choose by doing a
-# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
-# accept a match between prototype and implementation in such cases.
-# The default value is: NO.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the
-# todo list. This list is created by putting \todo commands in the
-# documentation.
-# The default value is: YES.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the
-# test list. This list is created by putting \test commands in the
-# documentation.
-# The default value is: YES.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug
-# list. This list is created by putting \bug commands in the documentation.
-# The default value is: YES.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO)
-# the deprecated list. This list is created by putting \deprecated commands in
-# the documentation.
-# The default value is: YES.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional documentation
-# sections, marked by \if ... \endif and \cond
-# ... \endcond blocks.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
-# initial value of a variable or macro / define can have for it to appear in the
-# documentation. If the initializer consists of more lines than specified here
-# it will be hidden. Use a value of 0 to hide initializers completely. The
-# appearance of the value of individual variables and macros / defines can be
-# controlled using \showinitializer or \hideinitializer command in the
-# documentation regardless of this setting.
-# Minimum value: 0, maximum value: 10000, default value: 30.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
-# the bottom of the documentation of classes and structs. If set to YES the list
-# will mention the files that were used to generate the documentation.
-# The default value is: YES.
-
-SHOW_USED_FILES = YES
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
-# will remove the Files entry from the Quick Index and from the Folder Tree View
-# (if specified).
-# The default value is: YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
-# page. This will remove the Namespaces entry from the Quick Index and from the
-# Folder Tree View (if specified).
-# The default value is: YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command command input-file, where command is the value of the
-# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
-# by doxygen. Whatever the program writes to standard output is used as the file
-# version. For an example see the documentation.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option. You can
-# optionally specify a file name after the option, if omitted DoxygenLayout.xml
-# will be used as the name of the layout file.
-#
-# Note that if you run doxygen from a directory containing a file called
-# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
-# tag is left empty.
-
-LAYOUT_FILE =
-
-# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
-# the reference definitions. This must be a list of .bib files. The .bib
-# extension is automatically appended if omitted. This requires the bibtex tool
-# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
-# For LaTeX the style of the bibliography can be controlled using
-# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
-# search path. See also \cite for info how to create references.
-
-CITE_BIB_FILES =
-
-#---------------------------------------------------------------------------
-# Configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated to
-# standard output by doxygen. If QUIET is set to YES this implies that the
-# messages are off.
-# The default value is: NO.
-
-QUIET = NO
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES
-# this implies that the warnings are on.
-#
-# Tip: Turn warnings on while writing the documentation.
-# The default value is: YES.
-
-WARNINGS = YES
-
-# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate
-# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
-# will automatically be disabled.
-# The default value is: YES.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some parameters
-# in a documented function, or documenting parameters that don't exist or using
-# markup commands wrongly.
-# The default value is: YES.
-
-WARN_IF_DOC_ERROR = YES
-
-# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
-# are documented, but have no documentation for their parameters or return
-# value. If set to NO doxygen will only warn about wrong or incomplete parameter
-# documentation, but not about the absence of documentation.
-# The default value is: NO.
-
-WARN_NO_PARAMDOC = YES
-
-# The WARN_FORMAT tag determines the format of the warning messages that doxygen
-# can produce. The string should contain the $file, $line, and $text tags, which
-# will be replaced by the file and line number from which the warning originated
-# and the warning text. Optionally the format may contain $version, which will
-# be replaced by the version of the file (if it could be obtained via
-# FILE_VERSION_FILTER)
-# The default value is: $file:$line: $text.
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning and error
-# messages should be written. If left blank the output is written to standard
-# error (stderr).
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag is used to specify the files and/or directories that contain
-# documented source files. You may enter file names like myfile.cpp or
-# directories like /usr/src/myproject. Separate the files or directories with
-# spaces.
-# Note: If this tag is empty the current directory is searched.
-
-INPUT = include/nnvm
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
-# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
-# documentation (see: http://www.gnu.org/software/libiconv) for the list of
-# possible encodings.
-# The default value is: UTF-8.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank the
-# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
-# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
-# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
-# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
-# *.qsf, *.as and *.js.
-
-FILE_PATTERNS = *.h
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories should
-# be searched for input files as well.
-# The default value is: NO.
-
-RECURSIVE = YES
-
-# The EXCLUDE tag can be used to specify files and/or directories that should be
-# excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-#
-# Note that relative paths are relative to the directory from which doxygen is
-# run.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-# The default value is: NO.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories.
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories for example use the pattern */test/*
-
-EXCLUDE_PATTERNS = */test/* \
- logging.h
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-#
-# Note that the wildcards are matched against the file with absolute path, so to
-# exclude all test directories use the pattern */test/*
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or directories
-# that contain example code fragments that are included (see the \include
-# command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
-# *.h) to filter out the source-files in the directories. If left blank all
-# files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude commands
-# irrespective of the value of the RECURSIVE tag.
-# The default value is: NO.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or directories
-# that contain images that are to be included in the documentation (see the
-# \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command:
-#
-# <filter> <input-file>
-#
-# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
-# name of an input file. Doxygen will then use the output that the filter
-# program writes to standard output. If FILTER_PATTERNS is specified, this tag
-# will be ignored.
-#
-# Note that the filter must not add or remove lines; it is applied before the
-# code is scanned, but not when the output code is generated. If lines are added
-# or removed, the anchors will not be placed correctly.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form: pattern=filter
-# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
-# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
-# patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER ) will also be used to filter the input files that are used for
-# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
-# The default value is: NO.
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
-# it is also possible to disable source filtering for a specific pattern using
-# *.ext= (so without naming a filter).
-# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
-
-FILTER_SOURCE_PATTERNS =
-
-# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
-# is part of the input, its contents will be placed on the main page
-# (index.html). This can be useful if you have a project on for instance GitHub
-# and want to reuse the introduction page also for the doxygen output.
-
-#USE_MDFILE_AS_MAINPAGE =
-
-#---------------------------------------------------------------------------
-# Configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
-# generated. Documented entities will be cross-referenced with these sources.
-#
-# Note: To get rid of all source code in the generated output, make sure that
-# also VERBATIM_HEADERS is set to NO.
-# The default value is: NO.
-
-SOURCE_BROWSER = NO
-
-# Setting the INLINE_SOURCES tag to YES will include the body of functions,
-# classes and enums directly into the documentation.
-# The default value is: NO.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
-# special comment blocks from generated source code fragments. Normal C, C++ and
-# Fortran comments will always remain visible.
-# The default value is: YES.
-
-STRIP_CODE_COMMENTS = YES
-
-# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
-# function all documented functions referencing it will be listed.
-# The default value is: NO.
-
-REFERENCED_BY_RELATION = NO
-
-# If the REFERENCES_RELATION tag is set to YES then for each documented function
-# all documented entities called/used by that function will be listed.
-# The default value is: NO.
-
-REFERENCES_RELATION = NO
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
-# to YES, then the hyperlinks from functions in REFERENCES_RELATION and
-# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
-# link to the documentation.
-# The default value is: YES.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
-# source code will show a tooltip with additional information such as prototype,
-# brief description and links to the definition and documentation. Since this
-# will make the HTML file larger and loading of large files a bit slower, you
-# can opt to disable this feature.
-# The default value is: YES.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-#SOURCE_TOOLTIPS = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code will
-# point to the HTML generated by the htags(1) tool instead of doxygen built-in
-# source browser. The htags tool is part of GNU's global source tagging system
-# (see http://www.gnu.org/software/global/global.html). You will need version
-# 4.8.6 or higher.
-#
-# To use it do the following:
-# - Install the latest version of global
-# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
-# - Make sure the INPUT points to the root of the source tree
-# - Run doxygen as normal
-#
-# Doxygen will invoke htags (and that will in turn invoke gtags), so these
-# tools must be available from the command line (i.e. in the search path).
-#
-# The result: instead of the source browser generated by doxygen, the links to
-# source code will now point to the output of htags.
-# The default value is: NO.
-# This tag requires that the tag SOURCE_BROWSER is set to YES.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
-# verbatim copy of the header file for each class for which an include is
-# specified. Set to NO to disable this.
-# See also: Section \class.
-# The default value is: YES.
-
-VERBATIM_HEADERS = YES
-
-# If the CLANG_ASSISTED_PARSING tag is set to YES, then doxygen will use the
-# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
-# cost of reduced performance. This can be particularly helpful with template
-# rich C++ code for which doxygen's built-in parser lacks the necessary type
-# information.
-# Note: The availability of this option depends on whether or not doxygen was
-# compiled with the --with-libclang option.
-# The default value is: NO.
-
-#CLANG_ASSISTED_PARSING = NO
-
-# If clang assisted parsing is enabled you can provide the compiler with command
-# line options that you would normally use when invoking the compiler. Note that
-# the include paths will already be set by doxygen for the files and directories
-# specified with INPUT and INCLUDE_PATH.
-# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
-
-#CLANG_OPTIONS =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
-# compounds will be generated. Enable this if the project contains a lot of
-# classes, structs, unions or interfaces.
-# The default value is: YES.
-
-ALPHABETICAL_INDEX = YES
-
-# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
-# which the alphabetical index list will be split.
-# Minimum value: 1, maximum value: 20, default value: 5.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all classes will
-# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
-# can be used to specify a prefix (or a list of prefixes) that should be ignored
-# while generating the index headers.
-# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output
-# The default value is: YES.
-
-GENERATE_HTML = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
-# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
-# it.
-# The default directory is: html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
-# generated HTML page (for example: .htm, .php, .asp).
-# The default value is: .html.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
-# each generated HTML page. If the tag is left blank doxygen will generate a
-# standard header.
-#
-# To get valid HTML the header file that includes any scripts and style sheets
-# that doxygen needs, which is dependent on the configuration options used (e.g.
-# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
-# default header using
-# doxygen -w html new_header.html new_footer.html new_stylesheet.css
-# YourConfigFile
-# and then modify the file new_header.html. See also section "Doxygen usage"
-# for information on how to generate the default header that doxygen normally
-# uses.
-# Note: The header is subject to change so you typically have to regenerate the
-# default header when upgrading to a newer version of doxygen. For a description
-# of the possible markers and block names see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
-# generated HTML page. If the tag is left blank doxygen will generate a standard
-# footer. See HTML_HEADER for more information on how to generate a default
-# footer and what special commands can be used inside the footer. See also
-# section "Doxygen usage" for information on how to generate the default footer
-# that doxygen normally uses.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
-# sheet that is used by each HTML page. It can be used to fine-tune the look of
-# the HTML output. If left blank doxygen will generate a default style sheet.
-# See also section "Doxygen usage" for information on how to generate the style
-# sheet that doxygen normally uses.
-# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
-# it is more robust and this tag (HTML_STYLESHEET) will in the future become
-# obsolete.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_STYLESHEET =
-
-# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
-# cascading style sheets that are included after the standard style sheets
-# created by doxygen. Using this option one can overrule certain style aspects.
-# This is preferred over using HTML_STYLESHEET since it does not replace the
-# standard style sheet and is therefore more robust against future updates.
-# Doxygen will copy the style sheet files to the output directory.
-# Note: The order of the extra stylesheet files is of importance (e.g. the last
-# stylesheet in the list overrules the setting of the previous ones in the
-# list). For an example see the documentation.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-#HTML_EXTRA_STYLESHEET =
-
-# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
-# other source files which should be copied to the HTML output directory. Note
-# that these files will be copied to the base HTML output directory. Use the
-# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
-# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
-# files will be copied as-is; there are no commands or markers available.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_EXTRA_FILES =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
-# will adjust the colors in the stylesheet and background images according to
-# this color. Hue is specified as an angle on a colorwheel, see
-# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
-# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
-# purple, and 360 is red again.
-# Minimum value: 0, maximum value: 359, default value: 220.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
-# in the HTML output. For a value of 0 the output will use grayscales only. A
-# value of 255 will produce the most vivid colors.
-# Minimum value: 0, maximum value: 255, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
-# luminance component of the colors in the HTML output. Values below 100
-# gradually make the output lighter, whereas values above 100 make the output
-# darker. The value divided by 100 is the actual gamma applied, so 80 represents
-# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
-# change the gamma.
-# Minimum value: 40, maximum value: 240, default value: 80.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting this
-# to NO can help when comparing the output of multiple runs.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_TIMESTAMP = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
-# shown in the various tree structured indices initially; the user can expand
-# and collapse entries dynamically later on. Doxygen will expand the tree to
-# such a level that at most the specified number of entries are visible (unless
-# a fully collapsed tree already exceeds this amount). So setting the number of
-# entries 1 will produce a full collapsed tree by default. 0 is a special value
-# representing an infinite number of entries and will result in a full expanded
-# tree by default.
-# Minimum value: 0, maximum value: 9999, default value: 100.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-#HTML_INDEX_NUM_ENTRIES = 100
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files will be
-# generated that can be used as input for Apple's Xcode 3 integrated development
-# environment (see: http://developer.apple.com/tools/xcode/), introduced with
-# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
-# Makefile in the HTML output directory. Running make will produce the docset in
-# that directory and running make install will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
-# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_DOCSET = NO
-
-# This tag determines the name of the docset feed. A documentation feed provides
-# an umbrella under which multiple documentation sets from a single provider
-# (such as a company or product suite) can be grouped.
-# The default value is: Doxygen generated docs.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# This tag specifies a string that should uniquely identify the documentation
-# set bundle. This should be a reverse domain-name style string, e.g.
-# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-# The default value is: org.doxygen.Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-# The default value is: Publisher.
-# This tag requires that the tag GENERATE_DOCSET is set to YES.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
-# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
-# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
-# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
-# Windows.
-#
-# The HTML Help Workshop contains a compiler that can convert all HTML output
-# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
-# files are now used as the Windows 98 help format, and will replace the old
-# Windows help format (.hlp) on all Windows platforms in the future. Compressed
-# HTML files also contain an index, a table of contents, and you can search for
-# words in the documentation. The HTML workshop also contains a viewer for
-# compressed HTML files.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_HTMLHELP = NO
-
-# The CHM_FILE tag can be used to specify the file name of the resulting .chm
-# file. You can add a path in front of the file if the result should not be
-# written to the html output directory.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_FILE =
-
-# The HHC_LOCATION tag can be used to specify the location (absolute path
-# including file name) of the HTML help compiler ( hhc.exe). If non-empty
-# doxygen will try to run the HTML help compiler on the generated index.hhp.
-# The file has to be specified with full path.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-HHC_LOCATION =
-
-# The GENERATE_CHI flag controls if a separate .chi index file is generated (
-# YES) or that it should be included in the master .chm file ( NO).
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-GENERATE_CHI = NO
-
-# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc)
-# and project file content.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-CHM_INDEX_ENCODING =
-
-# The BINARY_TOC flag controls whether a binary table of contents is generated (
-# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it
-# enables the Previous and Next buttons.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members to
-# the table of contents of the HTML help documentation and to the tree view.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
-# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
-# (.qch) of the generated HTML documentation.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
-# the file name of the resulting .qch file. The path specified is relative to
-# the HTML output folder.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
-# Project output. For more information please see Qt Help Project / Namespace
-# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_NAMESPACE = org.doxygen.Project
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
-# Help Project output. For more information please see Qt Help Project / Virtual
-# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
-# folders).
-# The default value is: doc.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
-# filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see Qt Help Project / Custom
-# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
-# filters).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches. Qt Help Project / Filter Attributes (see:
-# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHP_SECT_FILTER_ATTRS =
-
-# The QHG_LOCATION tag can be used to specify the location of Qt's
-# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
-# generated .qhp file.
-# This tag requires that the tag GENERATE_QHP is set to YES.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
-# generated, together with the HTML files, they form an Eclipse help plugin. To
-# install this plugin and make it available under the help contents menu in
-# Eclipse, the contents of the directory containing the HTML and XML files needs
-# to be copied into the plugins directory of eclipse. The name of the directory
-# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
-# After copying Eclipse needs to be restarted before the help appears.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the Eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have this
-# name. Each documentation set should have its own identifier.
-# The default value is: org.doxygen.Project.
-# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# If you want full control over the layout of the generated HTML pages it might
-# be necessary to disable the index and replace it with your own. The
-# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
-# of each HTML page. A value of NO enables the index and the value YES disables
-# it. Since the tabs in the index contain the same information as the navigation
-# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-DISABLE_INDEX = NO
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information. If the tag
-# value is set to YES, a side panel will be generated containing a tree-like
-# index structure (just like the one that is generated for HTML Help). For this
-# to work a browser that supports JavaScript, DHTML, CSS and frames is required
-# (i.e. any modern browser). Windows users are probably better off using the
-# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can
-# further fine-tune the look of the index. As an example, the default style
-# sheet generated by doxygen has an example that shows how to put an image at
-# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
-# the same information as the tab index, you could consider setting
-# DISABLE_INDEX to YES when enabling this option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-GENERATE_TREEVIEW = NO
-
-# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
-# doxygen will group on one line in the generated HTML documentation.
-#
-# Note that a value of 0 will completely suppress the enum values from appearing
-# in the overview section.
-# Minimum value: 0, maximum value: 20, default value: 4.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-ENUM_VALUES_PER_LINE = 4
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
-# to set the initial width (in pixels) of the frame in which the tree is shown.
-# Minimum value: 0, maximum value: 1500, default value: 250.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-TREEVIEW_WIDTH = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to
-# external symbols imported via tag files in a separate window.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of LaTeX formulas included as images in
-# the HTML documentation. When you change the font size after a successful
-# doxygen run you need to manually remove any form_*.png images from the HTML
-# output directory to force them to be regenerated.
-# Minimum value: 8, maximum value: 50, default value: 10.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are not
-# supported properly for IE 6.0, but are supported on all modern browsers.
-#
-# Note that when changing this option you need to delete any form_*.png files in
-# the HTML output directory before the changes have effect.
-# The default value is: YES.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
-# http://www.mathjax.org) which uses client side Javascript for the rendering
-# instead of using prerendered bitmaps. Use this if you do not have LaTeX
-# installed or if you want the formulas to look prettier in the HTML output.
-# enabled you may also need to install MathJax separately and configure the path
-# to it using the MATHJAX_RELPATH option.
-# The default value is: NO.
-# This tag requires that the tag GENERATE_HTML is set to YES.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you can set the default output format to be used for
-# the MathJax output. See the MathJax site (see:
-# http://docs.mathjax.org/en/latest/output.html) for more details.
-# Possible values are: HTML-CSS (which is slower, but has the best
-# compatibility), NativeMML (i.e. MathML) and SVG.
-# The default value is: HTML-CSS.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-#MATHJAX_FORMAT = HTML-CSS
-
-# When MathJax is enabled you need to specify the location relative to the HTML
-# output directory using the MATHJAX_RELPATH option. The destination directory
-# should contain the MathJax.js script. For instance, if the mathjax directory
-# is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
-# Content Delivery Network so you can quickly see the result without installing
-# MathJax. However, it is strongly recommended to install a local copy of
-# MathJax from http://www.mathjax.org before deployment.
-# The default value is: http://cdn.mathjax.org/mathjax/latest.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_RELPATH = http://www.mathjax.org/mathjax
-
-# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
-# extension names that should be enabled during MathJax rendering. For example
-# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-MATHJAX_EXTENSIONS =
-
-# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
-# of code that will be used on startup of the MathJax code. See the MathJax site
-# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
-# example see the documentation.
-# This tag requires that the tag USE_MATHJAX is set to YES.
-
-#MATHJAX_CODEFILE =
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
-# the HTML output. The underlying search engine uses javascript and DHTML and
-# should work on any modern browser. Note that when using HTML help
-# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
-# there is already a search function so this one should typically be disabled.
-# For large projects the javascript based search engine can be slow, then
-# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
-# search using the keyboard; to jump to the search box use <access key> + S
-# (what the <access key> is depends on the OS and browser, but it is typically
-# , /