From 6a7f28bc1c709a34c99f341d6092783fc25395ab Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mi=C5=A1o=20Belica?=
Date: Sat, 7 Mar 2020 20:25:12 +0100
Subject: [PATCH] Try to fix tests for Python 2.7

---
 .travis.yml              | 4 +++-
 tests/test_tokenizers.py | 6 +++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 68dd03b7..832001bb 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -6,6 +6,8 @@ matrix:
   include:
     - name: "Python 2.7 on Linux"
       python: 2.7
+      # use "JPype1==0.7.1" (for konlpy) because of Python 2 support
+      install: pip install "JPype1==0.7.1"
     - name: "Python 3.5 on Linux"
       python: 3.5
     - name: "Python 3.6 on Linux"
@@ -51,7 +53,7 @@ before_install:
 install:
   - pip install -U pip wheel setuptools
   - python setup.py install
-  - pip install -U numpy tinysegmenter jieba konlpy
+  - pip install numpy tinysegmenter jieba konlpy
   - python -c "import nltk; nltk.download('punkt')"
   - pip install -U pytest codecov pytest-cov
 
diff --git a/tests/test_tokenizers.py b/tests/test_tokenizers.py
index f63dc42a..29cb6827 100644
--- a/tests/test_tokenizers.py
+++ b/tests/test_tokenizers.py
@@ -2,6 +2,8 @@
 
 from __future__ import absolute_import, division, print_function, unicode_literals
 
+import sys
+
 import pytest
 
 from sumy.nlp.tokenizers import Tokenizer
@@ -55,10 +57,11 @@ def test_language_getter():
         "好用的文档自动化摘要程序",
         ("好用", "的", "文档", "自动化", "摘要", "程序"),
     ),
-    (
+    pytest.param(
         "korean",
         "대학에서 DB, 통계학, 이산수학 등을 배웠지만...",
         ("대학", "통계학", "이산", "이산수학", "수학", "등"),
+        marks=pytest.mark.skipif(sys.version_info < (3,), reason="JPype1 from konlpy does not support Python 2 anymore")
     ),
 ])
 def test_tokenize_sentence_to_words(language, sentence, expected_words):
@@ -137,6 +140,7 @@ def test_tokenize_chinese_paragraph():
     assert expected == tokenizer.to_sentences(paragraph)
 
 
+@pytest.mark.skipif(sys.version_info < (3,), reason="JPype1 from konlpy does not support Python 2 anymore")
 def test_tokenize_korean_paragraph():
     tokenizer = Tokenizer('korean')
     expected = (