From dd784e7ef06f69be2c033fdb65ee9a285b7eac0c Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Thu, 15 Aug 2024 13:54:30 +0100
Subject: [PATCH 1/7] Adding dictionary decompounder docs #7979

Signed-off-by: Anton Rubin
---
 .../token-filters/dictionary-decompounder.md | 99 +++++++++++++++++++
 _analyzers/token-filters/index.md            |  2 +-
 2 files changed, 100 insertions(+), 1 deletion(-)
 create mode 100644 _analyzers/token-filters/dictionary-decompounder.md

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
new file mode 100644
index 0000000000..f99dd7586d
--- /dev/null
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -0,0 +1,99 @@
+---
+layout: default
+title: Dictionary decompounder
+parent: Token filters
+nav_order: 110
+---
+
+# Dictionary decompounder token filter
+
+The `dictionary_decompounder` token filter in OpenSearch is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful in languages like German, Dutch, or Finnish, where compound words are common, and breaking them down can improve search relevance. The `dictionary_decompounder` token filter works by taking each token (word) and checking if it can be split into smaller tokens based on a list of known words. If it finds a way to split the token into known words, it generates the sub-tokens.
+
+## Parameters
+
+The `dictionary_decompounder` token filter in OpenSearch has the following parameters:
+
+- `word_list`: The dictionary that the filter uses to split compound words. (_Required_ unless `word_list_path` is configured)
+- `word_list_path`: A file path to a text file containing the dictionary words. (_Required_ unless `word_list` is configured)
+- `min_word_size`: The minimum length of the entire compound word that will be considered for splitting. (_Optional_)
+- `min_subword_size`: The minimum length for any subword. If a subword is smaller than this size, it will not be split. (_Optional_)
+- `max_subword_size`: The maximum length for any subword. If a subword is longer than this size, it will not be split. (_Optional_)
+- `only_longest_match`: If set to `true`, only the longest matching subword will be returned. Default is `false` (_Optional_)
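+
+For example, a filter can load its dictionary from a file instead of defining the list inline. The following request is an illustrative sketch: the index name, filter name, and the dictionary file `analysis/decompound_words.txt` are hypothetical, and the file is assumed to exist in the OpenSearch `config` directory with one word per line:
+
+```json
+PUT /decompound_file_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "file_dictionary_decompounder": {
+          "type": "dictionary_decompounder",
+          "word_list_path": "analysis/decompound_words.txt"
+        }
+      }
+    }
+  }
+}
+```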
+
+## Example
+
+The following example request creates a new index named `decompound_example` and configures an analyzer with `dictionary_decompounder` filter:
+
+```json
+PUT /decompound_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "my_dictionary_decompounder": {
+          "type": "dictionary_decompounder",
+          "word_list": ["slow", "green", "turtle"]
+        }
+      },
+      "analyzer": {
+        "my_analyzer": {
+          "type": "custom",
+          "tokenizer": "standard",
+          "filter": ["lowercase", "my_dictionary_decompounder"]
+        }
+      }
+    }
+  }
+}
+```
+{% include copy-curl.html %}
+
+## Generated tokens
+
+Use the following request to examine the tokens generated using the created analyzer:
+
+```json
+POST /decompound_example/_analyze
+{
+  "analyzer": "my_analyzer",
+  "text": "slowgreenturtleswim"
+}
+```
+{% include copy-curl.html %}
+
+The response contains the generated tokens:
+
+```json
+{
+  "tokens": [
+    {
+      "token": "slowgreenturtleswim",
+      "start_offset": 0,
+      "end_offset": 19,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "slow",
+      "start_offset": 0,
+      "end_offset": 19,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "green",
+      "start_offset": 0,
+      "end_offset": 19,
+      "type": "<ALPHANUM>",
+      "position": 0
+    },
+    {
+      "token": "turtle",
+      "start_offset": 0,
+      "end_offset": 19,
+      "type": "<ALPHANUM>",
+      "position": 0
+    }
+  ]
+}
+```
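+
+The optional parameters can be combined to tune how splitting is performed. The following request is an illustrative sketch (the index and filter names are hypothetical): the size values shown are the defaults, and `only_longest_match` is set to `true` so that only the longest matching subword is returned:
+
+```json
+PUT /decompound_tuned_example
+{
+  "settings": {
+    "analysis": {
+      "filter": {
+        "tuned_dictionary_decompounder": {
+          "type": "dictionary_decompounder",
+          "word_list": ["slow", "green", "turtle"],
+          "min_word_size": 5,
+          "min_subword_size": 2,
+          "max_subword_size": 15,
+          "only_longest_match": true
+        }
+      }
+    }
+  }
+}
+```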
diff --git a/_analyzers/token-filters/index.md b/_analyzers/token-filters/index.md
index f4e9c434e7..0620384e29 100644
--- a/_analyzers/token-filters/index.md
+++ b/_analyzers/token-filters/index.md
@@ -23,7 +23,7 @@ Token filter | Underlying Lucene token filter| Description
 `decimal_digit` | [DecimalDigitFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/core/DecimalDigitFilter.html) | Converts all digits in the Unicode decimal number general category to basic Latin digits (0--9).
 `delimited_payload` | [DelimitedPayloadTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.html) | Separates a token stream into tokens with corresponding payloads, based on a provided delimiter. A token consists of all characters before the delimiter, and a payload consists of all characters after the delimiter. For example, if the delimiter is `|`, then for the string `foo|bar`, `foo` is the token and `bar` is the payload.
 [`delimited_term_freq`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/delimited-term-frequency/) | [DelimitedTermFrequencyTokenFilter](https://lucene.apache.org/core/9_7_0/analysis/common/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilter.html) | Separates a token stream into tokens with corresponding term frequencies, based on a provided delimiter. A token consists of all characters before the delimiter, and a term frequency is the integer after the delimiter. For example, if the delimiter is `|`, then for the string `foo|5`, `foo` is the token and `5` is the term frequency.
-`dictionary_decompounder` | [DictionaryCompoundWordTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.html) | Decomposes compound words found in many Germanic languages.
+[`dictionary_decompounder`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/dictionary-decompounder/) | [DictionaryCompoundWordTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.html) | Decomposes compound words found in many Germanic languages.
 `edge_ngram` | [EdgeNGramTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html) | Tokenizes the given token into edge n-grams (n-grams that start at the beginning of the token) of lengths between `min_gram` and `max_gram`. Optionally, keeps the original token.
 `elision` | [ElisionFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/util/ElisionFilter.html) | Removes the specified [elisions](https://en.wikipedia.org/wiki/Elision) from the beginning of tokens. For example, changes `l'avion` (the plane) to `avion` (plane).
 `fingerprint` | [FingerprintFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/FingerprintFilter.html) | Sorts and deduplicates the token list and concatenates tokens into a single token.

From 4166d4773b5ffc40d5204f28d96f13a7c1e95308 Mon Sep 17 00:00:00 2001
From: AntonEliatra
Date: Tue, 3 Sep 2024 16:13:53 +0100
Subject: [PATCH 2/7] Update dictionary-decompounder.md

Signed-off-by: AntonEliatra
---
 _analyzers/token-filters/dictionary-decompounder.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
index f99dd7586d..03d0b54b33 100644
--- a/_analyzers/token-filters/dictionary-decompounder.md
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -16,7 +16,7 @@ The `dictionary_decompounder` token filter in OpenSearch has the following param
 - `word_list`: The dictionary that the filter uses to split compound words. (_Required_ unless `word_list_path` is configured)
 - `word_list_path`: A file path to a text file containing the dictionary words. (_Required_ unless `word_list` is configured)
 - `min_word_size`: The minimum length of the entire compound word that will be considered for splitting. (_Optional_)
-- `min_subword_size`: The minimum length for any subword. If a subword is smaller than this size, it will not be split. (_Optional_) 
+- `min_subword_size`: The minimum length for any subword. If a subword is smaller than this size, it will not be split. (_Optional_)
 - `max_subword_size`: The maximum length for any subword. If a subword is longer than this size, it will not be split. (_Optional_)
 - `only_longest_match`: If set to `true`, only the longest matching subword will be returned. Default is `false` (_Optional_)
 

From f91890c6b586d0195a86991bc17d73da4acd8d62 Mon Sep 17 00:00:00 2001
From: AntonEliatra
Date: Thu, 12 Sep 2024 11:07:22 +0100
Subject: [PATCH 3/7] Update dictionary-decompounder.md

Signed-off-by: AntonEliatra
---
 _analyzers/token-filters/dictionary-decompounder.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
index 03d0b54b33..3bbfbc2a74 100644
--- a/_analyzers/token-filters/dictionary-decompounder.md
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -50,7 +50,7 @@ PUT /decompound_example
 
 ## Generated tokens
 
-Use the following request to examine the tokens generated using the created analyzer:
+Use the following request to examine the tokens generated using the analyzer:
 
 ```json
 POST /decompound_example/_analyze

From 48bb8a01abd4b5aaac271a6d2244f96a061ed432 Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Wed, 16 Oct 2024 19:30:09 +0100
Subject: [PATCH 4/7] updating parameter table

Signed-off-by: Anton Rubin
---
 .../token-filters/dictionary-decompounder.md | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
index 3bbfbc2a74..c363cf3135 100644
--- a/_analyzers/token-filters/dictionary-decompounder.md
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -7,18 +7,20 @@ nav_order: 110
 
 # Dictionary decompounder token filter
 
-The `dictionary_decompounder` token filter in OpenSearch is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful in languages like German, Dutch, or Finnish, where compound words are common, and breaking them down can improve search relevance. The `dictionary_decompounder` token filter works by taking each token (word) and checking if it can be split into smaller tokens based on a list of known words. If it finds a way to split the token into known words, it generates the sub-tokens.
+The `dictionary_decompounder` token filter is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful in languages like German, Dutch, or Finnish, where compound words are common, and breaking them down can improve search relevance. The `dictionary_decompounder` token filter works by taking each token (word) and checking if it can be split into smaller tokens based on a list of known words. If it finds a way to split the token into known words, it generates the sub-tokens.
 
 ## Parameters
 
-The `dictionary_decompounder` token filter in OpenSearch has the following parameters:
+The `dictionary_decompounder` token filter has the following parameters.
 
-- `word_list`: The dictionary that the filter uses to split compound words. (_Required_ unless `word_list_path` is configured)
-- `word_list_path`: A file path to a text file containing the dictionary words. (_Required_ unless `word_list` is configured)
-- `min_word_size`: The minimum length of the entire compound word that will be considered for splitting. (_Optional_)
-- `min_subword_size`: The minimum length for any subword. If a subword is smaller than this size, it will not be split. (_Optional_)
-- `max_subword_size`: The maximum length for any subword. If a subword is longer than this size, it will not be split. (_Optional_)
-- `only_longest_match`: If set to `true`, only the longest matching subword will be returned. Default is `false` (_Optional_)
+Parameter | Required/Optional | Data type | Description
+:--- | :--- | :--- | :---
+`word_list` | _Required_ unless `word_list_path` is configured | Array of strings | The dictionary that the filter uses to split compound words.
+`word_list_path` | _Required_ unless `word_list` is configured | String | A file path to a text file containing the dictionary words.
+`min_word_size` | Optional | Integer | The minimum length of the entire compound word that will be considered for splitting. Default is `5`.
+`min_subword_size` | Optional | Integer | The minimum length for any subword. If a subword is smaller than this size, it will not be split. Default is `2`.
+`max_subword_size` | Optional | Integer | The maximum length for any subword. If a subword is longer than this size, it will not be split. Default is `15`.
+`only_longest_match` | Optional | Boolean | If set to `true`, only the longest matching subword will be returned. Default is `false`.
 
 ## Example
 

From 7e117d08eeb1bc2173817ca889dd4ae689eee987 Mon Sep 17 00:00:00 2001
From: Anton Rubin
Date: Wed, 16 Oct 2024 19:30:49 +0100
Subject: [PATCH 5/7] updating parameter table

Signed-off-by: Anton Rubin
---
 _analyzers/token-filters/dictionary-decompounder.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
index c363cf3135..04b24b7dcf 100644
--- a/_analyzers/token-filters/dictionary-decompounder.md
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -15,8 +15,8 @@ The `dictionary_decompounder` token filter has the following parameters.
 
 Parameter | Required/Optional | Data type | Description
 :--- | :--- | :--- | :---
-`word_list` | _Required_ unless `word_list_path` is configured | Array of strings | The dictionary that the filter uses to split compound words.
-`word_list_path` | _Required_ unless `word_list` is configured | String | A file path to a text file containing the dictionary words.
+`word_list` | Required unless `word_list_path` is configured | Array of strings | The dictionary that the filter uses to split compound words.
+`word_list_path` | Required unless `word_list` is configured | String | A file path to a text file containing the dictionary words.
 `min_word_size` | Optional | Integer | The minimum length of the entire compound word that will be considered for splitting. Default is `5`.
 `min_subword_size` | Optional | Integer | The minimum length for any subword. If a subword is smaller than this size, it will not be split. Default is `2`.
 `max_subword_size` | Optional | Integer | The maximum length for any subword. If a subword is longer than this size, it will not be split. Default is `15`.
From 5d6b12ad4a502a6f5c482ead930a0fbb11867deb Mon Sep 17 00:00:00 2001
From: AntonEliatra
Date: Thu, 7 Nov 2024 11:24:54 +0000
Subject: [PATCH 6/7] Apply suggestions from code review

Co-authored-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Signed-off-by: AntonEliatra
---
 _analyzers/token-filters/dictionary-decompounder.md | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
index 04b24b7dcf..fcb1528b11 100644
--- a/_analyzers/token-filters/dictionary-decompounder.md
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -7,7 +7,7 @@ nav_order: 110
 
 # Dictionary decompounder token filter
 
-The `dictionary_decompounder` token filter is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful in languages like German, Dutch, or Finnish, where compound words are common, and breaking them down can improve search relevance. The `dictionary_decompounder` token filter works by taking each token (word) and checking if it can be split into smaller tokens based on a list of known words. If it finds a way to split the token into known words, it generates the sub-tokens.
+The `dictionary_decompounder` token filter is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful in languages like German, Dutch, or Finnish, where compound words are common and breaking them down can improve search relevance. The `dictionary_decompounder` token filter works by taking each token (word) and checking if it can be split into smaller tokens based on a list of known words. If the token can be split into known words, the filter generates the subtokens for the token.
 
 ## Parameters
 
@@ -15,11 +15,11 @@ The `dictionary_decompounder` token filter has the following parameters.
 
 Parameter | Required/Optional | Data type | Description
 :--- | :--- | :--- | :---
-`word_list` | Required unless `word_list_path` is configured | Array of strings | The dictionary that the filter uses to split compound words.
-`word_list_path` | Required unless `word_list` is configured | String | A file path to a text file containing the dictionary words.
-`min_word_size` | Optional | Integer | The minimum length of the entire compound word that will be considered for splitting. Default is `5`.
-`min_subword_size` | Optional | Integer | The minimum length for any subword. If a subword is smaller than this size, it will not be split. Default is `2`.
-`max_subword_size` | Optional | Integer | The maximum length for any subword. If a subword is longer than this size, it will not be split. Default is `15`.
+`word_list` | Required unless `word_list_path` is configured | Array of strings | The dictionary containing words that the filter uses to split compound words into.
+`word_list_path` | Required unless `word_list` is configured | String | A file path to a text file containing the dictionary words. Accepts either an absolute path or a path relative to the `config` directory. The dictionary file must be UTF-8 encoded and must contain each word on its own line.
+`min_word_size` | Optional | Integer | The minimum length of the entire compound word that will be considered for splitting. If a compound word is shorter than this value, it is not split. Default is `5`.
+`min_subword_size` | Optional | Integer | The minimum length for any subword. If a subword is shorter than this value, it is not included in the output. Default is `2`.
+`max_subword_size` | Optional | Integer | The maximum length for any subword. If a subword is longer than this value, it is not included in the output. Default is `15`.
 `only_longest_match` | Optional | Boolean | If set to `true`, only the longest matching subword will be returned. Default is `false`.
 
 ## Example

From dff7d3a12ffa3fc7be0751cb44b7445039b96527 Mon Sep 17 00:00:00 2001
From: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
Date: Thu, 14 Nov 2024 14:54:03 -0500
Subject: [PATCH 7/7] Apply suggestions from code review

Co-authored-by: Nathan Bower
Signed-off-by: kolchfa-aws <105444904+kolchfa-aws@users.noreply.github.com>
---
 _analyzers/token-filters/dictionary-decompounder.md | 8 ++++----
 _analyzers/token-filters/index.md                   | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/_analyzers/token-filters/dictionary-decompounder.md b/_analyzers/token-filters/dictionary-decompounder.md
index fcb1528b11..ced6fd6fbc 100644
--- a/_analyzers/token-filters/dictionary-decompounder.md
+++ b/_analyzers/token-filters/dictionary-decompounder.md
@@ -7,7 +7,7 @@ nav_order: 110
 
 # Dictionary decompounder token filter
 
-The `dictionary_decompounder` token filter is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful in languages like German, Dutch, or Finnish, where compound words are common and breaking them down can improve search relevance. The `dictionary_decompounder` token filter works by taking each token (word) and checking if it can be split into smaller tokens based on a list of known words. If the token can be split into known words, the filter generates the subtokens for the token.
+The `dictionary_decompounder` token filter is used to split compound words into their constituent parts based on a predefined dictionary. This filter is particularly useful for languages like German, Dutch, or Finnish, in which compound words are common, so breaking them down can improve search relevance. The `dictionary_decompounder` token filter determines whether each token (word) can be split into smaller tokens based on a list of known words. If the token can be split into known words, the filter generates the subtokens for the token.
 
 ## Parameters
 
@@ -15,8 +15,8 @@ The `dictionary_decompounder` token filter has the following parameters.
 
 Parameter | Required/Optional | Data type | Description
 :--- | :--- | :--- | :---
-`word_list` | Required unless `word_list_path` is configured | Array of strings | The dictionary containing words that the filter uses to split compound words into.
-`word_list_path` | Required unless `word_list` is configured | String | A file path to a text file containing the dictionary words. Accepts either an absolute path or a path relative to the `config` directory. The dictionary file must be UTF-8 encoded and must contain each word on its own line.
+`word_list` | Required unless `word_list_path` is configured | Array of strings | The dictionary of words that the filter uses to split compound words.
+`word_list_path` | Required unless `word_list` is configured | String | A file path to a text file containing the dictionary words. Accepts either an absolute path or a path relative to the `config` directory. The dictionary file must be UTF-8 encoded, and each word must be listed on a separate line.
`min_word_size` | Optional | Integer | The minimum length of the entire compound word that will be considered for splitting. If a compound word is shorter than this value, it is not split. Default is `5`. `min_subword_size` | Optional | Integer | The minimum length for any subword. If a subword is shorter than this value, it is not included in the output. Default is `2`. `max_subword_size` | Optional | Integer | The maximum length for any subword. If a subword is longer than this value, it is not included in the output. Default is `15`. @@ -24,7 +24,7 @@ Parameter | Required/Optional | Data type | Description ## Example -The following example request creates a new index named `decompound_example` and configures an analyzer with `dictionary_decompounder` filter: +The following example request creates a new index named `decompound_example` and configures an analyzer with the `dictionary_decompounder` filter: ```json PUT /decompound_example diff --git a/_analyzers/token-filters/index.md b/_analyzers/token-filters/index.md index 0620384e29..ba69e8aace 100644 --- a/_analyzers/token-filters/index.md +++ b/_analyzers/token-filters/index.md @@ -23,7 +23,7 @@ Token filter | Underlying Lucene token filter| Description `decimal_digit` | [DecimalDigitFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/core/DecimalDigitFilter.html) | Converts all digits in the Unicode decimal number general category to basic Latin digits (0--9). `delimited_payload` | [DelimitedPayloadTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/payloads/DelimitedPayloadTokenFilter.html) | Separates a token stream into tokens with corresponding payloads, based on a provided delimiter. A token consists of all characters before the delimiter, and a payload consists of all characters after the delimiter. For example, if the delimiter is `|`, then for the string `foo|bar`, `foo` is the token and `bar` is the payload. [`delimited_term_freq`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/delimited-term-frequency/) | [DelimitedTermFrequencyTokenFilter](https://lucene.apache.org/core/9_7_0/analysis/common/org/apache/lucene/analysis/miscellaneous/DelimitedTermFrequencyTokenFilter.html) | Separates a token stream into tokens with corresponding term frequencies, based on a provided delimiter. A token consists of all characters before the delimiter, and a term frequency is the integer after the delimiter. For example, if the delimiter is `|`, then for the string `foo|5`, `foo` is the token and `5` is the term frequency. -[`dictionary_decompounder`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/dictionary-decompounder/) | [DictionaryCompoundWordTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.html) | Decomposes compound words found in many Germanic languages. +[`dictionary_decompounder`]({{site.url}}{{site.baseurl}}/analyzers/token-filters/dictionary-decompounder/) | [DictionaryCompoundWordTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/compound/DictionaryCompoundWordTokenFilter.html) | Splits compound words into their constituent parts based on a predefined dictionary. Useful for many Germanic languages. 
`edge_ngram` | [EdgeNGramTokenFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html) | Tokenizes the given token into edge n-grams (n-grams that start at the beginning of the token) of lengths between `min_gram` and `max_gram`. Optionally, keeps the original token. `elision` | [ElisionFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/util/ElisionFilter.html) | Removes the specified [elisions](https://en.wikipedia.org/wiki/Elision) from the beginning of tokens. For example, changes `l'avion` (the plane) to `avion` (plane). `fingerprint` | [FingerprintFilter](https://lucene.apache.org/core/9_10_0/analysis/common/org/apache/lucene/analysis/miscellaneous/FingerprintFilter.html) | Sorts and deduplicates the token list and concatenates tokens into a single token.