From 848fb416e88e57fa41d6397143ed2ab908683c72 Mon Sep 17 00:00:00 2001
From: Ikko Eltociear Ashimine
Date: Sat, 4 May 2024 15:03:40 +0900
Subject: [PATCH] Update tokenizer.py

signficant -> significant
---
 eva_clip/tokenizer.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/eva_clip/tokenizer.py b/eva_clip/tokenizer.py
index 41482f8..01e9f9d 100644
--- a/eva_clip/tokenizer.py
+++ b/eva_clip/tokenizer.py
@@ -29,7 +29,7 @@ def bytes_to_unicode():
     The reversible bpe codes work on unicode strings.
     This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
     When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
-    This is a signficant percentage of your normal, say, 32K bpe vocab.
+    This is a significant percentage of your normal, say, 32K bpe vocab.
     To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
     And avoids mapping to whitespace/control characters the bpe code barfs on.
     """
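
Context for the hunk above: the docstring it edits describes the byte-to-unicode lookup table that BPE tokenizers use so that raw utf-8 bytes never collide with whitespace or control characters. The function body is not part of this patch; the sketch below reproduces the widely used GPT-2/CLIP-style implementation of bytes_to_unicode, which eva_clip's tokenizer is derived from. It is offered as an illustrative assumption, not as the exact body of this repository's function.

def bytes_to_unicode():
    # Bytes that are already "printable and safe" map to themselves:
    # '!'..'~' (94), '¡'..'¬' (12), and '®'..'ÿ' (82) — 188 bytes total.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    # Remap the remaining 68 bytes (controls, whitespace, soft hyphen)
    # to unused codepoints starting at 256, keeping the table reversible.
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))

The design choice the docstring defends: by remapping only the 68 problematic bytes out of 256, the tokenizer gets a reversible, whitespace-free alphabet of 256 unicode characters instead of reserving ~5K rare unicode characters in a 32K vocab just to cover raw bytes.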