tests : add test-tokenizer-0.sh
ggerganov committed May 2, 2024
1 parent c4ec9c0 commit 9998b08
Showing 5 changed files with 110 additions and 240 deletions.
117 changes: 0 additions & 117 deletions tests/test-tokenizer-0-bpe.py

This file was deleted.

114 changes: 0 additions & 114 deletions tests/test-tokenizer-0-spm.py

This file was deleted.

39 changes: 30 additions & 9 deletions tests/test-tokenizer-0.cpp
@@ -55,8 +55,10 @@
 // return _k_tests;
 //}
 
-static std::map<std::string, std::vector<llama_token>> read_tests(const std::string & fname_inp, const std::string & fname_out) {
-    std::map<std::string, std::vector<llama_token>> tests;
+using llama_tests = std::map<std::string, std::vector<llama_token>>;
+
+static llama_tests read_tests(const std::string & fname_inp, const std::string & fname_out) {
+    llama_tests tests;
 
     std::ifstream ifs_inp(fname_inp);
     if (!ifs_inp) {
@@ -175,12 +177,20 @@ int main(int argc, char **argv) {
 
     bool success = true;
 
-    const auto k_tests = read_tests(fname_inp, fname_out);
+    const auto k_tests = [&]() -> llama_tests {
+        if (!fname_text.empty()) {
+            return {};
+        }
 
-    if (k_tests.empty()) {
-        fprintf(stderr, "%s : error: no tests found\n", __func__);
-        return 1;
-    }
+        const auto res = read_tests(fname_inp, fname_out);
+
+        if (res.empty()) {
+            fprintf(stderr, "%s : error: no tests found\n", __func__);
+            exit(1);
+        }
+
+        return res;
+    }();
 
     const bool add_special = false;
 
@@ -238,7 +248,17 @@ int main(int argc, char **argv) {
 
         fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
 
-        const std::vector<llama_token> res = llama_tokenize(ctx, text, add_special);
+        std::vector<llama_token> res;
+
+        {
+            const auto t_start = ggml_time_us();
+
+            res = llama_tokenize(ctx, text, add_special);
+
+            const auto t_end = ggml_time_us();
+
+            fprintf(stderr, "%s : tokenized in %.3f ms (cpp)\n", __func__, (t_end - t_start) / 1000.0);
+        }
 
         fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
 
@@ -252,7 +272,8 @@ int main(int argc, char **argv) {
         }
 
         for (const auto & tok : res) {
-            ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector<int>{tok})) << "'" << std::endl;
+            //ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector<int>{tok})) << "'" << std::endl;
+            ofs << tok << "\n";
         }
     }
 
46 changes: 46 additions & 0 deletions tests/test-tokenizer-0.py
@@ -0,0 +1,46 @@
import time
import argparse

from transformers import AutoTokenizer

parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok", help="path to a text file to tokenize", required=True)
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer
fname_tok = args.fname_tok

tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)

print('tokenizing file: ', fname_tok)
fname_out = fname_tok + '.tok'
with open(fname_tok, 'r', encoding='utf-8') as f:
    lines = f.readlines()
    s = ''.join(lines)
    t_start = time.time()
    res = tokenizer.encode(s, add_special_tokens=False)
    t_end = time.time()
    print('\nmain : tokenized in', "{:.3f}".format(1000.0*(t_end - t_start)), 'ms (py)')
    with open(fname_out, 'w', encoding='utf-8') as f:
        for x in res:
            # LLaMA v3 for some reason strips the space for these tokens (and others)
            # if x == 662:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 1174:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 2564:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 758:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 949:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 5354:
            #     f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # else:
            #     f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
            #f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
            f.write(str(x) + '\n')
    print('len(res): ', len(res))
    print('len(lines): ', len(lines))
    print('results written to: ', fname_out)
34 changes: 34 additions & 0 deletions tests/test-tokenizer-0.sh
@@ -0,0 +1,34 @@
#!/bin/bash
#
# Usage:
#
# test-tokenizer-0.sh <name> <input>
#

if [ $# -ne 2 ]; then
printf "Usage: $0 <name> <input>\n"
exit 1
fi

name=$1
input=$2

make -j tests/test-tokenizer-0

printf "Testing %s on %s ...\n" $name $input

python3 ./tests/test-tokenizer-0.py ./models/tokenizers/$name --fname-tok $input > /tmp/test-tokenizer-0-$name-py.log 2>&1
cat /tmp/test-tokenizer-0-$name-py.log | grep "tokenized in"

./tests/test-tokenizer-0 ./models/ggml-vocab-$name.gguf $input > /tmp/test-tokenizer-0-$name-cpp.log 2>&1
cat /tmp/test-tokenizer-0-$name-cpp.log | grep "tokenized in"

diff $input.tok $input.tokcpp > /dev/null 2>&1

if [ $? -eq 0 ]; then
printf "Tokenization is correct!\n"
else
diff $input.tok $input.tokcpp | head -n 32

printf "Tokenization differs!\n"
fi
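
A hypothetical invocation of the new script (the tokenizer name "llama-bpe" and the input path below are only illustrative, not part of this commit; the script itself expects the Hugging Face tokenizer under ./models/tokenizers/<name> and the matching vocab at ./models/ggml-vocab-<name>.gguf):

    ./tests/test-tokenizer-0.sh llama-bpe ./build/wikitext-2-raw/wiki.train.raw

This tokenizes the input with both the Python (transformers) and C++ (llama.cpp) tokenizers, reports the two "tokenized in" timings, and diffs <input>.tok against <input>.tokcpp, printing the first lines of the diff if the outputs do not match.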
