fix ja bert path

Author: qinzy
Date: 2024-02-27 17:18:20 +00:00
Parent: 63869183f8
Commit: 29f9c65819

2 changed files with 3 additions and 7 deletions


@@ -566,7 +566,7 @@ def distribute_phone(n_phone, n_word):
 # tokenizer = AutoTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-v3')
-model_id = 'cl-tohoku/bert-base-japanese-v3'
+model_id = 'tohoku-nlp/bert-base-japanese-v3'
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 def g2p(norm_text):
@@ -644,4 +644,4 @@ if __name__ == "__main__":
 # conv = kakasi.getConverter()
 # katakana_text = conv.do('ええ、僕はおきなと申します。こちらの小さいわらべは杏子。ご挨拶が遅れてしまいすみません。あなたの名は?') # Replace with your Chinese text
-# print(katakana_text) # Output: ニーハオセカイ
+# print(katakana_text) # Output: ニーハオセカイ

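For context (not part of the diff): the Tohoku University NLP group's Hugging Face organization was renamed from 'cl-tohoku' to 'tohoku-nlp', which is why the hard-coded model id is updated here; the old id may still resolve via a redirect, but the new one is canonical. Below is a minimal sketch of loading the tokenizer defensively under that rename. The fallback list and the helper name load_ja_tokenizer are illustrative, not part of the commit; it assumes transformers is installed along with fugashi/unidic-lite, which this Japanese BERT tokenizer needs.

# Sketch only: prefer the renamed organization, fall back to the legacy id.
from transformers import AutoTokenizer

_CANDIDATE_IDS = [
    "tohoku-nlp/bert-base-japanese-v3",  # current organization name
    "cl-tohoku/bert-base-japanese-v3",   # legacy id kept as a fallback
]

def load_ja_tokenizer():
    last_err = None
    for model_id in _CANDIDATE_IDS:
        try:
            return AutoTokenizer.from_pretrained(model_id)
        except OSError as err:  # repo id could not be resolved or downloaded
            last_err = err
    raise last_err

tokenizer = load_ja_tokenizer()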

@@ -3,13 +3,9 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
 import sys
 # model = None
 # model_id = 'cl-tohoku/bert-base-japanese-v3'
 # tokenizer = AutoTokenizer.from_pretrained(model_id)
 models = {}
 tokenizers = {}
-def get_bert_feature(text, word2ph, device=None, model_id='cl-tohoku/bert-base-japanese-v3'):
+def get_bert_feature(text, word2ph, device=None, model_id='tohoku-nlp/bert-base-japanese-v3'):
     global model
     global tokenizer
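
The hunk is truncated here, but the models/tokenizers dicts alongside the model_id keyword argument point at a per-model-id cache keyed by the Hugging Face id. The following is a sketch of that caching pattern only; the function body (layer choice, device handling, phone-level expansion) is assumed for illustration and is not the commit's code.

# Sketch of per-model-id caching, assuming the usual BERT-feature contract
# that word2ph holds one repeat count per produced token (specials included).
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

models = {}
tokenizers = {}

def get_bert_feature(text, word2ph, device=None,
                     model_id="tohoku-nlp/bert-base-japanese-v3"):
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    # Load each model/tokenizer once per model_id and reuse it afterwards.
    if model_id not in models:
        tokenizers[model_id] = AutoTokenizer.from_pretrained(model_id)
        models[model_id] = (
            AutoModelForMaskedLM.from_pretrained(model_id).to(device).eval()
        )
    tokenizer, model = tokenizers[model_id], models[model_id]
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt").to(device)
        outputs = model(**inputs, output_hidden_states=True)
        # Use a late hidden layer as the token-level feature (assumed choice).
        token_features = outputs.hidden_states[-3][0].cpu()
    # Expand token features to phone level: token i is repeated word2ph[i] times.
    phone_features = torch.cat(
        [token_features[i].repeat(word2ph[i], 1) for i in range(len(word2ph))]
    )
    return phone_features.T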