fix ja bert path
@@ -566,7 +566,7 @@ def distribute_phone(n_phone, n_word):
 # tokenizer = AutoTokenizer.from_pretrained('cl-tohoku/bert-base-japanese-v3')


-model_id = 'cl-tohoku/bert-base-japanese-v3'
+model_id = 'tohoku-nlp/bert-base-japanese-v3'
 tokenizer = AutoTokenizer.from_pretrained(model_id)

 def g2p(norm_text):
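The change only touches the Hugging Face model id: the Tohoku NLP group's Hub organization was renamed from cl-tohoku to tohoku-nlp, so the same bert-base-japanese-v3 weights now resolve under the new path. A minimal sketch of the load in isolation (assuming transformers is installed; this particular tokenizer also needs the fugashi and unidic-lite packages for MeCab-based word segmentation):

import pprint
from transformers import AutoTokenizer

# Same model as before; only the Hub organization name changed.
model_id = 'tohoku-nlp/bert-base-japanese-v3'

# Requires fugashi + unidic-lite to be installed for Japanese segmentation.
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Tokens follow MeCab word segmentation plus WordPiece.
pprint.pprint(tokenizer.tokenize('こんにちは、世界。'))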
@@ -644,4 +644,4 @@ if __name__ == "__main__":
 # conv = kakasi.getConverter()
 # katakana_text = conv.do('ええ、僕はおきなと申します。こちらの小さいわらべは杏子。ご挨拶が遅れてしまいすみません。あなたの名は?')  # Replace with your Chinese text

-# print(katakana_text)  # Output: ニーハオセカイ
+# print(katakana_text)  # Output: ニーハオセカイ
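The context lines above quote a commented-out pykakasi snippet whose inline comments are stale residue from an earlier Chinese-text example (ニーハオセカイ is "nihao sekai", i.e. "hello world"); the sample string itself is Japanese dialogue ("Yes, my name is Okina. This little child is Kyoko. Sorry for the late greeting. What is your name?"). For reference, a self-contained sketch of that conversion path; the setMode configuration is an assumption, since the diff does not show the original setup calls:

import pykakasi

kakasi = pykakasi.kakasi()
# Assumed configuration (not shown in the diff): map kanji ('J') and
# hiragana ('H') to katakana ('K') via pykakasi's legacy converter API.
kakasi.setMode('J', 'K')
kakasi.setMode('H', 'K')
conv = kakasi.getConverter()

text = 'ええ、僕はおきなと申します。'  # "Yes, my name is Okina."
print(conv.do(text))  # katakana rendering of the input

Newer pykakasi releases deprecate this converter style in favor of pykakasi.kakasi().convert(text), which returns per-word dicts with a 'kana' field.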
@@ -3,13 +3,9 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
 import sys

 # model = None
 # model_id = 'cl-tohoku/bert-base-japanese-v3'
 # tokenizer = AutoTokenizer.from_pretrained(model_id)
 models = {}
 tokenizers = {}
-def get_bert_feature(text, word2ph, device=None, model_id='cl-tohoku/bert-base-japanese-v3'):
-    global model
-    global tokenizer
-
-
+def get_bert_feature(text, word2ph, device=None, model_id='tohoku-nlp/bert-base-japanese-v3'):

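This hunk shows only the signature change and the dropped global declarations; the models and tokenizers dicts point to a per-model_id cache. A sketch of how such a cache could be wired up, where the function body is an illustration rather than the repository's actual implementation (the layer choice and word2ph alignment follow the common Bert-VITS-style pattern):

import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

models = {}
tokenizers = {}

def get_bert_feature(text, word2ph, device=None,
                     model_id='tohoku-nlp/bert-base-japanese-v3'):
    # Hypothetical body: one cached tokenizer/model per model_id replaces
    # the old single `global model` / `global tokenizer` pair.
    if device is None:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if model_id not in tokenizers:
        tokenizers[model_id] = AutoTokenizer.from_pretrained(model_id)
    if model_id not in models:
        models[model_id] = AutoModelForMaskedLM.from_pretrained(model_id).to(device)
    tokenizer, model = tokenizers[model_id], models[model_id]

    with torch.no_grad():
        inputs = tokenizer(text, return_tensors='pt').to(device)
        # Ask the encoder for all hidden states; the second-to-last layer
        # is a common choice for BERT features, used here illustratively.
        out = model(**inputs, output_hidden_states=True)
        hidden = out.hidden_states[-2][0]  # (seq_len, hidden_size)

    # Assumes word2ph is aligned to the tokenized sequence, special tokens
    # included: word2ph[i] says how many phones token i spans, so repeating
    # each token feature that many times yields phone-level features.
    assert hidden.shape[0] == len(word2ph)
    phone_level = torch.cat(
        [hidden[i].repeat(word2ph[i], 1) for i in range(len(word2ph))]
    )
    return phone_level.T  # (hidden_size, n_phones)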