fix: Tokenizer dependency (#30)

This commit is contained in:
Loc Bui
2024-03-18 12:57:04 -07:00
committed by GitHub
parent a7b2b54f18
commit 0588abec77

View File

@@ -135,14 +135,14 @@ class LLMEvaluator {
 enum LoadState {
     case idle
-    case loaded(LLMModel, LLM.Tokenizer)
+    case loaded(LLMModel, Tokenizers.Tokenizer)
 }

 var loadState = LoadState.idle

 /// load and return the model -- can be called multiple times, subsequent calls will
 /// just return the loaded model
-func load() async throws -> (LLMModel, LLM.Tokenizer) {
+func load() async throws -> (LLMModel, Tokenizers.Tokenizer) {
     switch loadState {
     case .idle:
         // limit the buffer cache