Skip to content

Commit

Permalink
fix: increase lru cache size to accommodate different models
Browse files Browse the repository at this point in the history
  • Loading branch information
jarvis8x7b committed Feb 25, 2024
1 parent 5f8e9b5 commit 59ad2ce
Showing 1 changed file with 5 additions and 2 deletions.
7 changes: 5 additions & 2 deletions commons/reward_model/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,13 +35,16 @@ class ModelZoo(StrEnum):


# adjust cache maxsize depending on how many models you have in your ModelZoo
@lru_cache(maxsize=10)
max_num_miners = 256


@lru_cache(maxsize=max_num_miners)
def get_cached_model(model_name: ModelName):
model = AutoModelForSequenceClassification.from_pretrained(model_name).eval()
return model


@lru_cache(maxsize=10)
@lru_cache(maxsize=max_num_miners)
def get_cached_tokenizer(
model_name: ModelName,
) -> PreTrainedTokenizerFast | PreTrainedTokenizer:
Expand Down

0 comments on commit 59ad2ce

Please sign in to comment.