fix lints in cpp
wsxiaoys committed Sep 2, 2023
1 parent 36f6e06 commit 939d7ca
Showing 1 changed file with 1 addition and 16 deletions.
17 changes: 1 addition & 16 deletions crates/llama-cpp-bindings/src/engine.cc
@@ -28,20 +28,6 @@ std::vector<llama_token> tokenize(struct llama_context * ctx, const std::string
   return result;
 }
 
-std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
-  std::vector<char> result(8, 0);
-  const int n_tokens = llama_token_to_piece(ctx, token, result.data(), result.size());
-  if (n_tokens < 0) {
-    result.resize(-n_tokens);
-    int check = llama_token_to_piece(ctx, token, result.data(), result.size());
-    GGML_ASSERT(check == -n_tokens);
-  } else {
-    result.resize(n_tokens);
-  }
-
-  return std::string(result.data(), result.size());
-}
-
 class TextInferenceEngineImpl : public TextInferenceEngine {
  public:
  TextInferenceEngineImpl(owned<llama_model> model, owned<llama_context> ctx) :
@@ -51,13 +37,12 @@ class TextInferenceEngineImpl : public TextInferenceEngine {
 
   uint32_t start(const rust::Str prompt) const override {
     auto* ctx = ctx_.get();
-    std::vector<llama_token> tokens_list = tokenize(ctx, std::string(prompt), true);
+    std::vector<llama_token> tokens_list = tokenize(ctx, std::string(prompt), /* add_bos = */ true);
     eval(tokens_list, /* reset = */ true);
     return sample();
   }
 
   uint32_t step(uint32_t next_token_id) const override {
-    auto* ctx = ctx_.get();
     eval({ static_cast<llama_token>(next_token_id) }, /* reset = */ false);
     return sample();
   }
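A note on the deleted helper, for readers of the bindings: it wraps a C-style API whose convention is to return the number of bytes written, or a negative value whose magnitude is the buffer size actually required. Removing the wrapper here is consistent with it simply no longer being called in this file. The following self-contained sketch shows the same grow-and-retry pattern; render_piece and to_piece are made-up names standing in for llama.cpp's llama_token_to_piece and the deleted wrapper, and the mock body is illustrative only, not the project's code.

#include <cassert>
#include <string>
#include <vector>

// Stand-in for a C-style API such as llama_token_to_piece: writes up to
// `length` bytes into `buf` and returns the number of bytes written, or a
// negative value whose magnitude is the buffer size actually required.
int render_piece(int token, char * buf, int length) {
  const std::string piece = "token_" + std::to_string(token);
  if (static_cast<int>(piece.size()) > length) {
    return -static_cast<int>(piece.size());
  }
  piece.copy(buf, piece.size());
  return static_cast<int>(piece.size());
}

// Grow-and-retry wrapper mirroring the helper deleted above.
std::string to_piece(int token) {
  std::vector<char> result(8, 0);
  const int n = render_piece(token, result.data(), result.size());
  if (n < 0) {
    // The first call did not fit; -n is the exact size needed.
    result.resize(-n);
    const int check = render_piece(token, result.data(), result.size());
    assert(check == -n);
  } else {
    result.resize(n);
  }
  return std::string(result.data(), result.size());
}

int main() {
  // "token_1234567" needs 13 bytes, so this exercises the retry branch.
  return to_piece(1234567).size() == 13 ? 0 : 1;
}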

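The single added line replaces a bare boolean argument with one carrying a named inline comment. Bare true/false literals at call sites are a common readability-lint target (clang-tidy's bugprone-argument-comment, for instance, can verify such comments against the parameter name), though which check fired here is an assumption. A minimal sketch of the idiom, with a hypothetical tokenize modeled on the signature used in start():

#include <string>
#include <vector>

// Hypothetical tokenizer with the same trailing flag as the call in start().
std::vector<int> tokenize(const std::string & text, bool add_bos) {
  std::vector<int> tokens;
  if (add_bos) {
    tokens.push_back(1);  // pretend 1 is the BOS id
  }
  for (char c : text) {
    tokens.push_back(static_cast<int>(c));
  }
  return tokens;
}

int main() {
  // Hard to tell at the call site what the bare literal controls:
  const auto a = tokenize("hello", true);

  // The inline comment names the parameter, matching the style used in the diff:
  const auto b = tokenize("hello", /* add_bos = */ true);

  return a.size() == b.size() ? 0 : 1;
}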