forked from abetlen/llama-cpp-python
-
Notifications
You must be signed in to change notification settings - Fork 0
/
grammar_test.py
28 lines (25 loc) · 887 Bytes
/
grammar_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
"""Interactive smoke test for grammar-constrained generation.

Loads a local GGML LLaMA model via llama-cpp-python with a GBNF grammar
that restricts output to a tiny command language, then drops into a
Python REPL so the model can be exercised by hand (``llm`` is in scope).
"""
import code

from llama_cpp import Llama

# GBNF grammar for the allowed output language:
#   first line:  nav("admin/<path>")
#   then zero or more lines of either t("<setting>", <value>) or info("<setting>")
# where <value> is a hex color, a quoted string, a number, or a boolean.
grammar = """root ::= nav eol (commands eol)*
commands ::= t | info
nav ::= "nav(\\"admin/" [a-z/]* "\\")"
info ::= "info(" setting ")"
t ::= "t(" setting ", " value ")"
value ::= color | string | number | boolean
color ::= "#" [0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]
setting ::= "\\"" [a-z ]+ "\\""
string ::= "\\"" [ \\t!#-\\[\\]-~]* "\\""
number ::= [0-9]+
boolean ::= ("true" | "false")
eol ::= "\\n"
"""

llm = Llama(
    model_path="/Users/alex/llama-7b.ggmlv3.q8_0.bin",
    # lora_base="/Users/alex/llama-7b.ggml.f16.bin",
    # python ~/llama.cpp/convert-lora-to-ggml.py .
    # lora_path="/Users/alex/src/github.com/Shopify/sidekick-data/src/webapp/models/ggml-adapter-model.bin",
    # n_gpu_layers=1000,
    n_ctx=2048,  # context window size, in tokens
    # NOTE(review): passing the grammar as a plain string at construction time
    # presumably relies on this fork's API — upstream llama-cpp-python takes a
    # LlamaGrammar object per request. Confirm against the fork's Llama signature.
    grammar=grammar,
)

# Drop into an interactive shell with `llm` (and everything above) in scope.
code.interact(local=globals())