Commit b8b38b0 · Parent: fe4ee9e
Fix: Download model using hf hub api
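This commit switches model loading to the Hugging Face Hub API. As a minimal sketch of the call the new code relies on (repo ID and filename as they appear in the diff below): hf_hub_download fetches the file into the local cache on first use and afterwards simply returns the cached path.

    from huggingface_hub import hf_hub_download

    # First call downloads the file; later calls resolve to the cache.
    # Returns the filesystem path of the cached copy.
    local_path = hf_hub_download(
        repo_id="AshtonIsNotHere/CodeLlama_7B_nlp_pp",
        filename="CodeLlama_7B_nlp_pp_q8_0.gguf",
    )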
app.py
CHANGED
@@ -1,25 +1,31 @@
 import gradio as gr
 from llama_cpp import Llama
+from huggingface_hub import hf_hub_download

-
+CONST_REPO_ID = "AshtonIsNotHere/CodeLlama_7B_nlp_pp"
+CONST_FILENAME = "CodeLlama_7B_nlp_pp_q8_0.gguf"
+
+model = Llama(model_path=hf_hub_download(repo_id=CONST_REPO_ID, filename=CONST_FILENAME))

 # These phrases/tokens indicate the start of a pass. For demonstration purposes, it's
 # safe to assume that these should not be encountered in the output and represent a hallucination.
 stop = ["@NODES", "@CODE", "@DECL"]

 def generate(input_text):
+    if input_text.strip().startswith("#") and not input_text.endswith("\n"):
+        input_text += "\n"
     output = model(input_text, max_tokens=128, stop=stop, echo=True)
     return output['choices'][0]['text']

-input_text = gr.inputs.Textbox(lines=
-output_text = gr.
+input_text = gr.inputs.Textbox(lines=5, label="Enter your code to autocomplete")
+output_text = gr.Code(elem_id="q-output", lines=30, label="Output")

 description = "Code generation for NLP++ with CodeLlama"

 examples = [
-    '# Find concept named parent under root and print "num" val for each child attribute\n',
-    'L("iter") = getconcept(findroot(), L("parent_con"));\n',
-    '# Match node _noun when preceded by _noun\n'
+    ['# Find concept named parent under root and print "num" val for each child attribute\n'],
+    ['L("iter") = getconcept(findroot(), L("parent_con"));\n'],
+    ['# Match node _noun when preceded by _noun\n']
 ]

 gr.Interface(fn=generate, inputs=input_text, outputs=output_text, title="CodeLlama for NLP++", description=description, examples=examples).launch()
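A quick smoke test of the updated generate function, as a hypothetical local check rather than part of the Space (it assumes llama-cpp-python is installed and the model download succeeds); the stop list marks the start of an NLP++ pass, so none of those tokens should appear in a completion, and echo=True means the returned text begins with the prompt itself:

    prompt = '# Match node _noun when preceded by _noun\n'
    completion = generate(prompt)
    assert completion.startswith(prompt)
    assert not any(token in completion for token in stop)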