File size: 1,335 Bytes
3ff77f2
 
b8b38b0
3ff77f2
b8b38b0
 
 
 
3ff77f2
fe4ee9e
 
 
 
3ff77f2
b8b38b0
 
fe4ee9e
3ff77f2
 
b8b38b0
 
3ff77f2
 
 
 
b8b38b0
 
 
3ff77f2
 
fe4ee9e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Hugging Face Hub location of the quantized GGUF weights for the model.
CONST_REPO_ID = "AshtonIsNotHere/CodeLlama_7B_nlp_pp"
CONST_FILENAME = "CodeLlama_7B_nlp_pp_q8_0.gguf"

# Download the weights (cached locally by huggingface_hub on repeat runs) and
# load them with the llama.cpp bindings. NOTE: this runs at import time and
# performs network/disk I/O.
llm = Llama(model_path=hf_hub_download(repo_id=CONST_REPO_ID, filename=CONST_FILENAME))

# These phrases/tokens indicate the start of a pass. For demonstration purposes, it's
# safe to assume that these should not be encountered in the output and represent a hallucination.
stop = ["@NODES", "@CODE", "@DECL"]

def generate(input_text):
    """Autocomplete NLP++ source with the loaded CodeLlama model.

    Args:
        input_text: Partial NLP++ code, or a ``#`` comment describing what
            to generate.

    Returns:
        str: The prompt plus the completion (``echo=True``), capped at 128
        new tokens and truncated at any of the ``stop`` sequences.
    """
    # A comment-only prompt needs a trailing newline so the model starts
    # emitting code on the next line instead of extending the comment.
    # BUG FIX: original used `startwith` (typo -> AttributeError) and tested
    # `input_text.strip().endswith("\n")`, which is always False because
    # strip() removes trailing newlines; check the raw string instead.
    if input_text.strip().startswith("#") and not input_text.endswith("\n"):
        input_text += "\n"
    # BUG FIX: original called the undefined name `model`; the Llama
    # instance is bound to `llm` at module level.
    output = llm(input_text, max_tokens=128, stop=stop, echo=True)
    return output['choices'][0]['text']

# FIX: `gr.inputs.Textbox` was deprecated in Gradio 3 and removed in Gradio 4;
# components live at the package top level (consistent with `gr.Code` below).
input_text = gr.Textbox(lines=5, label="Enter your code to autocomplete")
output_text = gr.Code(elem_id="q-output", lines=30, label="Output")

description = "Code generation for NLP++ with CodeLlama"

# Example prompts: two NLP++ comment prompts and one line of NLP++ code.
examples = [
    ['# Find concept named parent under root and print "num" val for each child attribute\n'],
    ['L("iter") = getconcept(findroot(), L("parent_con"));\n'],
    ['# Match node _noun when preceded by _noun\n']
]

# Wire the UI to `generate` and start the web app (blocks until shutdown).
gr.Interface(
    fn=generate,
    inputs=input_text,
    outputs=output_text,
    title="CodeLlama for NLP++",
    description=description,
    examples=examples,
).launch()