import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download  # download model files from the Hugging Face Hub

# fetch the quantized Vigogne-2 7B model from the Hub; n_ctx=2048 gives a larger context window
llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/Vigogne-2-7B-Instruct-GGML",
        filename="vigogne-2-7b-instruct.ggmlv3.q4_1.bin",
    ),
    n_ctx=2048,
)
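# note: .ggmlv3 files only load with older llama-cpp-python releases; newer versions expect GGUF models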
description = "Bro, I don't want to learn! I just want results."
def generate_text(input_text):
    # prompt the model in a simple Q/A format; echo=True makes the returned text include the prompt
    output = llm(f"Q: {input_text} \n A:", max_tokens=521, stop=["Q:", "\n"], echo=True)
    return output["choices"][0]["text"]
input_text = gr.Textbox(lines=10, label="Enter your input text")  # gr.inputs.Textbox is removed in newer Gradio releases
output_text = gr.Textbox(label="Output text")
demo = gr.Interface(fn=generate_text, inputs=input_text, outputs=output_text, title="Running Llama on CPU is Hard", description=description)
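# queue incoming requests so the CPU-bound generation handles calls one at a time instead of concurrently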
demo.queue()
demo.launch()