import gradio as gr
from llama_cpp import Llama  # Python bindings for the llama.cpp C API
from huggingface_hub import hf_hub_download  # fetch model weights from the Hugging Face Hub

# Download the 2-bit quantized GGML weights of StableBeluga2-70B and load them with llama.cpp
llm = Llama(model_path=hf_hub_download(repo_id="TheBloke/StableBeluga2-70B-GGML", filename="stablebeluga2-70b.ggmlv3.q2_K.bin"))
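# A minimal sketch of tuning the load, assuming the defaults are too conservative
# for a 70B model; n_ctx and n_threads are llama-cpp-python constructor parameters,
# but the values below are illustrative guesses, not this Space's actual settings:
# llm = Llama(
#     model_path=hf_hub_download(repo_id="TheBloke/StableBeluga2-70B-GGML",
#                                filename="stablebeluga2-70b.ggmlv3.q2_K.bin"),
#     n_ctx=2048,    # context window in tokens (the default is 512)
#     n_threads=8,   # CPU threads for inference; match the host's core count
# )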
def generate_text(input_text):
    # Wrap the user input in a Q/A prompt; stop generating at the next "Q:" or newline
    output = llm(f"Q: {input_text} A:", max_tokens=512, stop=["Q:", "\n"], echo=True)
    return output['choices'][0]['text']
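# Quick smoke test of the prompt format, assuming you run this file directly
# before wiring up the UI (hypothetical usage, not part of the Space):
# print(generate_text("What is the capital of France?"))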
input_text = gr.Textbox(lines=10, label="Enter your input text")  # gr.inputs/gr.outputs are deprecated in current Gradio
output_text = gr.Textbox(label="Output text")
description = "Currently running GGML models via the llama.cpp Python bindings (https://github.com/abetlen/llama-cpp-python)"
# Example prompts; gr.Interface examples hold inputs only (one value per input
# component), so the expected answers are left to the model rather than hard-coded.
examples = [
    ["What is the capital of France?"],
    ["Who wrote the novel 'Pride and Prejudice'?"],
    ["What is the square root of 64?"]
]
demo = gr.Interface(
    fn=generate_text,
    inputs=input_text,
    outputs=output_text,
    title="Llama Language Model",
    description=description,
    examples=examples,
)
demo.queue()
demo.launch()
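# Assumption: the default launch() is sufficient on a Hugging Face Space; for
# local debugging you could request a temporary public link instead:
# demo.launch(share=True)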