import gradio as gr
import time
from llama_cpp import Llama
from huggingface_hub import hf_hub_download  # pull the model file from the Hugging Face Hub

# download the GGML model and load it with a 2048-token context window
llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/airoboros-l2-13b-gpt4-m2.0-GGML",
        filename="airoboros-l2-13b-gpt4-m2.0.ggmlv3.q6_K.bin",
    ),
    n_ctx=2048,
)
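# note: this is a GGMLv3 .bin file; recent llama-cpp-python releases only load GGUF,
# so this Space presumably pins an older, GGML-era build of llama-cpp-python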
history = []  # not used by ChatInterface (it passes its own history); kept from the original sketch

def generate_text(input_text, history):
    # flatten the previous [user, reply] pairs into one prompt string
    conversation_context = " ".join([f"{pair[0]} {pair[1]}" for pair in history])
    full_conversation = f"{conversation_context} Q: {input_text} \n A:"
    # echo=False so the returned text is only the new completion, not the prompt
    output = llm(full_conversation, max_tokens=1024, stop=["Q:", "\n"], echo=False)
    response = output['choices'][0]['text']
    history.append([input_text, response])
    # stream the reply back to the chat window one character at a time
    for i in range(len(response)):
        time.sleep(0.3)
        yield response[: i + 1]
# gr.ChatInterface already provides the chatbot window, the message textbox and a
# clear button, so no manual gr.Blocks layout is needed
demo = gr.ChatInterface(generate_text)
demo.queue(concurrency_count=1, max_size=5)  # one worker at a time, at most 5 queued requests
demo.launch()