File size: 1,128 Bytes
06cf9c4
ed5c4cc
 
 
06cf9c4
ed5c4cc
06cf9c4
ed5c4cc
 
360ead8
3402c51
 
06cf9c4
3402c51
 
 
 
 
 
 
 
ec04b94
00b813c
47e41bf
ec04b94
 
 
47e41bf
ec04b94
06cf9c4
ec04b94
ef70bbb
06cf9c4
117600f
 
ec04b94
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import gradio as gr
import time
import ctypes #to run on C api directly 
import llama_cpp
from llama_cpp import Llama
from huggingface_hub import hf_hub_download #load from huggingfaces 


# Download the GGML model file from the Hugging Face Hub (cached locally after
# the first run) and load it with llama.cpp. n_ctx=2048 raises the context
# window so longer conversations fit in the prompt.
llm = Llama(model_path= hf_hub_download(repo_id="TheBloke/airoboros-l2-13b-gpt4-m2.0-GGML", filename="airoboros-l2-13b-gpt4-m2.0.ggmlv3.q6_K.bin"), n_ctx=2048) #download model from hf/ n_ctx=2048 for high context length

# Module-level seed history with one example exchange.
# NOTE(review): the gr.Chatbot below starts empty and generate_text receives
# its history from the Chatbot component, so this global list is never passed
# to the UI — confirm it is still needed.
history = []
history.append(["Hi there!", "Hello, how can I help you?"])

def generate_text(input_text, history):
    """Generate a model reply for *input_text* given the chat *history*.

    Parameters
    ----------
    input_text : str
        The user's new message from the Textbox.
    history : list[list[str]]
        Prior [user, assistant] pairs in Gradio Chatbot format.

    Returns
    -------
    tuple[str, list[list[str]]]
        An empty string (clears the Textbox) and the updated history
        (refreshes the Chatbot).
    """
    # Flatten prior turns into a single prompt prefix, then frame the new
    # message in the Q:/A: style the stop tokens below rely on.
    conversation_context = " ".join(f"{user_msg} {bot_msg}" for user_msg, bot_msg in history)
    full_conversation = f"{conversation_context} Q: {input_text} \n A:"

    # echo=False fixes a display bug: the original passed echo=True, which
    # makes llama-cpp-python prepend the entire prompt to choices[0]['text'],
    # so every turn re-echoed the whole conversation as the bot's reply.
    output = llm(full_conversation, max_tokens=1024, stop=["Q:", "\n"], echo=False)
    response = output['choices'][0]['text'].strip()
    history.append([input_text, response])

    return "", history

# Build the Gradio UI: a chat window, a text input, and a clear button.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # displays the conversation as [user, bot] pairs
    msg = gr.Textbox()  # user input field
    clear = gr.ClearButton([msg, chatbot])  # resets both components

    # On Enter: call generate_text(msg, chatbot) and write its two return
    # values back into (msg, chatbot) — clears the box, refreshes the chat.
    msg.submit(generate_text, [msg, chatbot], [msg, chatbot])

# Serve requests one at a time and cap the waiting queue at 5 users.
demo.queue(concurrency_count=1, max_size=5)
demo.launch()