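# Gradio chat demo backed by a llama.cpp GGML model downloaded from the Hugging Face Hub.
# The model's reply is streamed into the Chatbot component one character at a time.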
import gradio as gr
import time
from llama_cpp import Llama
from huggingface_hub import hf_hub_download  # downloads the model file from the Hugging Face Hub


# Download the GGML model from the Hub and load it; n_ctx=2048 gives a larger context window
llm = Llama(
    model_path=hf_hub_download(
        repo_id="TheBloke/airoboros-l2-13b-gpt4-m2.0-GGML",
        filename="airoboros-l2-13b-gpt4-m2.0.ggmlv3.q6_K.bin",
    ),
    n_ctx=2048,
)


def generate_text(input_text, history):
    # Flatten the previous turns into a single prompt, then append the new question
    conversation_context = " ".join(f"{user_msg} {bot_msg}" for user_msg, bot_msg in history)
    full_conversation = f"{conversation_context} Q: {input_text}\nA:"

    # No echo, so the completion contains only the answer; stop tokens cut it off at the next turn
    output = llm(full_conversation, max_tokens=1024, stop=["Q:", "\n"])
    response = output["choices"][0]["text"]

    # Clear the textbox and hand the updated history to the Chatbot component
    return "", history + [[input_text, response]]

def bot(history):
    # Re-emit the last reply one character at a time to simulate streaming
    response = history[-1][1]
    history[-1][1] = ""
    for character in response:
        history[-1][1] += character
        time.sleep(0.05)
        yield history

with gr.Blocks() as demo:
    # Seed the chat with an initial greeting
    chatbot = gr.Chatbot(value=[["Hi there!", "Hello, how can I help you?"]])
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    # Event wiring must happen inside the Blocks context:
    # generate_text updates the history, then bot streams the newest reply into the Chatbot
    msg.submit(generate_text, [msg, chatbot], [msg, chatbot], queue=False).then(bot, chatbot, chatbot)

demo.queue()
demo.launch()