# llama_2_chatbot/app.py
# Gradio chat demo for a quantized Llama-2-7B-Chat GGUF model served with ctransformers.
from ctransformers import AutoModelForCausalLM
import gradio as gr
# Load the quantized Llama-2 chat model from a local GGUF file (CPU inference).
llm = AutoModelForCausalLM.from_pretrained(
    "llama-2-7b-chat.Q4_K_S.gguf",
    model_type="llama",
    max_new_tokens=1096,
    threads=3,
)
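# The GGUF file is expected next to app.py. If it is not present, ctransformers
# can also fetch it from a Hub repo via model_file; the repo name below is an
# assumption (not stated in this file), shown only as a sketch:
#   llm = AutoModelForCausalLM.from_pretrained(
#       "TheBloke/Llama-2-7B-Chat-GGUF",
#       model_file="llama-2-7b-chat.Q4_K_S.gguf",
#       model_type="llama",
#   )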
def stream(prompt, history):
    # `history` (the conversation so far, supplied by gr.ChatInterface) is currently unused.
    system_prompt = "You are a helpful AI assistant"
    E_INST = "</s>"  # end-of-turn marker in this chat template
    user, assistant = "<|user|>", "<|assistant|>"
    prompt = f"{system_prompt}{E_INST}\n{user}\n{prompt.strip()}{E_INST}\n{assistant}\n"
    # Yield the accumulated text so the chat window updates as tokens arrive.
    response = ""
    for token in llm(prompt, stream=True):
        response += token
        yield response
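# Because `history` is ignored above, the bot has no memory across turns. A
# minimal sketch of folding prior turns into the same template (an assumption,
# not the author's design) would prepend each (user_msg, bot_msg) pair:
#
#   context = ""
#   for user_msg, bot_msg in history:
#       context += f"{user}\n{user_msg}{E_INST}\n{assistant}\n{bot_msg}{E_INST}\n"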
css = """
h1 {
text-align: center;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
.contain {
max-width: 900px;
margin: auto;
padding-top: 1.5rem;
}
"""
chat_interface = gr.ChatInterface(
    fn=stream,
    # additional_inputs_accordion_name="Credentials",
    # additional_inputs=[
    #     gr.Textbox(label="OpenAI Key", lines=1),
    #     gr.Textbox(label="Linkedin Access Token", lines=1),
    # ],
    stop_btn=None,
    examples=[
        ["Explain large language models"],
        ["What is quantum computing?"],
    ],
)
with gr.Blocks(css=css) as demo:
    chat_interface.render()
if __name__ == "__main__":
    demo.queue(max_size=10).launch()
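# Run locally with `python app.py`; queue(max_size=10) caps pending requests so
# the single CPU-bound model instance is not flooded with concurrent generations.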