from llama_cpp import Llama
import gradio as gr
import time

# Load the quantized Zephyr-7B-beta model from a local GGUF file.
# NOTE(review): the path is relative to the current working directory —
# confirm the model file exists there before launching.
llm = Llama(model_path="zephyr-7B-beta-GGUF/zephyr-7b-beta.Q4_K_M.gguf")

def predict(prompt, history):
    """Generate a reply with the local Llama model and stream it to the UI.

    Parameters
    ----------
    prompt : str
        The user's latest message, supplied by ``gr.ChatInterface``.
    history : list
        Prior conversation turns from ``ChatInterface``.
        NOTE(review): currently unused — the model sees each message
        without any conversational context.

    Yields
    ------
    str
        Progressively longer prefixes of the reply, which
        ``ChatInterface`` renders as a typing effect.
    """
    # Explicit max_tokens: llama-cpp-python's default (16) silently
    # truncates replies, which is far too short for a chat assistant.
    output = llm(prompt, max_tokens=256)
    response = output['choices'][0]['text']
    # Yield a growing prefix every 50 ms to simulate token streaming.
    for end in range(1, len(response) + 1):
        time.sleep(0.05)
        yield response[:end]

# Wire `predict` into a chat UI; .queue() is required for generator
# (streaming) handlers, and .launch() starts the local Gradio server.
gr.ChatInterface(predict).queue().launch()