Aspik101 committed on
Commit
83c73ef
·
1 Parent(s): 36d7ef9

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +46 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import time

import gradio as gr
from ctransformers import AutoModelForCausalLM

# Generation settings passed straight through to the ctransformers llama model.
params = {
    "max_new_tokens": 512,
    "stop": ["<end>", "<|endoftext|>"],
    "temperature": 0.7,
    "top_p": 0.8,
    "stream": True,  # llm(...) returns a token generator instead of a full string
    "batch_size": 8,
}

# Polish LoRA-merged guanaco-7B in GGML format, run on CPU via ctransformers.
llm = AutoModelForCausalLM.from_pretrained(
    "Aspik101/guanaco-7B-HF-pl-lora_GGML", model_type="llama"
)

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(user_message, history):
        """Append the user's message (with a pending reply slot) and clear the textbox."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Stream the model's reply into the last history entry, yielding UI updates.

        NOTE(review): the whole `history` list is interpolated into the prompt,
        not just the latest message — presumably intentional as crude context,
        but worth confirming.
        """
        # Fixed: original read `print("Question": history)`, which is a syntax error.
        print("Question:", history)
        stream = llm(
            prompt=f"Jesteś AI assystentem. Odpowiadaj po polsku. <user>: {history}. <assistant>:",
            **params,
        )
        history[-1][1] = ""
        answer_save = ""
        for character in stream:
            history[-1][1] += character
            answer_save += character
            time.sleep(0.005)  # brief pause so the streamed text renders smoothly
            yield history
        # Fixed: original read `print("Answer": answer_save)`, which is a syntax error.
        print("Answer:", answer_save)

    # Submit: first record the user turn (no queue), then stream the bot reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.queue()
demo.launch()
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ torch
2
+ ctransformers
3
+ gradio