Samuel L Meyers committed on
Commit fe36794 · 1 Parent(s): da8a172
Files changed (1)
  1. app.py +26 -13
app.py CHANGED
@@ -6,6 +6,24 @@ import torch
 
 from conversation import get_default_conv_template
 import gradio as gr
+from pyllamacpp.model import Model
+from urllib import request, response, urlopen, error, parse, robotparser
+
+"""
+
+model = Model(model_path='/path/to/model.bin')
+while True:
+    try:
+        prompt = input("You: ", flush=True)
+        if prompt == '':
+            continue
+        print(f"AI:", end='')
+        for token in model.generate(prompt):
+            print(f"{token}", end='', flush=True)
+        print()
+    except KeyboardInterrupt:
+        break
+"""
 
 talkers = {
     "m3b": {
@@ -15,20 +33,15 @@ talkers = {
     }
 }
 
+request.urlretrieve("https://huggingface.co/GGUF/MiniChat-3B/resolve/main/ggml-model-q8_0.bin", "minichat-3b-q8_0.gguf")
+
+lcpp_model = Model(model_path="minichat-3b-q8_0.gguf")
+
 def m3b_talk(text):
-    m3bconv = talkers["m3b"]["conv"]
-    m3bconv.append_message(m3bconv.roles[0], text)
-    m3bconv.append_message(m3bconv.roles[1], None)
-    input_ids = talkers["m3b"]["tokenizer"]([text]).input_ids
-    response_tokens = talkers["m3b"]["model"](
-        torch.as_tensor(m3bconv.get_prompt()),
-        do_sample=True,
-        temperature=0.2,
-        max_new_tokens=1024,
-    )
-    response_tokens = response_tokens[0][len(input_ids[0]):]
-    response = talkers["m3b"]["tokenizer"].decode(response_tokens, skip_special_tokens=True).strip()
-    return response
+    resp = ""
+    for token in lcpp_model.generate(text):
+        resp += token
+    return resp
 
 def main():
     logging.basicConfig(level=logging.INFO)
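
The commit replaces the transformers-style generation in m3b_talk with token streaming from a pyllamacpp Model loaded from a locally downloaded GGUF file. For orientation, below is a minimal sketch of how the new pieces could fit together as a standalone Gradio app; the gr.Interface wiring, the launch() call, and the __main__ guard are assumptions for illustration and are not part of this commit.

# Sketch only: m3b_talk and the model download mirror the commit;
# the Gradio interface wiring below is an assumption, not committed code.
import logging

import gradio as gr
from pyllamacpp.model import Model
from urllib import request

# Fetch the quantised MiniChat-3B weights once, then load them with pyllamacpp.
request.urlretrieve(
    "https://huggingface.co/GGUF/MiniChat-3B/resolve/main/ggml-model-q8_0.bin",
    "minichat-3b-q8_0.gguf",
)
lcpp_model = Model(model_path="minichat-3b-q8_0.gguf")

def m3b_talk(text):
    # Concatenate the streamed tokens into a single reply string.
    resp = ""
    for token in lcpp_model.generate(text):
        resp += token
    return resp

def main():
    logging.basicConfig(level=logging.INFO)
    # Assumed wiring: a plain text-in / text-out interface around m3b_talk.
    gr.Interface(fn=m3b_talk, inputs="text", outputs="text").launch()

if __name__ == "__main__":
    main()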