nroggendorff committed on
Commit
8339753
·
verified ·
1 Parent(s): 310f92d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -1,8 +1,7 @@
1
  import spaces
2
  import gradio as gr
3
  import torch
4
- from transformers import AutoModelForCausalLM, AutoTokenizer
5
- from transformers import StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
6
  from threading import Thread
7
 
8
  class StopOnTokens(StoppingCriteria):
@@ -18,11 +17,11 @@ def predict(message, history):
18
  torch.set_default_device("cuda")
19
 
20
  tokenizer = AutoTokenizer.from_pretrained(
21
- "macadeliccc/laser-dolphin-mixtral-2x7b-dpo",
22
  trust_remote_code=True
23
  )
24
  model = AutoModelForCausalLM.from_pretrained(
25
- "macadeliccc/laser-dolphin-mixtral-2x7b-dpo",
26
  torch_dtype="auto",
27
  load_in_4bit=True,
28
  trust_remote_code=True
 
1
  import spaces
2
  import gradio as gr
3
  import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 
5
  from threading import Thread
6
 
7
  class StopOnTokens(StoppingCriteria):
 
17
  torch.set_default_device("cuda")
18
 
19
  tokenizer = AutoTokenizer.from_pretrained(
20
+ "cognitivecomputations/dolphin-2.8-mistral-7b-v02",
21
  trust_remote_code=True
22
  )
23
  model = AutoModelForCausalLM.from_pretrained(
24
+ "cognitivecomputations/dolphin-2.8-mistral-7b-v02",
25
  torch_dtype="auto",
26
  load_in_4bit=True,
27
  trust_remote_code=True