Jagad1234unique committed on
Commit
6153943
·
verified ·
1 Parent(s): 452bff9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -6
app.py CHANGED
@@ -1,16 +1,19 @@
1
  import gradio as gr
 
 
2
  # Use a pipeline as a high-level helper
3
  from transformers import pipeline
4
 
5
- pipe = pipeline("text-generation", model="tiiuae/falcon-7b", trust_remote_code=True)
6
- # Load model directly
7
- # Load model directly
8
- # Load model directly
 
9
  # Load model directly
10
  from transformers import AutoTokenizer, AutoModelForCausalLM
11
 
12
- tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
13
- model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
14
  def respond(
15
  message,
16
  history: list[tuple[str, str]],
 
1
  import gradio as gr
2
+ # Use a pipeline as a high-level helper
3
+
4
  # Use a pipeline as a high-level helper
5
  from transformers import pipeline
6
 
7
+ messages = [
8
+ {"role": "user", "content": "Who are you?"},
9
+ ]
10
+ pipe = pipeline("text-generation", model="Qwen/Qwen2.5-1.5B")
11
+ pipe(messages)
12
  # Load model directly
13
  from transformers import AutoTokenizer, AutoModelForCausalLM
14
 
15
+ tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-1.5B")
16
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-1.5B")
17
  def respond(
18
  message,
19
  history: list[tuple[str, str]],