Jagad1234unique committed on
Commit
1ed1d34
·
verified ·
1 Parent(s): 48eed56

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -9
app.py CHANGED
@@ -1,22 +1,18 @@
1
  import gradio as gr
2
  # Use a pipeline as a high-level helper
3
  # Use a pipeline as a high-level helper
 
4
  from transformers import pipeline
5
 
6
- messages = [
7
- {"role": "user", "content": "Who are you?"},
8
- ]
9
- pipe = pipeline("text-generation", model="Qwen/Qwen-7B")
10
- pipe(messages)
11
  """
12
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
13
  """
14
  # Load model directly
15
  # Load model directly
16
- from transformers import AutoTokenizer, AutoModelForCausalLM
17
-
18
- tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B")
19
- model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B")
20
 
21
  def respond(
22
  message,
 
1
  import gradio as gr
2
  # Use a pipeline as a high-level helper
3
  # Use a pipeline as a high-level helper
4
+ # Use a pipeline as a high-level helper
5
  from transformers import pipeline
6
 
7
+ pipe = pipeline("text-generation", model="Qwen/Qwen-7B", trust_remote_code=True)
 
 
 
 
8
  """
9
  For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
10
  """
11
  # Load model directly
12
  # Load model directly
13
+ # Load model directly
14
+ from transformers import AutoModelForCausalLM
15
+ model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)
 
16
 
17
  def respond(
18
  message,