Jagad1234unique committed on
Commit
452bff9
·
verified ·
1 Parent(s): 1ed1d34

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -8
app.py CHANGED
@@ -1,19 +1,16 @@
1
  import gradio as gr
2
  # Use a pipeline as a high-level helper
3
- # Use a pipeline as a high-level helper
4
- # Use a pipeline as a high-level helper
5
  from transformers import pipeline
6
 
7
- pipe = pipeline("text-generation", model="Qwen/Qwen-7B", trust_remote_code=True)
8
- """
9
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
10
- """
11
  # Load model directly
12
  # Load model directly
13
  # Load model directly
14
- from transformers import AutoModelForCausalLM
15
- model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B", trust_remote_code=True)
16
 
 
 
17
  def respond(
18
  message,
19
  history: list[tuple[str, str]],
 
1
  import gradio as gr
2
  # Use a pipeline as a high-level helper
 
 
3
  from transformers import pipeline
4
 
5
+ pipe = pipeline("text-generation", model="tiiuae/falcon-7b", trust_remote_code=True)
6
+ # Load model directly
 
 
7
  # Load model directly
8
  # Load model directly
9
  # Load model directly
10
+ from transformers import AutoTokenizer, AutoModelForCausalLM
 
11
 
12
+ tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
13
+ model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-7b", trust_remote_code=True)
14
  def respond(
15
  message,
16
  history: list[tuple[str, str]],