Azperia committed on
Commit 4fa773e · verified · 1 Parent(s): 9835591

Update app.py

Files changed (1)
  1. app.py +3 -4
app.py CHANGED
@@ -1,17 +1,16 @@
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
-# Load the model (replace "mistralai/Mistral-7B-Instruct" with your model)
-model_name = "mistralai/Mistral-7B-Instruct"
+# Replace with a public model
+model_name = "gpt2"
 model = AutoModelForCausalLM.from_pretrained(model_name)
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
-# Chat function
 def chatbot_response(user_input):
     inputs = tokenizer(user_input, return_tensors="pt")
     outputs = model.generate(**inputs, max_new_tokens=200)
     return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-# Gradio interface
 iface = gr.Interface(fn=chatbot_response, inputs="text", outputs="text", title="Thought 1.0 Poet IQ150")
 iface.launch()
+