FridayMaster committed on
Commit
b6c96cc
·
verified ·
1 Parent(s): cecf77a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -5,14 +5,15 @@ import torch
5
  # Load the model and tokenizer
6
  model_name = 'FridayMaster/fine_tune_embedding' # Replace with your model's repository name
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
- model = AutoModelForCausalLM.from_pretrained(model_name)
9
 
10
  # Define a function to generate responses
11
  def generate_response(prompt):
12
  inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=512)
13
  with torch.no_grad():
14
- outputs = model.generate(inputs['input_ids'], max_length=150, num_return_sequences=1)
15
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
16
  return response
17
 
18
  # Create a Gradio interface
@@ -27,5 +28,3 @@ iface = gr.Interface(
27
  # Launch the Gradio app
28
  if __name__ == "__main__":
29
  iface.launch()
30
-
31
-
 
5
  # Load the model and tokenizer
6
  model_name = 'FridayMaster/fine_tune_embedding' # Replace with your model's repository name
7
  tokenizer = AutoTokenizer.from_pretrained(model_name)
8
+ model = AutoModelForSequenceClassification.from_pretrained(model_name) # Use the appropriate class
9
 
10
  # Define a function to generate responses
11
  def generate_response(prompt):
12
  inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=512)
13
  with torch.no_grad():
14
+ outputs = model(**inputs)
15
+ # Customize the response generation as per your model's output
16
+ response = tokenizer.decode(outputs.logits.argmax(dim=-1), skip_special_tokens=True)
17
  return response
18
 
19
  # Create a Gradio interface
 
28
  # Launch the Gradio app
29
  if __name__ == "__main__":
30
  iface.launch()