YALCINKAYA committed
Commit 8a9401d
1 Parent(s): bbf3ec5

dummy response from generate_response

Files changed (1):
  1. app.py +17 -15
app.py CHANGED
@@ -1,22 +1,25 @@
 from flask import Flask, jsonify, request
 from flask_cors import CORS
-from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
-from huggingface_hub import login
+from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
 import os
 
 app = Flask(__name__)
-
+
 # Enable CORS for specific origins
 CORS(app, resources={r"api/predict/*": {"origins": ["http://localhost:3000", "https://main.dbn2ikif9ou3g.amplifyapp.com"]}})
-
+
+# Model setup
 model_id = "YALCINKAYA/opsgenius-large"
-
+
+# Load the tokenizer and model
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+
 def generate_response(user_input):
-    prompt = formatted_prompt(user_input)
-
-    response = prompt
-    response
-
+    # Instead of generating a response from the model, return a dummy message
+    dummy_response = "This is a dummy response for the input: " + user_input
+    return dummy_response
+
 def formatted_prompt(question) -> str:
     return f"<|im_start|>user\n{question}<|im_end|>\n<|im_start|>assistant:"
 
@@ -41,17 +44,16 @@ def handle_post_request():
     message = data.get("inputs", "No message provided.")
     new_token = os.getenv("HF_TOKEN")
 
-    # Generate a response from the model
-    #model_response = generate_response(message)
+    # Generate a response from the dummy message instead of the model
+    model_response = generate_response(message)
 
     # Return a JSON response including the generated response
     return jsonify({
-        "received_message": message,
+        "received_message": model_response,
         "status": "POST request successful!"
     })
 
 # Note: Remove the app.run() call to let Hugging Face handle it
 # Launch the interface
-
 if __name__ == '__main__':
-    app.run(host='0.0.0.0', port=7860)
+    app.run(host='0.0.0.0', port=7860)
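With this commit the endpoint echoes a dummy string rather than a model generation. A minimal sketch for exercising it, assuming handle_post_request is mounted at /api/predict (the route decorator is outside this diff; the path is inferred from the CORS resource pattern) and the app is running locally on the port from the diff:

    import requests

    # Hypothetical test call; the exact route path is an assumption.
    resp = requests.post(
        "http://localhost:7860/api/predict",
        json={"inputs": "Hello, OpsGenius!"},
    )
    print(resp.json())
    # Expected with this commit:
    # {"received_message": "This is a dummy response for the input: Hello, OpsGenius!",
    #  "status": "POST request successful!"}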
 
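The commit now loads the tokenizer and model at startup but still returns a stub from generate_response, while formatted_prompt already builds the ChatML-style prompt the model would consume. A minimal sketch of what a model-backed generate_response might look like once the stub is replaced; the generation parameters are illustrative assumptions, not values from this repository:

    def generate_response(user_input):
        # Build the ChatML-style prompt defined in this commit
        prompt = formatted_prompt(user_input)
        inputs = tokenizer(prompt, return_tensors="pt")
        output_ids = model.generate(
            **inputs,
            max_new_tokens=128,   # illustrative cap on reply length (assumption)
            do_sample=True,       # illustrative: sample rather than greedy decode
            temperature=0.7,      # illustrative sampling temperature
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens, skipping the echoed prompt
        new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)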