Canstralian committed on
Commit
01c75ae
·
verified ·
1 Parent(s): eca9523

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -6
app.py CHANGED
@@ -1,11 +1,24 @@
 
1
  from typing import List, Tuple
 
 
2
 
 
 
 
 
 
 
 
 
 
 
3
  def generate_response(
4
  user_input: str,
5
  history: List[Tuple[str, str]],
6
- max_tokens: int,
7
- temperature: float,
8
- top_p: float
9
  ) -> str:
10
  """
11
  Generates a response from the AI model.
@@ -33,12 +46,13 @@ def generate_response(
33
 
34
  # Generate response from the model
35
  response = ""
36
- for msg in client.chat_completion(
37
- messages,
 
38
  max_tokens=max_tokens,
39
- stream=True,
40
  temperature=temperature,
41
  top_p=top_p,
 
42
  ):
43
  # Check if 'choices' is present and non-empty
44
  if msg and 'choices' in msg and msg['choices']:
@@ -56,3 +70,27 @@ def generate_response(
56
  # Log the error for debugging purposes
57
  print(f"An error occurred: {e}")
58
  return "Error: An unexpected error occurred while processing your request."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
  from typing import List, Tuple
3
+ import openai # Assuming you're using OpenAI's API (make sure to install the OpenAI package)
4
+ from flask import Flask, request, jsonify
5
 
6
+ # Initialize Flask app
7
+ app = Flask(__name__)
8
+
9
+ # Set the OpenAI API key
10
+ openai.api_key = os.getenv("OPENAI_API_KEY")
11
+
12
+ # Define a system message
13
+ SYSTEM_MESSAGE = "You are a helpful assistant."
14
+
15
+ # Function to generate AI response
16
  def generate_response(
17
  user_input: str,
18
  history: List[Tuple[str, str]],
19
+ max_tokens: int = 150,
20
+ temperature: float = 0.7,
21
+ top_p: float = 1.0
22
  ) -> str:
23
  """
24
  Generates a response from the AI model.
 
46
 
47
  # Generate response from the model
48
  response = ""
49
+ for msg in openai.ChatCompletion.create(
50
+ model="gpt-3.5-turbo", # You can use any model you prefer
51
+ messages=messages,
52
  max_tokens=max_tokens,
 
53
  temperature=temperature,
54
  top_p=top_p,
55
+ stream=True
56
  ):
57
  # Check if 'choices' is present and non-empty
58
  if msg and 'choices' in msg and msg['choices']:
 
70
  # Log the error for debugging purposes
71
  print(f"An error occurred: {e}")
72
  return "Error: An unexpected error occurred while processing your request."
73
+
74
# Route to handle user input and generate responses
@app.route("/chat", methods=["POST"])
def chat():
    """Handle a POST /chat request.

    Expects a JSON body with:
        user_input (str): the user's message (defaults to "").
        history (list): prior (user, assistant) conversation turns
            (defaults to []).

    Returns:
        A JSON response ``{"response": <str>}`` on success, or
        ``{"error": <str>}`` with HTTP status 500 on failure.
    """
    try:
        # BUG FIX: request.json is None (or aborts) when the body is
        # missing or not sent with a JSON Content-Type, which made
        # request.json.get(...) raise AttributeError. get_json(silent=True)
        # returns None instead, letting us fall back to safe defaults.
        payload = request.get_json(silent=True) or {}
        user_input = payload.get("user_input", "")
        history = payload.get("history", [])

        # Generate the AI response; max_tokens / temperature / top_p use
        # the defaults declared on generate_response.
        response = generate_response(
            user_input=user_input,
            history=history
        )

        # Return the response as JSON
        return jsonify({"response": response})

    except Exception as e:
        # Boundary handler: surface the failure to the client as JSON
        # rather than an HTML error page.
        return jsonify({"error": str(e)}), 500
93
+
94
if __name__ == "__main__":
    # Launch the Flask development server, reachable on every interface.
    # NOTE(review): debug=True enables the interactive Werkzeug debugger;
    # combined with host="0.0.0.0" this must not be used in production —
    # confirm deployment uses a real WSGI server instead.
    app.run(host="0.0.0.0", port=5000, debug=True)