Docfile committed on
Commit
462dbaa
·
verified ·
1 Parent(s): 04e13ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -136
app.py CHANGED
@@ -1,150 +1,103 @@
1
- from flask import Flask, render_template, request, jsonify, session
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import google.generativeai as genai
3
- import os
4
  from dotenv import load_dotenv
5
- import http.client
6
- import json
7
- from werkzeug.utils import secure_filename
8
- import markdown2
9
 
 
 
 
 
10
  app = Flask(__name__)
11
- app.config['UPLOAD_FOLDER'] = 'temp'
12
- app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024 # 16MB max file size
13
- app.secret_key = 'your-secret-key-here' # Change this to a secure secret key
14
 
15
- load_dotenv()
 
 
16
 
17
- # Configure the API key
 
 
18
  genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
19
 
20
- safety_settings = [
21
- {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
22
- {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
23
- {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
24
- {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
25
- ]
26
-
27
- def get_chat_model():
28
- return genai.GenerativeModel('gemini-2.0-flash-exp',
29
- tools='code_execution',
30
- safety_settings=safety_settings,
31
- system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
32
-
33
- def perform_web_search(query):
34
- conn = http.client.HTTPSConnection("google.serper.dev")
35
- payload = json.dumps({"q": query})
36
- headers = {
37
- 'X-API-KEY': '9b90a274d9e704ff5b21c0367f9ae1161779b573',
38
- 'Content-Type': 'application/json'
39
- }
40
- try:
41
- conn.request("POST", "/search", payload, headers)
42
- res = conn.getresponse()
43
- data = json.loads(res.read().decode("utf-8"))
44
- return data
45
- except Exception as e:
46
- return {"error": str(e)}
47
- finally:
48
- conn.close()
49
-
50
- def format_search_results(data):
51
- if not data:
52
- return "Aucun résultat trouvé"
53
-
54
- result = ""
55
-
56
- if 'knowledgeGraph' in data:
57
- kg = data['knowledgeGraph']
58
- result += f"### {kg.get('title', '')}\n"
59
- result += f"*{kg.get('type', '')}*\n\n"
60
- result += f"{kg.get('description', '')}\n\n"
61
-
62
- if 'organic' in data:
63
- result += "### Résultats principaux:\n"
64
- for item in data['organic'][:3]:
65
- result += f"- **{item['title']}**\n"
66
- result += f" {item['snippet']}\n"
67
- result += f" [Lien]({item['link']})\n\n"
68
-
69
- return result
70
-
71
- @app.route('/')
72
- def home():
73
- if 'chat_history' not in session:
74
- session['chat_history'] = []
75
- return render_template('index.html', chat_history=session['chat_history'])
76
 
77
  @app.route('/chat', methods=['POST'])
78
  def chat():
 
 
 
 
 
 
 
 
 
 
 
 
 
79
  data = request.json
80
- prompt = data.get('message')
81
- web_search_enabled = data.get('web_search', False)
82
-
83
- if 'chat' not in session:
84
- session['chat'] = get_chat_model().start_chat(history=[])
85
-
86
- try:
87
- web_results = None
88
- if web_search_enabled:
89
- web_results = perform_web_search(prompt)
90
- if web_results and 'error' not in web_results:
91
- formatted_results = format_search_results(web_results)
92
- prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
93
-
94
- chat = session['chat']
95
- response = chat.send_message(prompt)
96
-
97
- # Convert Markdown to HTML for the response
98
- response_html = markdown2.markdown(response.text, extras=["fenced-code-blocks", "tables"])
99
-
100
- # Update chat history
101
- if 'chat_history' not in session:
102
- session['chat_history'] = []
103
-
104
- session['chat_history'].append({
105
- 'role': 'user',
106
- 'content': prompt
107
- })
108
- session['chat_history'].append({
109
- 'role': 'assistant',
110
- 'content': response.text,
111
- 'content_html': response_html
112
- })
113
- session.modified = True
114
-
115
- return jsonify({
116
- "response": response.text,
117
- "response_html": response_html
118
- })
119
-
120
- except Exception as e:
121
- return jsonify({"error": str(e)}), 500
122
-
123
- @app.route('/upload', methods=['POST'])
124
- def upload_file():
125
- if 'file' not in request.files:
126
- return jsonify({"error": "No file part"}), 400
127
-
128
- file = request.files['file']
129
- if file.filename == '':
130
- return jsonify({"error": "No selected file"}), 400
131
-
132
- if file:
133
- filename = secure_filename(file.filename)
134
- filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
135
- file.save(filepath)
136
-
137
- try:
138
- gemini_file = genai.upload_file(filepath)
139
- return jsonify({"success": True, "filename": filename})
140
- except Exception as e:
141
- return jsonify({"error": str(e)}), 500
142
-
143
- @app.route('/clear', methods=['POST'])
144
- def clear_history():
145
- session.clear()
146
- return jsonify({"success": True})
147
 
 
148
  if __name__ == '__main__':
149
- os.makedirs("temp", exist_ok=True)
150
- app.run(debug=True)
 
1
+ # Copyright 2024 Google LLC
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from flask import (
15
+ Flask,
16
+ request,
17
+ Response,
18
+ stream_with_context
19
+ )
20
+ from flask_cors import CORS
21
  import google.generativeai as genai
 
22
  from dotenv import load_dotenv
23
+ import os
 
 
 
24
 
25
# Load environment variables from a .env file located in the same directory.
load_dotenv()

# Create the Flask application that serves the chat endpoints.
app = Flask(__name__)

# Enable CORS so the server accepts requests from all domains.
# This is especially useful during development and testing.
CORS(app)

# WARNING: Do not share code with your API key hard coded in it.
# Authenticate requests to the Gemini API with the Google API key
# taken from the environment.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# The generative model used to process user inputs and generate responses.
model = genai.GenerativeModel(model_name="gemini-1.5-flash")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
 
46
@app.route('/chat', methods=['POST'])
def chat():
    """Handle a single-turn chat request and return the model's reply.

    Expects a JSON POST body with:
      - 'chat': the latest user message (string)
      - 'history': optional prior conversation turns (list)

    Returns:
        A JSON object of the form {"text": <AI-generated response>}.
    """
    # Pull the user message and the optional conversation history
    # out of the request payload.
    payload = request.json
    user_message = payload.get('chat', '')
    history = payload.get('history', [])

    # Resume the conversation from the supplied history, then send
    # the newest message and collect the model's answer.
    session = model.start_chat(history=history)
    reply = session.send_message(user_message)

    return {"text": reply.text}
72
+
73
+ @app.route("/stream", methods=["POST"])
74
+ def stream():
75
+ """Streams AI responses for real-time chat interactions.
76
+
77
+ This function initiates a streaming session with the Gemini AI model,
78
+ continuously sending user inputs and streaming back the responses. It handles
79
+ POST requests to the '/stream' endpoint with a JSON payload similar to the
80
+ '/chat' endpoint.
81
+
82
+ Args:
83
+ None (uses Flask `request` object to access POST data)
84
+
85
+ Returns:
86
+ A Flask `Response` object that streams the AI-generated responses.
87
+ """
88
+ def generate():
89
+ data = request.json
90
+ msg = data.get('chat', '')
91
+ chat_history = data.get('history', [])
92
+
93
+ chat_session = model.start_chat(history=chat_history)
94
+ response = chat_session.send_message(msg, stream=True)
95
+
96
+ for chunk in response:
97
+ yield f"{chunk.text}"
98
+
99
+ return Response(stream_with_context(generate()), mimetype="text/event-stream")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
100
 
101
# Run the development server on the port from the PORT environment
# variable, defaulting to 9000. The original passed os.getenv("PORT")
# directly, which is a string (or None when unset, silently falling back
# to Flask's default 5000 despite the stated intent of port 9000).
if __name__ == '__main__':
    app.run(port=int(os.getenv("PORT", 9000)))