codewithdark committed on
Commit 7cd4c4f · verified · 1 Parent(s): 47086a6

Update app.py

Files changed (1)
  1. app.py +64 -35
app.py CHANGED
@@ -1,16 +1,17 @@
  import streamlit as st
+ import g4f
  from g4f.client import Client
  import sqlite3
  import google.generativeai as genai
  # import pyttsx3
- # import pyperclip
+ import pyperclip
  import requests
- import cv2
- import numpy as np
+ from PIL import Image
+ import io


  API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
- headers = {"Authorization": "Bearer Your_hugging_face_Api_key"}
+ headers = {"Authorization": "Bearer Your_huggingface_Api_key"}

  def local_css(file_name):
      with open(file_name) as f:
@@ -34,12 +35,14 @@ except Exception as e:
  def generate_image_from_model(prompt):
      response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
      image_bytes = response.content
-     # Convert image bytes to a NumPy array
-     nparr = np.frombuffer(image_bytes, np.uint8)
-     # Decode the image array
-     image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+     image = Image.open(io.BytesIO(image_bytes))
      return image

+ def generate_image(prompt):
+     response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+     image_bytes = response.content
+     image = Image.open(io.BytesIO(image_bytes))
+     return image

  # Streamlit app
  def main():
@@ -54,7 +57,6 @@ def main():
          "🚀 Airoboros 70B": "airoboros-70b",
          "🔮 Gemini Pro": "gemini-pro",
          "📷 StabilityAI": "stabilityai/stable-diffusion-xl-base-1.0"
-
      }

      columns = st.columns(3)  # Split the layout into three columns
@@ -98,29 +100,54 @@ def main():
      if user_input:
          if selected_model == "gemini-pro":
              try:
-                 GOOGLE_API_KEY = "your_Gemini_Api_key"
-                 genai.configure(api_key=GOOGLE_API_KEY)
-                 model = genai.GenerativeModel('gemini-pro')
-                 prompt = user_input
-                 response = model.generate_content(prompt)
-                 bot_response = response.candidates[0].content.parts[0].text
-
-                 st.session_state.chat_history.append({"role": "user", "content": user_input})
-                 st.session_state.chat_history.append({"role": "bot", "content": bot_response})
-
-                 # Store chat in the database
-                 for chat in st.session_state.chat_history:
-                     c.execute("INSERT INTO chat_history VALUES (?, ?, ?)",
-                               (st.session_state.conversation_id, chat["role"], chat["content"]))
-                 conn.commit()
-
-                 for index, chat in enumerate(st.session_state.chat_history):
-                     with st.chat_message(chat["role"]):
-                         if chat["role"] == "user":
-                             st.markdown(chat["content"])
-                         elif chat["role"] == "bot":
-                             st.markdown(chat["content"])

+                 if user_input.startswith("/image"):
+                     prompt = user_input[len("/image"):].strip()  # Extract prompt after "/image"
+
+                     # Use Gemini Pro to generate content based on the prompt
+                     GOOGLE_API_KEY = "AIzaSyC8_gwU5LSVQJk3iIXyj5xJ94ArNK11dXU"
+                     genai.configure(api_key=GOOGLE_API_KEY)
+                     model = genai.GenerativeModel('gemini-1.0-pro')
+                     response = model.generate_content(prompt)
+                     bot_response = response.candidates[0].content.parts[0].text
+
+                     # Generate image based on the generated text prompt
+                     generated_image = generate_image(bot_response)
+
+                     st.session_state.chat_history.append({"role": "user", "content": user_input})
+                     st.session_state.chat_history.append({"role": "bot", "content": generated_image})
+
+                     # Display the generated image
+                     for index, chat in enumerate(st.session_state.chat_history):
+                         with st.chat_message(chat["role"]):
+                             if chat["role"] == "user":
+                                 st.markdown(user_input)
+                             elif chat["role"] == "bot":
+                                 st.image(generated_image, width=400)
+
+                 else:
+                     GOOGLE_API_KEY = "your_Gemini_Api_key"
+                     genai.configure(api_key=GOOGLE_API_KEY)
+                     model = genai.GenerativeModel('gemini-1.0-pro')
+                     prompt = user_input
+                     response = model.generate_content(prompt)
+                     bot_response = response.candidates[0].content.parts[0].text
+
+                     st.session_state.chat_history.append({"role": "user", "content": user_input})
+                     st.session_state.chat_history.append({"role": "bot", "content": bot_response})
+
+                     # Store chat in the database
+                     for chat in st.session_state.chat_history:
+                         c.execute("INSERT INTO chat_history VALUES (?, ?, ?)",
+                                   (st.session_state.conversation_id, chat["role"], chat["content"]))
+                     conn.commit()
+
+                     for index, chat in enumerate(st.session_state.chat_history):
+                         with st.chat_message(chat["role"]):
+                             if chat["role"] == "user":
+                                 st.markdown(chat["content"])
+                             elif chat["role"] == "bot":
+                                 st.markdown(chat["content"])

              except Exception as e:
                  st.error(f"An error occurred: {e}")
@@ -128,10 +155,12 @@ def main():
          elif selected_model == "stabilityai/stable-diffusion-xl-base-1.0":
              prompt = user_input
              generated_image = generate_image_from_model(prompt)
-             # Display the image using OpenCV
-             cv2.imshow("Generated Image", generated_image)
-             cv2.waitKey(0)
-             cv2.destroyAllWindows()
+             for index, chat in enumerate(st.session_state.chat_history):
+                 with st.chat_message(chat["role"]):
+                     if chat["role"] == "user":
+                         st.markdown(user_input)
+                     elif chat["role"] == "bot":
+                         st.image(generated_image, width=400)

          else:
              try:
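
For reference, a minimal standalone sketch of the image path this commit moves to: the raw bytes returned by the Hugging Face Inference API are decoded with PIL (replacing the cv2/NumPy decode) and rendered with st.image, as in the StabilityAI branch above. The token placeholder, the raise_for_status() check, and the example prompt are illustrative assumptions, not part of the commit.

import io
import requests
import streamlit as st
from PIL import Image

API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
headers = {"Authorization": "Bearer <your_huggingface_api_token>"}  # placeholder token, as in the diff

def generate_image_from_model(prompt):
    # POST the prompt to the hosted SDXL model; the response body is raw image bytes
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    response.raise_for_status()  # assumption: surface HTTP errors rather than decoding an error payload
    return Image.open(io.BytesIO(response.content))

# Usage inside the Streamlit script, mirroring the StabilityAI branch of the diff
generated_image = generate_image_from_model("a lighthouse at dusk, oil painting")
st.image(generated_image, width=400)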