codewithdark committed
Commit 2b27d13 · verified · 1 Parent(s): eff3649

Update app.py

Files changed (1)
  1. app.py +27 -27
app.py CHANGED
@@ -2,16 +2,21 @@ import streamlit as st
 from g4f.client import Client
 import sqlite3
 import google.generativeai as genai
-from diffusers import DiffusionPipeline
-import matplotlib.pyplot as plt
-import torch
 # import pyttsx3
 # import pyperclip
+import requests
+import cv2
+import numpy as np
+
+
+API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
+headers = {"Authorization": "Bearer Your_hugging_face_Api_key"}
 
 def local_css(file_name):
     with open(file_name) as f:
         st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
 
+
 local_css("style.css")
 
 # Create a connection to the database
@@ -25,24 +30,16 @@ try:
     conn.commit()
 except Exception as e:
     st.error(f"An error occurred: {e}")
-
-
-def generate_image(pipe, prompt, params):
-    img = pipe(prompt, **params).images
 
-    num_images = len(img)
-    if num_images>1:
-        fig, ax = plt.subplots(nrows=1, ncols=num_images)
-        for i in range(num_images):
-            ax[i].imshow(img[i]);
-            ax[i].axis('off');
+def generate_image_from_model(prompt):
+    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
+    image_bytes = response.content
+    # Convert image bytes to a NumPy array
+    nparr = np.frombuffer(image_bytes, np.uint8)
+    # Decode the image array
+    image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
+    return image
 
-    else:
-        fig = plt.figure()
-        plt.imshow(img[0]);
-        plt.axis('off');
-        plt.tight_layout()
-
 
 # Streamlit app
 def main():
@@ -57,7 +54,7 @@ def main():
         "🚀 Airoboros 70B": "airoboros-70b",
         "🔮 Gemini Pro": "gemini-pro",
         "📷 StabilityAI": "stabilityai/stable-diffusion-xl-base-1.0"
-
+
     }
 
     columns = st.columns(3)  # Split the layout into three columns
@@ -101,7 +98,7 @@ def main():
     if user_input:
         if selected_model == "gemini-pro":
             try:
-                GOOGLE_API_KEY = "Gemini"
+                GOOGLE_API_KEY = "your_Gemini_Api_key"
                 genai.configure(api_key=GOOGLE_API_KEY)
                 model = genai.GenerativeModel('gemini-pro')
                 prompt = user_input
@@ -127,14 +124,15 @@ def main():
 
             except Exception as e:
                 st.error(f"An error occurred: {e}")
-
+
         elif selected_model == "stabilityai/stable-diffusion-xl-base-1.0":
-            pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32, use_safetensors=True, variant="fp16")
-            pipe.to("cpu")
+            prompt = user_input
+            generated_image = generate_image_from_model(prompt)
+            # Display the image using OpenCV
+            cv2.imshow("Generated Image", generated_image)
+            cv2.waitKey(0)
+            cv2.destroyAllWindows()
 
-            params = {'num_inference_steps': 100, 'num_images_per_prompt': 2}
-            generate_image(pipe, user_input, params)
-
         else:
             try:
                 client = Client()
@@ -170,6 +168,7 @@ def main():
             except Exception as e:
                 st.error(f"An error occurred: {e}")
 
+
 def display_conversation(conversation_id):
     c.execute("SELECT * FROM chat_history WHERE conversation_id=?", (conversation_id,))
     chats = c.fetchall()
@@ -178,5 +177,6 @@ def display_conversation(conversation_id):
         st.markdown(f"{chat[1]}")
         st.markdown(f"{chat[2]}")
 
+
 if __name__ == "__main__":
     main()
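For reference, here is a minimal, self-contained sketch of the image-generation path this commit switches to: POST the prompt to the Hugging Face Inference API endpoint for stabilityai/stable-diffusion-xl-base-1.0, decode the returned bytes with NumPy and OpenCV, and show the result. The bearer token is a placeholder (as in the commit), the raise_for_status() check is an extra safeguard not present in the diff, and the result is rendered with st.image(..., channels="BGR") rather than the cv2.imshow window the diff uses, since an OpenCV window opens on the machine running the script instead of inside the Streamlit page.

```python
import requests
import cv2
import numpy as np
import streamlit as st

# Endpoint and placeholder token, mirroring the commit; supply a real HF API key.
API_URL = "https://api-inference.huggingface.co/models/stabilityai/stable-diffusion-xl-base-1.0"
headers = {"Authorization": "Bearer Your_hugging_face_Api_key"}


def generate_image_from_model(prompt):
    """Request an SDXL image from the Inference API and decode it into a BGR array."""
    response = requests.post(API_URL, headers=headers, json={"inputs": prompt})
    response.raise_for_status()  # surface HTTP errors instead of decoding an error payload
    nparr = np.frombuffer(response.content, np.uint8)
    # cv2.imdecode returns a BGR image array, or None if the payload was not an image
    return cv2.imdecode(nparr, cv2.IMREAD_COLOR)


prompt = st.text_input("Prompt", "a watercolor painting of a fox")  # illustrative default
if prompt:
    image = generate_image_from_model(prompt)
    if image is None:
        st.error("The API did not return a decodable image.")
    else:
        # st.image renders inline in the app; channels="BGR" matches OpenCV's channel order
        st.image(image, channels="BGR", caption=prompt)
```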
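The Gemini branch of this commit only renames the hard-coded key placeholder, but for completeness, below is a hedged sketch of how that branch's calls fit together with the google-generativeai client imported at the top of app.py. The generate_content / response.text usage follows the library's documented pattern and is assumed here rather than shown in the diff; reading the key from st.secrets is likewise only a suggestion.

```python
import google.generativeai as genai
import streamlit as st

# Placeholder key, as in the commit; in practice read it from st.secrets or an environment variable.
GOOGLE_API_KEY = "your_Gemini_Api_key"
genai.configure(api_key=GOOGLE_API_KEY)

model = genai.GenerativeModel('gemini-pro')
user_input = st.text_input("Ask Gemini", "Summarize what a diffusion model does.")  # illustrative default
if user_input:
    try:
        # Send the prompt and render the model's text reply in the app
        response = model.generate_content(user_input)
        st.markdown(response.text)
    except Exception as e:
        st.error(f"An error occurred: {e}")
```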