Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,9 +1,21 @@
|
|
1 |
import streamlit as st
|
|
|
2 |
import requests
|
3 |
import json
|
4 |
|
5 |
-
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
# Page Title
|
9 |
st.title("LemmaTeks: AI-Powered Text Generator")
|
@@ -28,16 +40,22 @@ with st.sidebar:
|
|
28 |
text_length = st.slider("Text Length (words):", min_value=50, max_value=1000, step=50)
|
29 |
creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, step=0.1)
|
30 |
|
31 |
-
# Number of Responses
|
32 |
-
num_responses = st.number_input("Number of Responses:", min_value=1, max_value=5, step=1)
|
33 |
-
|
34 |
# Checkboxes for Features
|
35 |
creative_mode = st.checkbox("Enable Creative Mode")
|
36 |
fact_checking = st.checkbox("Enable Fact-Checking")
|
37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
38 |
# Text Input Field
|
39 |
user_prompt = st.text_area("Enter Your Prompt Here:")
|
40 |
|
|
|
|
|
|
|
41 |
# Submit Button
|
42 |
if st.button("Generate"):
|
43 |
if user_prompt.strip() == "":
|
@@ -47,32 +65,17 @@ if st.button("Generate"):
|
|
47 |
st.write("Generating responses...")
|
48 |
|
49 |
try:
|
50 |
-
# Set up the Gemini API endpoint (this is an example and should be adjusted)
|
51 |
-
api_url = "https://gemini.google.com/app" # Replace with actual URL
|
52 |
-
headers = {
|
53 |
-
"Authorization": f"Bearer {gemini_api_key}",
|
54 |
-
"Content-Type": "application/json"
|
55 |
-
}
|
56 |
-
|
57 |
-
# Set up the payload for the API request
|
58 |
-
payload = {
|
59 |
-
"model": "gemini", # Replace with the correct Gemini model name
|
60 |
-
"prompt": f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_prompt}",
|
61 |
-
"max_tokens": text_length,
|
62 |
-
"temperature": creativity_level,
|
63 |
-
"num_responses": num_responses
|
64 |
-
}
|
65 |
-
|
66 |
-
# Make the API request
|
67 |
-
response = requests.post(api_url, headers=headers, json=payload)
|
68 |
-
response_data = response.json()
|
69 |
-
|
70 |
-
# Display Responses
|
71 |
-
for i, choice in enumerate(response_data["choices"]):
|
72 |
-
st.subheader(f"Response {i + 1}")
|
73 |
-
st.write(choice["text"].strip())
|
74 |
|
75 |
-
|
76 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
77 |
except Exception as e:
|
78 |
-
st.error(f"
|
|
|
|
import os

import streamlit as st
import google.generativeai as genai
import requests
import json

# --- Gemini model configuration ---
MODEL_ID = "gemini-2.0-flash-exp"
# Read the API key from the environment (`os` import was missing and caused
# a NameError at startup); never hard-code secrets in source.
api_key = os.getenv("GEMINI_API_KEY")
model_id = MODEL_ID
genai.configure(api_key=api_key)

# Cache the model in session state so Streamlit reruns reuse one instance
# instead of re-creating it on every interaction.
if "model" not in st.session_state:
    st.session_state.model = genai.GenerativeModel(MODEL_ID)

model = st.session_state.model
# NOTE(review): this chat session is created but never used below —
# generation goes through model.generate_content(). Confirm intent or remove.
chat = model.start_chat()

# Prompt fragments appended later when the matching checkbox is ticked.
creative_prompt = ""
factcheck_prompt = ""
# Page Title
st.title("LemmaTeks: AI-Powered Text Generator")

# Generation controls — both values feed the Gemini request further down.
text_length = st.slider("Text Length (words):", min_value=50, max_value=1000, step=50)
creativity_level = st.slider("Creativity Level:", min_value=0.0, max_value=1.0, step=0.1)
42 |
|
|
|
|
|
|
|
# Checkboxes for Features
creative_mode = st.checkbox("Enable Creative Mode")
fact_checking = st.checkbox("Enable Fact-Checking")

# Modify prompt based on settings
if creative_mode:
    creative_prompt = " Optimize the creativity of your response. "
if fact_checking:
    factcheck_prompt = "Support your answer with evidences. "

# Text Input Field
user_prompt = st.text_area("Enter Your Prompt Here:")

# Append the creative / fact-checking instructions ONLY when the user typed
# something: the original unconditional append made an empty prompt
# non-empty, which defeated the empty-prompt guard on the Generate button.
if user_prompt.strip():
    user_prompt = user_prompt + creative_prompt + factcheck_prompt
59 |
# Submit Button
|
60 |
if st.button("Generate"):
|
61 |
if user_prompt.strip() == "":
|
|
|
65 |
st.write("Generating responses...")
|
66 |
|
67 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
|
69 |
+
response = model.generate_content(
|
70 |
+
f"Format: {output_format}\nTone: {tone_style}\nPrompt: {user_prompt}",
|
71 |
+
generation_config = genai.GenerationConfig(
|
72 |
+
max_output_tokens=1000,
|
73 |
+
temperature=creativity_level,
|
74 |
+
)
|
75 |
+
)
|
76 |
+
|
77 |
+
st.markdown(response.text)
|
78 |
+
|
79 |
except Exception as e:
|
80 |
+
st.error(f"Exception occured: {e}")
|
81 |
+
|