import streamlit as st
import requests
import os
# Set page title and layout
st.set_page_config(page_title="AI Generator", layout="wide")
# API key from environment variable
API_KEY = os.environ.get("NEBIUS_API_KEY")
if not API_KEY:
    st.error("API key not found. Please set the `NEBIUS_API_KEY` environment variable.")
    st.stop()  # halt the script so the API is never called without a key
# Function to call Nebius API
def generate_response(prompt, api_key):
api_url = "https://api.studio.nebius.ai/v1/chat/completions"
headers = {"Authorization": f"Bearer {api_key}"}
payload = {
"model": "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
"messages": [
{"role": "system", "content": """You are a Prompt Generator designed to create task-specific prompts for various user requests. Your goal is to structure prompts in a clear and organized format, ensuring that each step or instruction is well-defined and actionable.
Generate Prompt:
Task:
[Specify the task or action the user needs help with; always give it a persona, like "you are a {task based on user input} generator", e.g. an article generator; your job is to generate {Define Task in simple way}]
Objective:
[Define the goal or purpose of the task, including what the user aims to achieve]
Steps:
[List the steps or instructions required to complete the task]
Considerations (Optional):
[Include any additional factors the user should consider, such as limitations, preferences, or specific conditions]
Output Format (Optional):
[Describe the desired output format, whether it's a report, image, text, or other deliverables]
Guidelines for Task-Specific Prompts:
Structure the task prompt clearly with numbered steps or bullet points for easy understanding.
Tailor the language and level of complexity based on the user’s input or desired difficulty level.
Ensure the prompt is actionable, providing clear instructions that lead to the intended outcome. Don't write anything right now; wait for my command. """},
{"role": "user", "content": prompt}
],
"temperature": 0.6,
"max_tokens": 512,
"top_p": 0.9,
"top_k": 50
}
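    # Sampling settings: temperature 0.6 with top_p 0.9 keeps the generated
    # prompt fairly focused while allowing some variety; max_tokens caps its
    # length. "top_k" is not part of the standard OpenAI-style request schema,
    # so whether the Nebius endpoint honours it is an assumption here.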
    response = requests.post(api_url, headers=headers, json=payload)
    if response.status_code == 200:
        return response.json()
    else:
        st.error(f"Error: {response.status_code}, {response.text}")
        return None
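# NOTE: the parsing further below assumes the endpoint returns an OpenAI-style
# chat-completion payload. A rough sketch of the expected shape (field names
# are an assumption based on that schema, not taken from Nebius documentation):
#
#   {
#       "choices": [
#           {"message": {"role": "assistant", "content": "<generated prompt>"}}
#       ]
#   }
#
# which is why the UI reads result["choices"][0]["message"]["content"].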
# Custom CSS for centering
st.markdown(
"""
<style>
.title-container {
text-align: center;
margin-bottom: 20px;
}
</style>
""",
unsafe_allow_html=True
)
# Centered title
#st.markdown('<div class="title-container"><h1>AI Title Generator</h1></div>', unsafe_allow_html=True)
# Input bar for user prompt
user_input = st.text_area(
    label="Your Text Goes Here:",
    placeholder="Type or Paste Your Input..."
)
if st.button("Generate", use_container_width=True):
if user_input.strip():
with st.spinner("Generating... Please wait!"):
result = generate_response(user_input, API_KEY)
if result:
try:
# Extracting generated titles
assistant_message = result["choices"][0]["message"]["content"]
# Enhanced Output with Markdown
st.markdown(
f"""
<div style="background-color:#000; padding:15px; border-radius:8px;">
<pre style="color:#000; font-family:monospace; white-space:pre-wrap;">{assistant_message}</pre>
</div>
""",
unsafe_allow_html=True
)
except KeyError as e:
st.error(f"Unexpected response format: {e}")
else:
st.warning("Please provide input before clicking Generate.")
# st.markdown('</div>', unsafe_allow_html=True)  # closing tag for the commented-out title container above
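# To try the app locally (a sketch; the file name app.py is an assumption):
#
#   export NEBIUS_API_KEY="..."   # Nebius Studio API key
#   streamlit run app.py
#
# Streamlit then serves the generator at http://localhost:8501 by default.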