Upload folder using huggingface_hub
- .gradio/certificate.pem +31 -0
- NextGenAgile.py +208 -0
- README.md +3 -9
.gradio/certificate.pem
ADDED
@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
NextGenAgile.py
ADDED
@@ -0,0 +1,208 @@
+import os
+import langchain_mistralai
+from langchain.agents import AgentType, initialize_agent
+import gradio as gr
+from mistralai import Mistral
+import pandas as pd
+from collections import Counter
+from openai import AzureOpenAI
+import time
+import re
+
+# Memory per persona
+structured_memory = {
+    "👨‍💼 Scrum Master": {},
+    "💡 Product Owner": {},
+    "👨‍💻 Developer": {},
+    "🎨 Designer": {},
+}
+selected_items = []
+
+def prepare_model(selected_model):
+    if selected_model == 'o4-mini':
+        client = AzureOpenAI(
+            api_version="2024-12-01-preview",
+            azure_endpoint="https://nextgenagilehu4401821853.openai.azure.com/",
+            api_key="2ziHxIi2Pz511IscFVVjBpgHkm2nDXNfPAvXyFWpsmCHlpHwAOuJJQQJ99BEACHYHv6XJ3w3AAAAACOGmC8m"  # Use a string instead of AzureKeyCredential
+        )
+    elif selected_model == 'o3-mini':
+        client = AzureOpenAI(
+            api_version="2024-12-01-preview",
+            azure_endpoint="https://nextgenagilehu4401821853.openai.azure.com/",
+            api_key="2ziHxIi2Pz511IscFVVjBpgHkm2nDXNfPAvXyFWpsmCHlpHwAOuJJQQJ99BEACHYHv6XJ3w3AAAAACOGmC8m"  # Use a string instead of AzureKeyCredential
+        )
+    return client
+
+def clear_selections():
+    global selected_items
+    selected_items.clear()
+    return ""
+
+def add_selection(option):
+    global selected_items
+    selected_items += [option]
+    flat_list = [item for sublist in selected_items for item in sublist]
+    counts = Counter(flat_list)
+    formatted_output = ", ".join(f"{count} {option.lower()}" for option, count in counts.items())
+    return formatted_output
+
+# Persona system prompts with emojis
+persona_prompts = {
+    "👨‍💼 Scrum Master": "You are an experienced Scrum Master responsible for creating detailed JIRA tickets across parameters like EPIC, Feature, Story, Tasks, Subtasks, assignee based on job designation, sprint and story points",
+    "💡 Product Owner": "You are a Product Owner focused on defining product features, value delivery, and aligning JIRA tickets with business outcomes. Depending on requirements you can focus upon these aspects: Description, Business Context, User Value, Scope, Out of Scope, Dependencies, Acceptance Criteria, Definition of Done, User Impact Metrics, Next Steps",
+    "👨‍💻 Developer": "You are a Software Developer breaking down technical objectives into implementation tasks, bug fixes, and engineering subtasks. Depending on requirements you can focus upon these aspects: Description, Acceptance Criteria, Technical Details: Frontend, Backend, Database, Integrations, Logging & Monitoring, Component/s, Definition of Done, Sub-tasks, Story Points",
+    "🎨 Designer": "You are a UX/UI Designer structuring JIRA tickets around user flows, UI components, and design assets."
+}
+
+# Default prompt examples
+persona_default_prompts = {
+    "👨‍💼 Scrum Master": "Objective: Automate JIRA story creation for a new agile project",
+    "💡 Product Owner": "Objective: Define user stories for a travel booking feature",
+    "👨‍💻 Developer": "Objective: Break down the user login system into dev tasks",
+    "🎨 Designer": "Objective: Create tasks for designing the homepage UI"
+}
+
+model_select = {
+    "O-4 Mini": "o4-mini",
+    "O-3 Mini": "o3-mini"
+}
+# Process function with memory per persona
+def process_files(message, chat_history, persona, model):
+    selected_model = model_select.get(model)
+    client = prepare_model(selected_model)
+    system_prompt = persona_prompts.get(persona)
+    memory = structured_memory[persona]
+
+    if message.lower().startswith("objective:"):
+        objective = message.split(":", 1)[-1].strip()
+        if len(selected_items) > 0:
+            flat_list = [item for sublist in selected_items for item in sublist]
+            counts = Counter(flat_list)
+            formatted_output = ", ".join(f"{count} {option.lower()}" for option, count in counts.items())
+            full_prompt = f"Create a complete JIRA ticket for the {objective} where your team structure is: {formatted_output}"
+        else:
+            full_prompt = f"Create a complete JIRA ticket for the {objective}"
+
+        chat_response = client.chat.completions.create(
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": full_prompt}
+            ],
+            max_completion_tokens=100000,
+            model=selected_model
+        )
+        bot_message = chat_response.choices[0].message.content
+        memory["current"] = bot_message
+
+    elif any(keyword in message.lower() for keyword in ["improve", "update", "change"]):
+        if "current" not in memory:
+            bot_message = "Please provide a project objective first using 'Objective: <your project>'."
+        else:
+            previous_output = memory["current"]
+            improvement_prompt = f"Improve the following JIRA ticket structure based on the user's instruction.\n\nStructure:\n{previous_output}\n\nUser Request:\n{message}"
+            chat_response = client.chat.completions.create(
+                messages=[
+                    {"role": "system", "content": system_prompt},
+                    {"role": "user", "content": improvement_prompt}
+                ],
+                max_completion_tokens=100000,
+                model=selected_model
+            )
+            bot_message = chat_response.choices[0].message.content
+            memory["current"] = bot_message
+    else:
+        bot_message = "Please start with 'Objective: <project>' or a request to update something."
+
+    chat_history.append({"role": "user", "content": message})
+    chat_history.append({"role": "assistant", "content": bot_message})
+    time.sleep(1)
+    return "", chat_history
+
+def insert_prefix(prefix_text, textbox_content):
+    if not textbox_content.lower().startswith(prefix_text.lower()):
+        return f"{prefix_text} {textbox_content}"
+    else:
+        return textbox_content
+
+def set_default_prompt(persona):
+    return persona_default_prompts.get(persona, "")
+
+# Gradio App
+with gr.Blocks() as demo:
+    with gr.Row():
+        with gr.Column(scale=1):  # Sidebar
+            gr.Image("/afh/projects/NextGenAgileTool-aefe44c3-e188-4674-9bd7-fd55842e362e/data/logo-removebg-preview.png", width=135, height=100, show_download_button=False, container=False, show_fullscreen_button=False, show_label=False)
+            # gr.Markdown("### *NextGenAgile* – AI Jira Assistant")
+            gr.Markdown("### 🔽 Choose Model")
+
+            model = gr.Dropdown(
+                choices=[
+                    "O-4 Mini",
+                    "O-3 Mini"
+                ],
+                value="",
+                label=""
+            )
+
+            gr.Markdown("### 🧑 Choose Persona")
+
+            persona = gr.Dropdown(
+                choices=[
+                    "👨‍💼 Scrum Master",
+                    "💡 Product Owner",
+                    "👨‍💻 Developer",
+                    "🎨 Designer"
+                ],
+                value="👨‍💼 Scrum Master",
+                label="🧑 Persona"
+            )
+
+        with gr.Column(scale=9):
+
+            chatbot = gr.Chatbot(
+                label="Chat History",
+                type="messages",
+                height=500,
+                value=[
+                    {"role": "assistant", "content": "Hello! 👋 Start with an 'Objective' or ask to 'Improve' an earlier ticket."}
+                ]
+            )
+
+            with gr.Row():
+                objective_btn = gr.Button("➡️ Objective")
+                improve_btn = gr.Button("✏️ Improve")
+                update_btn = gr.Button("♻️ Update")
+                change_btn = gr.Button("🔄 Change")
+
+            with gr.Row():
+                msg = gr.Textbox(
+                    placeholder="Type your request here...",
+                    label="Your Prompt",
+                    lines=2,
+                    submit_btn=True
+                )
+
+            clear = gr.ClearButton([msg, chatbot])
+
+    # Hook logic
+    msg.submit(process_files, [msg, chatbot, persona, model], [msg, chatbot])
+    persona.change(set_default_prompt, persona, msg)
+
+    objective_btn.click(insert_prefix, inputs=[gr.State("Objective:"), msg], outputs=msg)
+    improve_btn.click(insert_prefix, inputs=[gr.State("Improve:"), msg], outputs=msg)
+    update_btn.click(insert_prefix, inputs=[gr.State("Update:"), msg], outputs=msg)
+    change_btn.click(insert_prefix, inputs=[gr.State("Change:"), msg], outputs=msg)
+
+# if __name__ == "__main__":
+# demo.launch(debug=True, share=True)
+
+
+
server_info = demo.launch(share=True)
|
201 |
+
|
202 |
+
# Extract the public URL using regex
|
203 |
+
match = re.search(r"Running on public URL: (https?://[^\s]+)", server_info)
|
204 |
+
public_url = match.group(1) if match else "URL not found"
|
205 |
+
|
206 |
+
# Save only the public URL to a file
|
207 |
+
with open("gradio_link.txt", "w") as f:
|
208 |
+
f.write(public_url)
|
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: purple
-colorTo: indigo
+title: flask_app
+app_file: NextGenAgile.py
 sdk: gradio
-sdk_version: 5.
-app_file: app.py
-pinned: false
+sdk_version: 5.29.0
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
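
With this change applied, the resulting Space front matter (assembled from the kept and added lines above) is:

---
title: flask_app
app_file: NextGenAgile.py
sdk: gradio
sdk_version: 5.29.0
---

so the Space now builds against the Gradio 5.29.0 SDK and launches NextGenAgile.py instead of the previous app.py.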