Haseeb-001 committed on
Commit
9d29b06
·
verified ·
1 Parent(s): bd2552c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +50 -126
app.py CHANGED
@@ -1,140 +1,64 @@
1
-
2
- import os
3
  import streamlit as st
4
- from groq import Groq
5
-
6
- # Set the API key
7
- GROQ_API_KEY = "gsk_[REDACTED — live credential was committed here; rotate this key immediately]"
8
-
9
- # Initialize the Groq client
10
- client = Groq(api_key=GROQ_API_KEY)
11
 
12
- # Initialize session state for conversation history
13
- if 'conversation' not in st.session_state:
14
- st.session_state.conversation = []
 
15
 
16
- # Streamlit UI setup
17
- st.set_page_config(page_title="Code Generator", layout="wide")
18
- st.title("Code Generator")
 
 
19
 
20
- # Custom CSS for neon theme
21
- st.markdown(
22
- """
23
- <style>
24
- body {
25
- background-color: #000;
26
- color: #fff;
27
- font-family: 'Courier New', Courier, monospace;
28
- }
29
- .stButton > button {
30
- background-color: #5e72e4;
31
- color: white;
32
- border: none;
33
- padding: 10px 20px;
34
- border-radius: 5px;
35
- cursor: pointer;
36
- transition: background-color 0.3s ease;
37
- }
38
- .stButton > button:hover {
39
- background-color: #324ab2;
40
- }
41
- .stTextInput > div > div > input {
42
- background-color: #1a1a1a;
43
- color: #fff;
44
- border: 1px solid #5e72e4;
45
- border-radius: 5px;
46
- padding: 10px;
47
- }
48
- .stMarkdown {
49
- color: #5e72e4;
50
- }
51
- </style>
52
- """,
53
- unsafe_allow_html=True
54
- )
55
 
56
- # Function to generate code
57
- def generate_code(query):
58
- messages = [
59
- {"role": "user", "content": query}
60
- ]
61
- messages = st.session_state.conversation + messages
62
- completion = client.chat.completions.create(
63
- model="qwen-2.5-coder-32b",
64
- messages=messages,
65
- temperature=0.6,
66
- max_completion_tokens=4096,
67
- top_p=0.95,
68
- stream=False,
69
- stop=None,
 
 
 
70
  )
71
- return completion.choices[0].message.content
72
 
73
- # Function to check code correctness
74
- def check_code_correctness(code):
75
- # Placeholder for code correctness check
76
- # This can be replaced with an actual code checking mechanism
77
- return True
78
-
79
- # Function to explain code
80
  def explain_code(code):
81
- messages = [
82
- {"role": "user", "content": f"Explain the following code:\n{code}"}
83
- ]
84
- completion = client.chat.completions.create(
85
- model="llama-3.3-70b-versatile",
86
- messages=messages,
87
- temperature=0.6,
88
- max_completion_tokens=4096,
89
- top_p=0.95,
90
- stream=False,
91
- stop=None,
92
- )
93
- return completion.choices[0].message.content
94
 
95
- # Main UI components
96
- query = st.text_area("Enter your query/requirement/demand for the app:", height=100)
 
 
97
 
98
- if st.button("Generate Code"):
99
- try:
100
- generated_code = generate_code(query)
101
- st.session_state.conversation.append({"role": "assistant", "content": generated_code})
102
- st.code(generated_code, language="python")
103
-
104
- # Placeholder for multiple solutions
105
- # You can replace this with actual logic to fetch multiple solutions
106
- alternative_solutions = ["Alternative Solution 1", "Alternative Solution 2"]
107
- if alternative_solutions:
108
- st.sidebar.header("Alternative Solutions")
109
- for idx, solution in enumerate(alternative_solutions, start=1):
110
- st.sidebar.write(f"**Solution {idx}:** {solution}")
111
 
112
- # Check code correctness
113
- if check_code_correctness(generated_code):
114
- st.success("The generated code is correct.")
115
- else:
116
- st.error("The generated code may contain errors.")
117
-
118
- except Exception as e:
119
- st.error(f"An error occurred: {e}")
120
 
121
- if st.button("Explain Code"):
122
- try:
123
- code_to_explain = st.text_area("Enter the code you want to explain:", height=200)
124
- explanation = explain_code(code_to_explain)
125
- st.markdown(f"### Explanation:\n{explanation}")
126
- except Exception as e:
127
- st.error(f"An error occurred: {e}")
128
 
129
- if st.button("New Code"):
130
- st.session_state.conversation = []
131
- st.experimental_rerun()
132
 
133
- # Display conversation history
134
- if st.session_state.conversation:
135
- st.sidebar.header("Conversation History")
136
- for message in st.session_state.conversation:
137
- if message["role"] == "user":
138
- st.sidebar.markdown(f"**User:** {message['content']}")
139
- else:
140
- st.sidebar.markdown(f"**Assistant:** {message['content']}")
 
 
 
1
  import streamlit as st
2
+ import torch
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
+ import groq
 
 
 
 
5
 
6
# Constants
PRIMARY_MODEL = "Qwen/CodeQwen1.5-7B-Chat"  # local HF checkpoint used for code generation
BACKUP_MODEL = "llama-3.3-70b-versatile"  # Groq-hosted fallback model (see generate_code_backup)
# Preferred inference device; NOTE(review): the model is loaded with
# device_map="auto", which may place weights independently of this constant.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
10
 
11
# Load Primary Model & Tokenizer
@st.cache_resource
def load_model():
    """Load the primary code model and its tokenizer.

    Decorated with st.cache_resource so the 7B checkpoint is loaded once
    per server process. Without caching, Streamlit re-executes the whole
    script on every widget interaction and would re-load the model each
    time — the dominant cost of the original version.

    Returns:
        tuple: (model, tokenizer) for PRIMARY_MODEL.
    """
    tokenizer = AutoTokenizer.from_pretrained(PRIMARY_MODEL)
    # device_map="auto" lets accelerate place weights on GPU/CPU as available.
    model = AutoModelForCausalLM.from_pretrained(PRIMARY_MODEL, device_map="auto")
    return model, tokenizer
16
 
17
# Eagerly load the primary model at import time so the first request is fast.
# NOTE(review): every Streamlit rerun re-executes this line; confirm that
# load_model() is cached (e.g. st.cache_resource), otherwise the checkpoint
# is reloaded on each interaction.
model, tokenizer = load_model()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
 
19
def generate_code(prompt):
    """Generate code from *prompt* using the primary local model.

    Falls back to the Groq-hosted backup model when the local model fails
    for any reason (missing weights, out-of-memory, CUDA errors, ...).

    Args:
        prompt: Natural-language request or code prompt.

    Returns:
        str: The decoded output sequence. The full sequence is decoded, so
        the prompt text is included at the start of the return value.
    """
    try:
        # Place inputs on the device the model actually lives on; with
        # device_map="auto" that may differ from the module-level DEVICE.
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        # max_new_tokens, not max_length: max_length caps prompt + output
        # combined, so a long prompt would truncate the generation to nothing.
        with torch.no_grad():
            outputs = model.generate(**inputs, max_new_tokens=512)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception:
        # Deliberately broad: any primary-model failure routes to the backup.
        st.error("Primary model failed. Switching to backup...")
        return generate_code_backup(prompt)
28
+
29
def generate_code_backup(prompt):
    """Use Groq API for backup model code generation.

    Args:
        prompt: The user prompt to send to the backup chat model.

    Returns:
        str: The assistant message content from the Groq completion.
    """
    import os  # local import: only needed on this fallback path

    # Read the credential from the environment instead of hard-coding it in
    # source (the original shipped a "YOUR_GROQ_API_KEY" placeholder, which
    # can never work and invites committing a real key). The placeholder
    # default keeps behavior identical when the variable is unset.
    api_key = os.environ.get("GROQ_API_KEY", "YOUR_GROQ_API_KEY")
    client = groq.Client(api_key=api_key)
    response = client.chat.completions.create(
        model=BACKUP_MODEL,
        messages=[
            {"role": "system", "content": "You are a helpful AI."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content
38
 
 
 
 
 
 
 
 
39
def explain_code(code):
    """Produce a detailed natural-language explanation of *code*.

    Delegates to generate_code(), so the primary model is tried first and
    the Groq-hosted backup is used automatically on failure.
    """
    return generate_code(f"Explain this code in detail:\n{code}")
 
 
 
 
 
 
 
 
 
 
43
 
44
# Streamlit UI
st.set_page_config(page_title="AI Code Generator", layout="wide")
# NOTE(review): the emoji below appear mojibake'd in the committed source;
# preserved byte-for-byte here since they are runtime strings.
st.title("πŸš€ AI Code Generator App")
st.markdown("### Generate and edit code with AI!")

# Sidebar for Alternative Solutions
st.sidebar.header("⚑ Alternative Solutions")

user_query = st.text_area("Enter your app idea:", height=150)

if st.button("Generate Code"):
    with st.spinner("Generating code..."):
        generated_code = generate_code(user_query)
        st.code(generated_code, language="python")
        # Persist across reruns so the "Explain Code" button can see it.
        st.session_state["generated_code"] = generated_code

if "generated_code" in st.session_state and st.button("πŸ“œ Explain Code"):
    explanation = explain_code(st.session_state["generated_code"])
    st.text_area("Explanation:", explanation, height=250)


def _reset_generated_code():
    """Clear the stored result so the app starts fresh.

    Fixes the original on_click=lambda: st.experimental_rerun(): Streamlit
    already reruns the script after a button callback, and calling rerun
    from inside a callback is ignored (with a warning) — so "New Code"
    previously reset nothing. Removing the stored code is the actual reset.
    """
    st.session_state.pop("generated_code", None)


st.sidebar.button("πŸ”„ New Code", on_click=_reset_generated_code)