Update app.py
app.py CHANGED
@@ -36,7 +36,7 @@ client = None
 def load_llm():
     # Instantiate the SambaNova model
     llm = SambaNovaCloud(
-        model="Meta-Llama-3.1-405B-Instruct",
+        model="Meta-Llama-3.1-405B-Instruct",
         context_window=100000,
         max_tokens=1024,
         temperature=0.7,
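For context, the full helper around this hunk presumably reads as below once the change lands. The import is an assumption: the diff never shows the top of the file, and the module path of the llama-index SambaNova integration varies by package version.

# Minimal sketch of load_llm() after this commit; the import path is
# an assumption, since the diff only shows the function body.
from llama_index.llms.sambanovasystems import SambaNovaCloud  # assumed module path

def load_llm():
    # Instantiate the SambaNova model with the parameters shown in the diff
    llm = SambaNovaCloud(
        model="Meta-Llama-3.1-405B-Instruct",
        context_window=100000,
        max_tokens=1024,
        temperature=0.7,
    )
    return llm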
@@ -45,17 +45,6 @@ def load_llm():
     )
     return llm
 
-def reset_chat():
-    st.session_state.messages = []
-    st.session_state.context = None
-    gc.collect()
-
-def process_with_gitingets(github_url):
-    # or from URL
-    summary, tree, content = ingest(github_url)
-    return summary, tree, content
-
-
 with st.sidebar:
     st.header(f"Add your GitHub repository!")
 
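The two helpers removed above are small: reset_chat() clears the Streamlit chat state, and process_with_gitingets() wraps gitingest's ingest(), which takes a GitHub URL or local path and returns a summary, a directory tree, and the concatenated file contents. Note that process_with_gitingets is still called in the next hunk, so the definition presumably moved rather than disappeared. A self-contained sketch of what was deleted:

# Sketch of the deleted helpers, with the imports they relied on;
# assumes gitingest's synchronous ingest() API.
import gc
import streamlit as st
from gitingest import ingest

def reset_chat():
    # Drop the conversation and cached context, then force garbage collection
    st.session_state.messages = []
    st.session_state.context = None
    gc.collect()

def process_with_gitingets(github_url):
    # ingest() accepts a repo URL or a local path ("or from URL" in the original)
    summary, tree, content = ingest(github_url)
    return summary, tree, content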
@@ -70,6 +59,7 @@ with st.sidebar:
         file_key = f"{session_id}-{repo_name}"
 
         if file_key not in st.session_state.get('file_cache', {}):
+
             if os.path.exists(temp_dir):
                 summary, tree, content = process_with_gitingets(github_url)
 
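The guard shown here is a per-session cache check: file_key combines the session id with the repository name, and the expensive ingest-and-index work is skipped when the key is already present. A sketch of the pattern, assuming file_cache is initialised as a dict elsewhere in app.py:

# Sketch of the session-scoped cache guard; session_id and repo_name
# are defined earlier in app.py and only assumed here.
import streamlit as st

if "file_cache" not in st.session_state:
    st.session_state.file_cache = {}

file_key = f"{session_id}-{repo_name}"
if file_key not in st.session_state.get('file_cache', {}):
    ...  # ingest the repo and build the index once per repo per session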
@@ -91,7 +81,7 @@ with st.sidebar:
             docs = loader.load_data()
 
             # setup llm & embedding model
-            llm
+            llm=load_llm()
 
             # Mixedbread AI embedding setup
             embed_model = MixedbreadAIEmbedding(
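This hunk is the substantive fix in the commit: the old line was a bare llm expression statement, which either raises a NameError or silently does nothing, depending on whether llm exists elsewhere; either way load_llm() was never invoked. The new line binds the result. How the surrounding setup plausibly looks, with the Mixedbread arguments assumed because the diff truncates them:

# Sketch of the llm / embedding setup after the fix. The MixedbreadAIEmbedding
# arguments are assumptions; the diff cuts off before them.
import os
from llama_index.embeddings.mixedbreadai import MixedbreadAIEmbedding

llm = load_llm()  # the corrected line: actually call load_llm()

embed_model = MixedbreadAIEmbedding(
    api_key=os.getenv("MXBAI_API_KEY"),               # assumed env var
    model_name="mixedbread-ai/mxbai-embed-large-v1",  # assumed model name
)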
@@ -142,7 +132,6 @@ with st.sidebar:
         st.error(f"An error occurred: {e}")
         st.stop()
 
-
 col1, col2 = st.columns([6, 1])
 
 with col1:
@@ -193,7 +182,7 @@ if prompt := st.chat_input("What's up?"):
         # Handle streaming response
         if hasattr(response, 'response_gen'):
             for chunk in response.response_gen:
-                if isinstance(chunk, str):
+                if isinstance(chunk, str):
                     full_response += chunk
                     message_placeholder.markdown(full_response + "▌")
         else:
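The streaming branch follows the usual llama-index plus Streamlit pattern: response_gen yields text chunks, which are accumulated and re-rendered into a single placeholder with a cursor glyph. A condensed sketch, where response and message_placeholder are assumed to come from earlier in the file:

# Sketch of the streaming render loop; `response` is assumed to come from
# a llama-index streaming query and message_placeholder from st.empty().
full_response = ""

if hasattr(response, 'response_gen'):
    for chunk in response.response_gen:
        if isinstance(chunk, str):        # guard against non-text chunks
            full_response += chunk
            message_placeholder.markdown(full_response + "▌")  # typing cursor
else:
    full_response = str(response)         # non-streaming fallback (assumed)

message_placeholder.markdown(full_response)  # final render, cursor removed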
@@ -208,4 +197,4 @@ if prompt := st.chat_input("What's up?"):
         message_placeholder.markdown(full_response)
 
     # Add assistant response to chat history
-    st.session_state.messages.append({"role": "assistant", "content": full_response})
+    st.session_state.messages.append({"role": "assistant", "content": full_response})
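Finally, the assistant turn is appended to st.session_state.messages so it survives Streamlit reruns. The matching user-side append is not visible in the diff and is assumed from the standard st.chat_input pattern:

# Sketch of the chat-history round trip; the user-side append is an
# assumption based on the conventional st.chat_input idiom.
if prompt := st.chat_input("What's up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    ...  # generate full_response as in the streaming loop above
    st.session_state.messages.append({"role": "assistant", "content": full_response})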