good morning commit

- app.py +22 -6
- proper_main.py +7 -8
app.py CHANGED

@@ -1,6 +1,8 @@
 import streamlit as st
 from resource import *
 from proper_main import *
+from resource import llm_chain
+import time
 
 def main():
     st.title("GPT4All Chatbot")

@@ -10,12 +12,26 @@ def main():
 
     # Generate response
     if st.button("Submit"):
-        [six removed lines; their content was not captured in this view]
+        try:
+            repos,status=web_scrape(user_url)
+
+            task_progress = st.progress(0)
+
+            task_progress.progress("Tools is taking action please wait")
+
+            if status == 0:
+                repo_path = data_cloning(repos)
+                data_cleaning(repo_path)
+                query = analyse()
+                response_gpt = llm_chain([str(query)])
+                # Display the response
+                st.text_area("Bot Response:", value=response_gpt, height=100)
+        except Exception as e:
+            output=st.empty()
+            output.error(f"Error occured please contact the admin",e)
+            time.sleep(5)
+            st.experimental_rerun()
+
 
 if __name__ == "__main__":
     main()
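Taken together, the new Submit handler calls web_scrape, then data_cloning / data_cleaning / analyse, and feeds the result to llm_chain. Two of the added Streamlit calls would likely raise at runtime: st.progress expects a numeric completion value (0-100 or 0.0-1.0), not a label string, and st.error takes a single message body, so passing the exception as a second positional argument will fail. Below is a minimal, hypothetical rework of the handler under those assumptions; user_url is assumed to come from a st.text_input widget outside the captured hunk, and the module homes of data_cleaning and analyse are inferred from the star imports, not confirmed by the diff.

    # Hypothetical rework of the Submit handler shown in the diff above.
    # web_scrape, data_cloning, data_cleaning, analyse and llm_chain are the
    # helpers the commit references; user_url's input widget is assumed.
    import time

    import streamlit as st

    from proper_main import web_scrape, data_cloning, data_cleaning, analyse
    from resource import llm_chain


    def main():
        st.title("GPT4All Chatbot")
        user_url = st.text_input("GitHub profile URL")  # assumed input widget

        if st.button("Submit"):
            try:
                repos, status = web_scrape(user_url)

                # st.progress expects a number, not a label string; use
                # st.spinner for a textual "please wait" indicator instead.
                with st.spinner("Tool is taking action, please wait..."):
                    if status == 0:
                        repo_path = data_cloning(repos)
                        data_cleaning(repo_path)
                        query = analyse()
                        response_gpt = llm_chain([str(query)])
                        st.text_area("Bot Response:", value=response_gpt, height=100)
                    else:
                        # web_scrape signals failure with a (message, 1) tuple
                        st.warning(repos)
            except Exception as exc:
                # st.error takes a single message body; interpolate the
                # exception instead of passing it as a second argument.
                st.error(f"Error occurred, please contact the admin: {exc}")
                time.sleep(5)
                st.experimental_rerun()  # st.rerun() in newer Streamlit releases


    if __name__ == "__main__":
        main()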
proper_main.py CHANGED

@@ -20,11 +20,12 @@ except Exception as e:
     os.system("pip install -r " + curr_dir + "/requirements.txt")
 
 
-[removed line; content not captured in this view]
+
 
 
 def web_scrape(user_url):
     base_url = "https://www.github.com"
+    repos = []
     user_url = user_url
 
     if user_url.endswith("/"):

@@ -33,26 +34,24 @@ def web_scrape(user_url):
     try:
         response = requests.get(user_url + "?tab=repositories")
     except Exception as e:
-        [removed line; content not captured in this view]
-        web_scrape()
+        return ("Please provide a valid link:", 1)
 
     if response.status_code != 200:
-        [removed line; content not captured in this view]
-        web_scrape()
+        return ("Please provide a valid link.",1)
 
     make_soup = BeautifulSoup(response.text, 'html.parser')
     li = make_soup.findAll('div', class_='d-inline-block mb-1')
     if len(li) == 0:
-        [removed line; content not captured in this view]
-        web_scrape()
+        return ("Please Provide the Valid Link",1)
 
     for _, i in enumerate(li):
         for a in i.findAll('a'):
             new_url = base_url + a['href']
             repos.append(new_url)
+    return (respo,0)
 
 
-def data_cloning():
+def data_cloning(repos):
     os.mkdir("/tmp/repos")
     os.chdir("/tmp/repos")
     for i in repos:
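The reworked web_scrape now signals failure by returning a (message, 1) tuple and success with (repos, 0), which is exactly the contract app.py unpacks as repos, status. Two things stand out in the added lines: the final return (respo,0) references a name that does not exist (presumably a typo for repos), and data_cloning calls os.mkdir unconditionally, so a second run will crash once /tmp/repos already exists. The sketch below keeps that (payload, status) contract; the clone-loop body and the return value of data_cloning are assumptions (neither is part of the captured hunks, only app.py's repo_path = data_cloning(repos) hints at a returned path), and a git clone per repository is one plausible implementation.

    # Hypothetical tightening of web_scrape/data_cloning under the commit's
    # (payload, status) contract: status 0 = list of repo URLs, status 1 = error message.
    import os
    import subprocess

    import requests
    from bs4 import BeautifulSoup


    def web_scrape(user_url):
        base_url = "https://www.github.com"
        repos = []

        user_url = user_url.rstrip("/")

        try:
            response = requests.get(user_url + "?tab=repositories", timeout=30)
        except Exception:
            return ("Please provide a valid link.", 1)

        if response.status_code != 200:
            return ("Please provide a valid link.", 1)

        make_soup = BeautifulSoup(response.text, "html.parser")
        li = make_soup.find_all("div", class_="d-inline-block mb-1")
        if len(li) == 0:
            return ("Please provide a valid link.", 1)

        for block in li:
            for a in block.find_all("a"):
                repos.append(base_url + a["href"])

        # The commit returns (respo, 0) here, which looks like a typo for repos.
        return (repos, 0)


    def data_cloning(repos):
        # exist_ok avoids the crash the commit's bare os.mkdir would hit on reruns.
        os.makedirs("/tmp/repos", exist_ok=True)
        os.chdir("/tmp/repos")
        for url in repos:
            # Assumed loop body: the captured diff stops at "for i in repos:".
            subprocess.run(["git", "clone", url], check=False)
        return "/tmp/repos"  # assumed: app.py treats the result as repo_path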