Not-Grim-Refer committed
Commit a875418 · 1 Parent(s): 88bd7aa

Update app.py

Files changed (1)
  1. app.py +39 -42
app.py CHANGED
@@ -1,43 +1,40 @@
  import os
- import logging
- import gradio as gr
- import requests
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-
- debug = True
- logging_level = logging.DEBUG if debug else logging.INFO
- logging.basicConfig(level=logging_level)
-
- # Initialize the CodeBERT model and tokenizer
- tokenizer = AutoTokenizer.from_pretrained("microsoft/CodeBERT-base")
- model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/CodeBERT-base")
-
- def fetch_repo_contents(repo_url):
-     username, repo_name = repo_url.split("github.com/")[-1].split("/")
-     api_url = f"https://api.github.com/repos/{username}/{repo_name}/contents"
-     response = requests.get(api_url)
-     response.raise_for_status()
-     return response.json()
-
- def generate_chatbot_response(repo_url, question):
-     repo_contents = fetch_repo_contents(repo_url)
-     prompt = f"Answer the question about the repository {repo_url}: {question}\n\n"
-     for item in repo_contents:
-         prompt += f"{item['name']}:\n{item['download_url']}\n\n"
-     inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=1024, truncation=True)
-     outputs = model.generate(inputs, max_length=150, num_return_sequences=1)
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
-
- # Gradio UI
- repo_url_input = gr.inputs.Text(label="GitHub Repository URL")
- question_input = gr.inputs.Text(label="Question")
- output_text = gr.outputs.Text(label="Answer")
-
- gr.Interface(
-     generate_chatbot_response,
-     inputs=[repo_url_input, question_input],
-     outputs=output_text,
-     title="Create a Conversational AI Chatbot for Your Public GitHub Repository Codebase",
-     theme="huggingface_dark",
- ).launch()
 
  import os
+ import streamlit as st
+ from langchain import HuggingFaceHub, LLMChain, PromptTemplate
+ from git import Repo
+
+ # Run pip freeze and pip install -r requirements.txt
+ os.system("pip freeze > requirements.txt")
+ os.system("pip install -r requirements.txt")
+
+ # st.set_page_config has no `theme` parameter; themes are set in .streamlit/config.toml
+ st.set_page_config(layout="wide", initial_sidebar_state="auto")
+ st.title("Hugging Face Space Demo")
+
+ repository_url = st.text_input("Enter GitHub repository URL:", "")
+ access_token = st.text_input("Enter GitHub access token (optional):", "")
+ debug_logging = st.checkbox("Enable debug logging")
+
+ if st.button("Run"):
+     if debug_logging:
+         import logging
+         logging.basicConfig(filename='log.txt', level=logging.DEBUG, format='%(asctime)s %(message)s')
+         logging.debug('Starting the process')
+
+     # Clone the repository
+     local_path = "/tmp/repository"
+     Repo.clone_from(repository_url, local_path, branch="main", env={"GIT_TERMINAL_PROMPT": "0", "GIT_SSL_NO_VERIFY": "true"})
+
+     # Initialize the Hugging Face model
+     os.environ['HUGGINGFACEHUB_API_TOKEN'] = access_token
+     hub_llm = HuggingFaceHub(repo_id='google/flan-t5-xl', model_kwargs={'temperature': 1e-10})
+
+     # Create a prompt template and LLM chain (LLMChain expects a PromptTemplate, not a plain string)
+     prompt = PromptTemplate(
+         input_variables=["repo_url"],
+         template="What is the main purpose of the repository at {repo_url}?",
+     )
+     llm_chain = LLMChain(prompt=prompt, llm=hub_llm)
+
+     # Get the result
+     answer = llm_chain.run(repo_url=repository_url)
+     st.write("Answer:", answer)
+
+     if debug_logging:
+         logging.debug('Finished the process')
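
One gap worth noting: the new script clones the repository into /tmp/repository, but the prompt only mentions the URL, so the model never sees any of the cloned code. Below is a minimal sketch of how the checkout could be folded into the prompt, using the same pre-0.1 langchain imports and flan-t5-xl endpoint as the commit; the build_context helper and its truncation limits are hypothetical illustrations, not part of the committed app.

import os
from langchain import HuggingFaceHub, LLMChain, PromptTemplate

def build_context(local_path, max_chars=2000):
    # Hypothetical helper: concatenate short snippets of source files from the clone.
    snippets = []
    for root, _dirs, files in os.walk(local_path):
        for name in files:
            if name.endswith((".py", ".md")):
                try:
                    with open(os.path.join(root, name), encoding="utf-8", errors="ignore") as f:
                        snippets.append(f"{name}:\n{f.read(500)}")  # first 500 chars per file
                except OSError:
                    continue
    return "\n\n".join(snippets)[:max_chars]  # keep the prompt inside the model's input window

prompt = PromptTemplate(
    input_variables=["context", "question"],
    template="Repository files:\n{context}\n\nQuestion: {question}\nAnswer:",
)
# Assumes HUGGINGFACEHUB_API_TOKEN is already set in the environment, as in the app.
hub_llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1e-10})
chain = LLMChain(prompt=prompt, llm=hub_llm)

answer = chain.run(context=build_context("/tmp/repository"),
                   question="What is the main purpose of this repository?")
print(answer)

Truncating the concatenated files is the crudest way to respect flan-t5's input limit; a fuller implementation would chunk the files and retrieve only the relevant pieces per question.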