import os
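# Install the project's dependencies at runtime and log the resulting environment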
os.system("pip install -r requirements.txt")
os.system("pip freeze")
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import gradio as gr
# Load pretrained model and tokenizer
model_name = "salesforce/codet5-base"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Define function to analyze input code
def analyze_code(input_code):
    # Format code into strings and sentences for NLP
    code_str = " ".join(input_code.split())
    sentences = [s.strip() for s in code_str.split(".") if s.strip()]
    # Extract relevant info and intent from code
    variables = []
    functions = []
    logic = []
    for sentence in sentences:
        if "=" in sentence:
            variables.append(sentence.split("=")[0].strip())
        elif "(" in sentence:
            functions.append(sentence.split("(")[0].strip())
        else:
            logic.append(sentence)
    # Return info and intent in a dictionary
    return {"variables": variables, "functions": functions, "logic": logic}
# Define function to generate prompt from analyzed code
def generate_prompt(code_analysis):
    prompt = "Generate code with the following: \n\n"
    prompt += f"Variables: {', '.join(code_analysis['variables'])} \n\n"
    prompt += f"Functions: {', '.join(code_analysis['functions'])} \n\n"
    prompt += f"Logic: {' '.join(code_analysis['logic'])}"
    return prompt
# Generate code from model and prompt
def generate_code(prompt):
    # The model expects token ids, not a raw string, so tokenize the prompt first
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    output_ids = model.generate(input_ids, max_length=100, num_beams=5, early_stopping=True)
    # Decode the generated ids back into text
    generated_code = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    return generated_code
# Suggest improvements to code
def suggest_improvements(code):
    suggestions = ["Use more descriptive variable names", "Add comments to explain complex logic", "Refactor duplicated code into functions"]
    return suggestions
# Define Gradio interface
interface = gr.Interface(fn=generate_code, inputs=["textbox"], outputs=["textbox"])
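# Note: the interface is only defined here; calling interface.launch() would be needed to actually serve it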
# Have a conversation about the code
input_code = """x = 10
y = 5
def add(a, b):
return a + b
result = add(x, y)"""
code_analysis = analyze_code(input_code)
prompt = generate_prompt(code_analysis)
reply = f"{prompt}\n\n{generate_code(prompt)}\n\nSuggested improvements: {', '.join(suggest_improvements(input_code))}"
print(reply)
while True:
    change = input("Would you like to make any changes to the code? (Y/N) ")
    if change == "Y":
        new_code = input("Enter the updated code: ")
        code_analysis = analyze_code(new_code)
        prompt = generate_prompt(code_analysis)
        reply = f"{prompt}\n\n{generate_code(prompt)}\n\nSuggested improvements: {', '.join(suggest_improvements(new_code))}"
        print(reply)
    elif change == "N":
        print("OK, conversation ended.")
        break