import os

import gradio as gr
import requests
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer.
# The name must be a valid Hugging Face Hub model id; "meta-llama/Llama-3.1-8B-Instruct"
# is one example (gated, so Hub access approval and a login token are required).
model_name = "meta-llama/Llama-3.1-8B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)


# Define code analysis function
def analyze_code(code):
    inputs = tokenizer(code, return_tensors="pt").to(model.device)
    outputs = model.generate(inputs["input_ids"], max_length=512)
    # generate() returns token ids, not hidden states; decode them back to text.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


# Define GitHub API function
def get_repo_code(repo_link):
    # Never hard-code a personal access token; read it from the environment instead.
    github_token = os.environ.get("GITHUB_TOKEN", "")
    github_api_url = "https://api.github.com"
    owner, repo = repo_link.rstrip("/").split("/")[-2:]
    repo_url = f"{github_api_url}/repos/{owner}/{repo}/contents"
    headers = {"Authorization": f"Bearer {github_token}"} if github_token else {}
    response = requests.get(repo_url, headers=headers)
    return response.json()


# Create Gradio interface for code analysis
demo = gr.Interface(
    fn=analyze_code,
    inputs=gr.Textbox(label="Enter code to analyze"),
    outputs=gr.Textbox(label="Model output"),
    title="Code Analysis with LLaMA 3.1",
    description="Enter code to analyze it with a LLaMA 3.1 model.",
)

# Create Gradio interface for GitHub repository link
github_demo = gr.Interface(
    fn=get_repo_code,
    inputs=gr.Textbox(label="Enter GitHub repository link"),
    outputs=gr.JSON(label="Repository contents listing"),
    title="Get GitHub Repository Code",
    description="Enter a GitHub repository link to list its top-level contents.",
)

# Launch Gradio app with both interfaces as tabs
gr.TabbedInterface([demo, github_demo], ["Analyze Code", "Get Repository Code"]).launch()
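

# --- Optional sketch (not part of the original app) -------------------------
# get_repo_code() only returns the JSON listing of the repository's top-level
# contents; it does not fetch file text. The hypothetical helper below shows
# one way to bridge the two interfaces: download matching files from the
# listing and pass their text to analyze_code(). Field names ("type", "name",
# "download_url") follow the GitHub contents API; the helper name and the
# `extensions` parameter are illustrative assumptions, not part of the app.
def fetch_repo_sources(repo_link, extensions=(".py",)):
    """Download the raw text of top-level files whose names match `extensions`."""
    listing = get_repo_code(repo_link)
    if not isinstance(listing, list):
        # An error response (e.g. unknown repo) comes back as a dict, not a list.
        return {}
    sources = {}
    for entry in listing:
        if entry.get("type") == "file" and entry.get("download_url") and entry["name"].endswith(extensions):
            raw = requests.get(entry["download_url"])
            sources[entry["name"]] = raw.text
    return sources


# Example usage (untested): run each fetched file through the analyzer.
# for name, code in fetch_repo_sources("https://github.com/owner/repo").items():
#     print(name, analyze_code(code))
# -----------------------------------------------------------------------------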