import google.generativeai as genai


class LLMClient:
    """Thin wrapper that routes chat requests either to Gemini or to an
    OpenAI-compatible chat-completions client (e.g. Groq)."""

    def __init__(self, api_client, google_api_key):
        self.client = api_client
        self.google_api_key = google_api_key  # Google API key for Gemini
        genai.configure(api_key=self.google_api_key)

    def chat_completion(self, messages, model="llama3-8b-8192"):
        if model == "gemini-pro":
            # Gemini does not accept the OpenAI-style message list directly,
            # so only the user message content (messages[1]) is forwarded.
            gemini_model = genai.GenerativeModel('gemini-pro')
            response = gemini_model.generate_content([messages[1]['content']])
            return response.text  # Return the Gemini result
        else:
            completion = self.client.chat.completions.create(
                messages=messages,
                model=model,
                temperature=0.5,
                stream=False,
            )
            return completion.choices[0].message.content


class CodeProcessor:
    """Builds prompts for a given code snippet and assembles the LLM
    responses into a single markdown report."""

    def __init__(self, llm_obj):
        self.llm_obj = llm_obj

    def process_code(self, code_text, llm_model):
        summary_generation = [
            {
                "role": "system",
                "content": "You are a knowledgeable and friendly programming assistant. Your task is to provide concise summaries of code snippets, explaining their purpose, required packages, and the underlying logic in a clear and approachable manner."
            },
            {
                "role": "user",
                "content": f"Please summarize the following code {code_text}, including its purpose, required packages, and the main logic behind it.",
            }
        ]

        detail_generation = [
            {
                "role": "system",
                "content": "You are a knowledgeable and friendly programming assistant. Your task is to explain each line of code clearly and concisely, providing a brief explanation for what each line does. Strictly explain each line"
            },
            {
                "role": "user",
                "content": f"Please explain each line of the following code snippet {code_text}, providing a brief explanation of what each line does",
            }
        ]

        prompt_for_generating_similar_code = [
            {
                "role": "system",
                "content": "You are a code generation assistant. Your task is to analyze the provided code snippet and identify the problem it addresses. Create a prompt that generates similar code with the same functionality step by step. Try to understand what is the application we are building. Focus on understanding the core features, the logic behind the implementation, and any required libraries. Ensure that the prompt captures the essence of what the code is solving without requiring the user to specify the problem."
            },
            {
                "role": "user",
                "content": f"Here is the code I want to replicate: {code_text}. Please generate a prompt that can produce similar code, focusing on understanding the problem being solved, core features, required packages, and the overall logic behind the implementation without giving a single line of code"
            }
        ]

        summary_response = self.llm_obj.chat_completion(summary_generation, model=llm_model)
        print("Summary generation completed............")

        detail_generation_response = self.llm_obj.chat_completion(detail_generation, model=llm_model)
        print("Detail generation completed............")

        prompt_for_generating_similar_code_response = self.llm_obj.chat_completion(prompt_for_generating_similar_code,
                                                                                   model=llm_model)
        print("Prompt generation completed............")

        markdown_output = f"""
# Summary of Code Snippet
{summary_response}

# Detailed Code Explanation for Snippet
{detail_generation_response}

# Similar Code Generation Prompt for Code Snippets
{prompt_for_generating_similar_code_response}
"""
        return markdown_output
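

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal example of wiring the two classes together. It assumes the
# OpenAI-compatible client is Groq (suggested by the "llama3-8b-8192" default)
# and that GROQ_API_KEY and GOOGLE_API_KEY are set in the environment; adjust
# the client construction and key handling to match your own setup.
if __name__ == "__main__":
    import os

    from groq import Groq  # assumed choice of OpenAI-compatible client

    groq_client = Groq(api_key=os.environ["GROQ_API_KEY"])
    llm = LLMClient(groq_client, os.environ["GOOGLE_API_KEY"])
    processor = CodeProcessor(llm)

    sample_code = "def add(a, b):\n    return a + b"
    print(processor.process_code(sample_code, llm_model="llama3-8b-8192"))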