Tanveerooooooo committed on
Commit
57fe1bc
·
verified ·
1 Parent(s): 75befea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -42
app.py CHANGED
@@ -1,65 +1,56 @@
1
- # Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
-
15
  import gradio as gr
16
  import torch
17
- from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
18
 
19
- # Model configuration
20
- model_name = "Salesforce/codet5p-770m"
21
- tokenizer = AutoTokenizer.from_pretrained(model_name)
22
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
23
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
24
  model.to(device)
25
 
26
- # Prompts for different languages
27
- language_prompts = {
28
- "Python": "Fix this Python code:\n",
29
- "C": "Fix this C code:\n",
30
- "C++": "Fix this C++ code:\n",
31
- "JavaScript": "Fix this JavaScript code:\n"
32
  }
33
 
34
- # Debugging logic
35
  def eternos_debugger(code, error, language):
36
  if not code.strip():
37
  return "❌ Please provide code."
38
-
39
- prompt = f"{language_prompts[language]}{code}\nError:\n{error}\nCorrected code:\n"
40
  inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
41
  outputs = model.generate(
42
  **inputs,
43
  max_new_tokens=256,
44
- temperature=0.2,
45
- do_sample=False
 
 
46
  )
47
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
48
  return response.strip()
49
 
50
- # UI setup
51
- with gr.Blocks(theme=gr.themes.Soft(), css=".gradio-container {background-color: #cbedec;}") as demo:
52
- gr.Markdown("## βš™οΈ Eternos β€” AI Code Debugger")
53
- gr.Markdown("Supports Python, C, C++, JavaScript β€” powered by CodeT5p")
 
 
 
 
 
54
 
55
- with gr.Row():
56
- code_input = gr.Textbox(label="πŸ“ Your Code", lines=12)
57
- error_input = gr.Textbox(label="⚠️ Error Message (optional)", lines=4)
58
 
59
- language_input = gr.Dropdown(["Python", "C", "C++", "JavaScript"], label="🌐 Language", value="Python")
60
- output_code = gr.Code(label="βœ… Suggested Fix")
61
- run_btn = gr.Button("πŸ› οΈ Fix Code")
62
 
63
- run_btn.click(fn=eternos_debugger, inputs=[code_input, error_input, language_input], outputs=output_code)
64
 
65
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model configuration: CodeGemma 1.1B
MODEL_NAME = "google/codegemma-1.1b"

# Check the accelerator once and reuse the answer for both dtype and device:
# half precision on GPU keeps memory low, full precision on CPU stays safe.
_has_cuda = torch.cuda.is_available()

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch.float16 if _has_cuda else torch.float32,
)
device = torch.device("cuda" if _has_cuda else "cpu")
model.to(device)

# Supported languages and their prompt templates
LANGUAGE_PROMPTS = {
    "Python": "Fix this Python code:",
    "C": "Fix this C code:",
    "C++": "Fix this C++ code:",
    "JavaScript": "Fix this JavaScript code:",
}
19
 
 
20
def eternos_debugger(code, error, language):
    """Ask the model for a corrected version of *code*.

    Parameters:
        code: source code to fix, in one of the supported languages.
        error: optional error message to include in the prompt (may be "").
        language: key into LANGUAGE_PROMPTS ("Python", "C", "C++", "JavaScript").

    Returns:
        The model's suggested fix as a stripped string, or an error note
        when no code was supplied.
    """
    if not code.strip():
        return "❌ Please provide code."

    # Unknown dropdown values fall back to a generic instruction
    # instead of raising KeyError.
    instruction = LANGUAGE_PROMPTS.get(language, "Fix this code:")
    prompt = f"{instruction}\n{code}\nError:\n{error}\nCorrected code:\n"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=256,
        # Greedy decoding: temperature/top_p are ignored (and warned about by
        # transformers) when do_sample=False, so they are not passed.
        do_sample=False,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Causal LMs echo the prompt in `generate` output; decode only the newly
    # generated tokens so the UI shows just the suggested fix.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)
    return response.strip()
35
 
36
# Gradio user interface
def create_interface():
    """Assemble the Eternos debugger UI and return the Blocks app."""
    page_css = ".gradio-container {background-color: #cbedec;}"
    with gr.Blocks(theme=gr.themes.Soft(), css=page_css) as demo:
        gr.Markdown("## βš™οΈ Eternos β€” AI Code Debugger")
        gr.Markdown("Supports Python, C, C++, JavaScript β€” powered by CodeGemma 1.1B")

        with gr.Row():
            source_box = gr.Textbox(label="πŸ“ Your Code", lines=12)
            message_box = gr.Textbox(label="⚠️ Error Message (optional)", lines=4)

        lang_choice = gr.Dropdown(
            ["Python", "C", "C++", "JavaScript"],
            label="🌐 Language",
            value="Python",
        )
        fix_view = gr.Code(label="βœ… Suggested Fix")
        fix_button = gr.Button("πŸ› οΈ Fix Code")

        # Wire the button to the debugger: code + error + language in,
        # suggested fix out.
        fix_button.click(
            fn=eternos_debugger,
            inputs=[source_box, message_box, lang_choice],
            outputs=fix_view,
        )

    return demo
53
 
54
if __name__ == "__main__":
    # Launch the UI only when executed as a script, not on import.
    create_interface().launch()