Mrudangam2004 committed on
Commit
869089b
·
verified ·
1 Parent(s): 30b3fea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -16
app.py CHANGED
@@ -1,25 +1,43 @@
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
 
3
 
4
- # Load the tokenizer and model
5
- tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
6
- model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-multi")
 
 
 
 
 
 
 
 
 
 
 
 
 
7
 
8
  def generate_code(prompt):
9
- # Tokenize the input text
10
- input_ids = tokenizer(prompt, return_tensors="pt").input_ids
 
11
 
12
- # Generate code based on the input text
13
- generated_ids = model.generate(
14
- input_ids,
15
- max_length=200, # Adjust as needed
16
- num_return_sequences=1, # Number of generated sequences to return
17
- pad_token_id=tokenizer.eos_token_id # Handle padding tokens
18
- )
19
 
20
- # Decode the generated tokens to text
21
- generated_code = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
22
- return generated_code
 
 
 
23
 
24
  # Define the Gradio interface
25
  iface = gr.Interface(
@@ -32,4 +50,7 @@ iface = gr.Interface(
32
 
33
  # Launch the Gradio app
34
  if __name__ == "__main__":
35
- iface.launch()
 
 
 
 
1
  from transformers import AutoTokenizer, AutoModelForCausalLM
2
  import gradio as gr
3
+ import logging
4
 
# Configure root logging once so app messages are emitted at INFO level.
logging.basicConfig(level=logging.INFO)

# Module-level logger named after this module, per stdlib convention.
logger = logging.getLogger(__name__)
def load_model_and_tokenizer(model_name: str = "Salesforce/codegen-350M-multi"):
    """Load and return the ``(tokenizer, model)`` pair for *model_name*.

    Args:
        model_name: Hugging Face model id; defaults to the CodeGen 350M
            multi-language checkpoint the app was built around.

    Returns:
        A ``(tokenizer, model)`` tuple.

    Raises:
        Exception: re-raised after logging if the download/load fails,
            so the app fails fast instead of starting without a model.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
        return tokenizer, model
    except Exception:
        # logger.exception records the full traceback; a plain
        # logger.error(f"...") would keep only the message text.
        logger.exception("Failed to load model or tokenizer %r", model_name)
        raise


# Initialize the shared tokenizer/model once at import time.
tokenizer, model = load_model_and_tokenizer()
21
 
def generate_code(prompt):
    """Generate code from *prompt* using the module-level CodeGen model.

    Args:
        prompt: Natural-language description or partial code to complete.

    Returns:
        The generated code as a string, or a short error message if
        generation fails (the exception is logged, never propagated,
        so the Gradio UI always gets a displayable string).
    """
    try:
        # Tokenize the input text.
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids

        # max_new_tokens (rather than max_length) guarantees a full
        # 200-token completion even for long prompts; max_length counts
        # the prompt's own tokens against the budget.
        generated_ids = model.generate(
            input_ids,
            max_new_tokens=200,  # adjust as needed
            num_return_sequences=1,  # number of generated sequences to return
            pad_token_id=tokenizer.eos_token_id,  # handle padding tokens
        )

        # Decode the generated tokens back to text, dropping special markers.
        return tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    except Exception:
        # logger.exception preserves the traceback for debugging.
        logger.exception("Error during code generation")
        return "Error generating code. Please check the logs."
41
 
42
  # Define the Gradio interface
43
  iface = gr.Interface(
 
50
 
# Launch the Gradio app
if __name__ == "__main__":
    try:
        iface.launch()
    except Exception:
        # logger.exception records the traceback, which the previous
        # logger.error(f"...") form discarded.
        logger.exception("Error launching the Gradio app")