Vaishnavi-15 committed
Commit 8ed1440 · verified · 1 Parent(s): c1a040e

Create app.py

Files changed (1)
  1. app.py +39 -0
app.py ADDED
@@ -0,0 +1,39 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ # Load model and tokenizer
+ model_name = "meta-llama/CodeLlama-7b-hf"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Llama tokenizers ship without a padding token; reuse EOS so padding works
+ tokenizer.pad_token = tokenizer.eos_token
+
+ # Define the inference function
+ def generate_code(prompt):
+     # Tokenize the input prompt
+     inputs = tokenizer(prompt, return_tensors="pt", truncation=True, padding=True)
+
+     # Generate code; max_new_tokens bounds only the generated continuation,
+     # so long prompts are not silently cut off by the output budget
+     outputs = model.generate(
+         inputs["input_ids"],
+         attention_mask=inputs["attention_mask"],
+         max_new_tokens=100,
+         num_return_sequences=1,
+     )
+
+     # Decode the generated output to a string
+     generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return generated_code
+
+ # Create Gradio interface
+ interface = gr.Interface(
+     fn=generate_code,
+     inputs="text",
+     outputs="text",
+     title="CodeLlama-7b Python Code Generator",
+     description="Generate Python code using the CodeLlama-7b model. Simply input a prompt and get back the generated code.",
+ )
+
+ # Launch the Gradio interface
+ interface.launch()
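
A quick way to sanity-check the inference function without launching the Gradio UI (a minimal sketch, assuming the file above is saved as app.py on the import path and the CodeLlama weights are available locally; note the model loads at import time, which is slow without a GPU):

    # Hypothetical smoke test for the generate_code function defined in app.py
    from app import generate_code

    prompt = "# Python function that reverses a string\ndef reverse_string(s):"
    print(generate_code(prompt))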