vaniagrawal committed on
Commit
b4c09b1
·
verified ·
1 Parent(s): 7b3007d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -15
app.py CHANGED
@@ -9,21 +9,21 @@ if hf_token:
9
  else:
10
  print("Failed to retrieve Hugging Face token.")
11
 
12
- # Load the model and tokenizer
13
- model_name = "meta-llama/CodeLlama-7b-hf"
14
- model = AutoModelForCausalLM.from_pretrained(model_name)
15
- tokenizer = AutoTokenizer.from_pretrained(model_name)
16
 
17
 
18
- def generate_code(prompt):
19
- inputs = tokenizer(prompt, return_tensors="pt")
20
- outputs = model.generate(inputs["input_ids"], max_length=200)
21
- code = tokenizer.decode(outputs[0], skip_special_tokens=True)
22
- return code
23
 
24
- # Set up the Gradio interface
25
- demo = gr.Interface(fn=generate_code,
26
- inputs="text",
27
- outputs="text",
28
- title="CodeLlama 7B Model",
29
- description="Generate code with CodeLlama-7b-hf.").launch()
 
9
  else:
10
  print("Failed to retrieve Hugging Face token.")
11
 
12
+ # # Load the model and tokenizer
13
+ # model_name = "meta-llama/CodeLlama-7b-hf"
14
+ # model = AutoModelForCausalLM.from_pretrained(model_name)
15
+ # tokenizer = AutoTokenizer.from_pretrained(model_name)
16
 
17
 
18
+ # def generate_code(prompt):
19
+ # inputs = tokenizer(prompt, return_tensors="pt")
20
+ # outputs = model.generate(inputs["input_ids"], max_length=200)
21
+ # code = tokenizer.decode(outputs[0], skip_special_tokens=True)
22
+ # return code
23
 
24
+ # # Set up the Gradio interface
25
+ # demo = gr.Interface(fn=generate_code,
26
+ # inputs="text",
27
+ # outputs="text",
28
+ # title="CodeLlama 7B Model",
29
+ # description="Generate code with CodeLlama-7b-hf.").launch()