vaniagrawal commited on
Commit
79342f8
·
verified ·
1 Parent(s): b4c09b1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -20
app.py CHANGED
@@ -3,27 +3,23 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import os
4
 
5
  # Check if the token is being accessed
6
- hf_token = os.getenv("HF_HOME")
7
- if hf_token:
8
- print("Successfully retrieved Hugging Face token.")
9
- else:
10
- print("Failed to retrieve Hugging Face token.")
11
 
12
- # # Load the model and tokenizer
13
- # model_name = "meta-llama/CodeLlama-7b-hf"
14
- # model = AutoModelForCausalLM.from_pretrained(model_name)
15
- # tokenizer = AutoTokenizer.from_pretrained(model_name)
16
 
17
 
18
- # def generate_code(prompt):
19
- # inputs = tokenizer(prompt, return_tensors="pt")
20
- # outputs = model.generate(inputs["input_ids"], max_length=200)
21
- # code = tokenizer.decode(outputs[0], skip_special_tokens=True)
22
- # return code
23
 
24
- # # Set up the Gradio interface
25
- # demo = gr.Interface(fn=generate_code,
26
- # inputs="text",
27
- # outputs="text",
28
- # title="CodeLlama 7B Model",
29
- # description="Generate code with CodeLlama-7b-hf.").launch()
 
3
  import os
4
 
5
  # Check if the token is being accessed
6
# Read the Hugging Face access token from the environment.
# BUG FIX: the original read HF_HOME, which is the *cache directory* path,
# not an auth token. Gated repos such as meta-llama/CodeLlama-7b-hf require
# an access token, conventionally exposed as HF_TOKEN.
hf_token = os.environ.get("HF_TOKEN", None)
if hf_token is None:
    # Don't crash here: from_pretrained below will raise a clearer error if
    # the repo is gated and no credentials are available.
    print("Warning: HF_TOKEN is not set; loading a gated model may fail.")

# Load the model and tokenizer (gated repo -> pass the token explicitly).
model_name = "meta-llama/CodeLlama-7b-hf"
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
12
 
13
 
14
def generate_code(prompt):
    """Generate up to 200 tokens of text/code continuing *prompt*.

    Args:
        prompt: Text prompt fed to the CodeLlama model.

    Returns:
        The decoded model output as a string, special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass the whole encoding (input_ids AND attention_mask) so generate()
    # knows which positions are real tokens; the original passed only
    # input_ids, which triggers warnings and can mis-handle padding.
    outputs = model.generate(**inputs, max_length=200)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
19
 
20
# Set up the Gradio interface.
# BUG FIX: the original did `demo = gr.Interface(...).launch()`, so `demo`
# held the return value of launch(), not the Interface. Hugging Face Spaces
# (and any later `demo.close()` / reload usage) expect `demo` to be the
# Interface object; build it first, then launch.
demo = gr.Interface(
    fn=generate_code,
    inputs="text",
    outputs="text",
    title="CodeLlama 7B Model",
    description="Generate code with CodeLlama-7b-hf.",
)
demo.launch()