code-llama / app.py
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import os

# Read the Hugging Face access token from the environment.
# Note: HF_HOME only points at the cache directory; the token itself is
# conventionally stored in the HF_TOKEN environment variable.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    print("Successfully retrieved Hugging Face token.")
else:
    print("Failed to retrieve Hugging Face token.")
# Load the model and tokenizer. CodeLlama-7b-hf is a gated repository,
# so the access token is passed explicitly.
model_name = "meta-llama/CodeLlama-7b-hf"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)

def generate_code(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    # max_new_tokens bounds the generated text regardless of prompt length.
    outputs = model.generate(inputs["input_ids"], max_new_tokens=200)
    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return code

# Set up the Gradio interface; launch() is called on the Interface object
# rather than assigning its return value.
demo = gr.Interface(fn=generate_code,
                    inputs="text",
                    outputs="text",
                    title="CodeLlama 7B Model",
                    description="Generate code with CodeLlama-7b-hf.")
demo.launch()
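# A possible extension (hedged sketch; assumes a GPU-backed Space, and none
# of this is in the original): move the 7B model to CUDA for tolerable
# latency and enable sampling for more varied completions.
#   import torch
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model.to(device)
#   outputs = model.generate(inputs["input_ids"].to(device),
#                            max_new_tokens=200, do_sample=True,
#                            temperature=0.2, top_p=0.95)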