import os

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Read the Hugging Face access token from the environment (on Spaces, set it
# as a secret named HF_TOKEN); it is needed to download the gated CodeLlama
# weights, so check it before loading the model.
hf_token = os.getenv("HF_TOKEN")
if hf_token:
    print("Successfully retrieved Hugging Face token.")
else:
    print("Failed to retrieve Hugging Face token.")

# Load the model and tokenizer, authenticating with the token
# (transformers >= 4.32 accepts token=; older versions used use_auth_token=).
model_name = "meta-llama/CodeLlama-7b-hf"
model = AutoModelForCausalLM.from_pretrained(model_name, token=hf_token)
tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
def generate_code(prompt):
    # Tokenize the prompt, generate up to 200 tokens (the prompt counts
    # toward the max_length budget), and decode the result back to text.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(inputs["input_ids"], max_length=200)
    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return code
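# A sketch of a sampled-decoding variant: do_sample, temperature, top_p, and
# max_new_tokens are standard transformers generate() arguments, but this
# function and its values are illustrative additions, not part of the app.
def generate_code_sampled(prompt, max_new_tokens=200):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        inputs["input_ids"],
        do_sample=True,                 # sample instead of greedy decoding
        temperature=0.2,                # low temperature keeps code completions focused
        top_p=0.95,                     # nucleus sampling cutoff
        max_new_tokens=max_new_tokens,  # budget counts newly generated tokens only
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)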
# Set up the Gradio interface and start the app.
demo = gr.Interface(fn=generate_code,
                    inputs="text",
                    outputs="text",
                    title="CodeLlama 7B Model",
                    description="Generate code with CodeLlama-7b-hf.")
demo.launch()
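# Usage sketch (assumes the Space is published as <user>/<space-name>, a
# placeholder): the running app can be queried with the gradio_client
# package, where "/predict" is the default endpoint for a gr.Interface.
#
#   from gradio_client import Client
#   client = Client("<user>/<space-name>")
#   print(client.predict("def fibonacci(n):", api_name="/predict"))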