from fastapi import FastAPI
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

# Set cache directory for Hugging Face Transformers
os.environ["TRANSFORMERS_CACHE"] = "/home/user/.cache"

# Load the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("matsant01/STEMerald-2b")
model = AutoModelForCausalLM.from_pretrained("matsant01/STEMerald-2b")

# Initialize FastAPI app
app = FastAPI()
# Root endpoint returning a simple welcome message
@app.get("/")
def read_root():
    return {"message": "Welcome to the STEMerald-2b API"}
# Text-generation endpoint (currently disabled)
#@app.post("/generate/")
#def generate_text(prompt: str):
#    inputs = tokenizer(prompt, return_tensors="pt")
#    outputs = model.generate(inputs["input_ids"], max_length=50)
#    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
#    return {"generated_text": generated_text}
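For reference, a minimal client sketch for exercising this API. It assumes the file is saved as app.py, served with `uvicorn app:app --host 0.0.0.0 --port 8000`, and that the /generate/ endpoint above has been uncommented; the module name, host, port, and example prompt are assumptions, not part of the original code.

# Minimal client sketch (assumptions: server reachable at http://localhost:8000,
# /generate/ re-enabled in app.py)
import requests

# Root endpoint: returns the welcome message defined in read_root()
print(requests.get("http://localhost:8000/").json())

# /generate/ declares `prompt: str` without a request body model,
# so FastAPI expects it as a query parameter
resp = requests.post(
    "http://localhost:8000/generate/",
    params={"prompt": "State Newton's second law."},
)
resp.raise_for_status()
print(resp.json()["generated_text"])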