import os
import torch
import multiprocessing
from fastapi import FastAPI
from pydantic import BaseModel
from transformers import AutoModelForCausalLM, AutoTokenizer
from dotenv import load_dotenv
from accelerate import Accelerator
# Load environment variables from a .env file (useful for local development)
load_dotenv()
# HTML for the docs landing page (Buy Me a Coffee badge plus local-run instructions)
html_content = """
<!DOCTYPE html>
<html>
<head>
    <title>Llama-3.2-1B-Instruct-API</title>
</head>
<body>
    <div style="text-align: center;">
        <a href="https://buymeacoffee.com/xxparthparekhxx" target="_blank">
            <img src="https://cdn.buymeacoffee.com/buttons/v2/default-yellow.png"
                 alt="Buy Me A Coffee"
                 height="40px">
        </a>
        <h2>Please Chill Out!</h2>
        <p>This API takes around <strong>5.62 minutes</strong> to process a single request due to current hardware limitations.</p>
        <h3>Want Faster Responses? Help Me Out!</h3>
        <p>If you'd like to see this API running faster on high-performance <strong>A100</strong> hardware, please consider buying me a coffee. Your support will go towards upgrading to <strong>Hugging Face Pro</strong>, which will allow me to run A100-powered Spaces for everyone!</p>
        <h4>Instructions to Clone and Run Locally:</h4>
        <ol>
            <li><strong>Clone the repository:</strong>
                <pre>
git clone https://huggingface.co/spaces/xxparthparekhxx/llama-3.2-1B-FastApi
cd llama-3.2-1B-FastApi
                </pre>
            </li>
            <li><strong>Build and run the Docker container:</strong>
                <pre>
docker build -t llama-api .
docker run -p 7860:7860 llama-api
                </pre>
            </li>
            <li><strong>Access the API locally:</strong>
                <p>Open <a href="http://localhost:7860">http://localhost:7860</a> to access the API docs locally.</p>
            </li>
        </ol>
    </div>
</body>
</html>
"""
# FastAPI app with embedded Buy Me a Coffee badge and instructions
app = FastAPI(
    title="Llama-3.2-1B-Instruct-API",
    description=html_content,
    docs_url="/",      # URL for the Swagger UI docs
    redoc_url="/doc",  # URL for the ReDoc docs
)
# Read the Hugging Face access token from the environment
HF_TOKEN = os.getenv("HF_TOKEN")
MODEL = "meta-llama/Llama-3.2-1B-Instruct"
# Auto-select CPU or GPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# Set PyTorch to use all available CPU cores if running on CPU
torch.set_num_threads(multiprocessing.cpu_count())
# Initialize Accelerator for managing device allocation
accelerator = Accelerator()
# Load model and tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL, token=HF_TOKEN, use_fast=True)
model = AutoModelForCausalLM.from_pretrained(
    MODEL,
    token=HF_TOKEN,
    torch_dtype=torch.float16,
    device_map="auto",
)
# Prepare model for multi-device setup with accelerate
model, tokenizer = accelerator.prepare(model, tokenizer)
# Pydantic model for input
class PromptRequest(BaseModel):
    prompt: str
    max_new_tokens: int = 100
    temperature: float = 0.7
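# A hypothetical request body for /generate/, just to illustrate the schema above
# (the prompt text and values are placeholders, not from the original project):
#   {"prompt": "Explain what this API does.", "max_new_tokens": 64, "temperature": 0.7}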
@app.post("/generate/")
async def generate_text(request: PromptRequest):
inputs = tokenizer(request.prompt, return_tensors="pt").to(device)
with torch.no_grad():
outputs = model.generate(
**inputs,
max_new_tokens=request.max_new_tokens,
temperature=request.temperature,
do_sample=False,
pad_token_id=tokenizer.eos_token_id
)
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
return {"response": response}