from fastapi import FastAPI
from pydantic import BaseModel
import transformers
from fastapi.middleware.cors import CORSMiddleware
import os
from huggingface_hub import login
# Get the Hugging Face access token from an environment variable
access_token_read = os.getenv("DS4")
if not access_token_read:
    raise RuntimeError("Environment variable DS4 must be set to a Hugging Face access token")

# Log in to the Hugging Face Hub (required for gated models such as Llama 3.1)
login(token=access_token_read)
# Define the FastAPI app
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
# Load the model and tokenizer from Hugging Face (CPU only)
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # Replace with an appropriate model
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
model = transformers.AutoModelForCausalLM.from_pretrained(
    model_id,
    # device_map and low_cpu_mem_usage omitted to avoid requiring 'accelerate'
)
# Set up the text-generation pipeline for CPU
# (named 'generator' to avoid shadowing transformers.pipeline)
generator = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=150,
    do_sample=True,  # sampling must be enabled for temperature to take effect
    temperature=0.7,
    return_full_text=False,  # return only the generated summary, not the echoed prompt
    device=-1,  # force CPU usage
)
# Define the request model for email input
class EmailRequest(BaseModel):
    subject: str
    sender: str
    recipients: str
    body: str
# Helper function to build the summarization prompt
def create_email_prompt(subject, sender, recipients, body):
    prompt = f"Subject: {subject}\nFrom: {sender}\nTo: {recipients}\n\n{body}\n\nSummarize this email."
    return prompt
# Define the FastAPI endpoint for email summarization
@app.post("/summarize-email/")
async def summarize_email(email: EmailRequest):
    prompt = create_email_prompt(email.subject, email.sender, email.recipients, email.body)
    # Generate the summary (return_full_text=False strips the prompt from the output)
    summary = generator(prompt)[0]["generated_text"]
    return {"summary": summary}
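
# Usage sketch (assumes this file is saved as main.py and uvicorn is installed;
# the example request values are illustrative only):
#
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
#   curl -X POST http://localhost:8000/summarize-email/ \
#     -H "Content-Type: application/json" \
#     -d '{"subject": "Q3 report", "sender": "alice@example.com",
#          "recipients": "bob@example.com", "body": "Attached is the Q3 report..."}'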