import os

import torch
import transformers
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from huggingface_hub import login
from pydantic import BaseModel

# Authenticate with the Hugging Face Hub using a token stored in the DS4
# environment variable (required for gated models such as Llama 3.1).
access_token_read = os.getenv("DS4")
if not access_token_read:
    raise RuntimeError("Missing Hugging Face token in the DS4 environment variable")
login(token=access_token_read)

# Define the FastAPI app
app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)

# Load the model and tokenizer from Hugging Face
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # Replace with an appropriate model
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
model = transformers.AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)

# Build a text-generation pipeline around the loaded model. Device placement is
# already handled by device_map="auto" above, and do_sample=True is required for
# the temperature setting to take effect.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=150,
    temperature=0.7,
    do_sample=True,
)


# Define the request model for email input
class EmailRequest(BaseModel):
    subject: str
    sender: str
    recipients: str
    body: str


def create_email_prompt(subject: str, sender: str, recipients: str, body: str) -> str:
    # Minimal prompt builder; adjust the wording to suit your summarization needs.
    return (
        "Summarize the following email in two or three sentences.\n\n"
        f"Subject: {subject}\n"
        f"From: {sender}\n"
        f"To: {recipients}\n\n"
        f"{body}\n\n"
        "Summary:"
    )


# Define the FastAPI endpoint for email summarization
@app.post("/summarize-email/")
async def summarize_email(email: EmailRequest):
    prompt = create_email_prompt(email.subject, email.sender, email.recipients, email.body)

    # Use the pipeline to generate the summary; return_full_text=False strips the
    # prompt from the output so only the generated summary is returned.
    result = pipeline(prompt, return_full_text=False)
    summary = result[0]["generated_text"]

    return {"summary": summary}
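

# --- Usage sketch ---
# The commands below assume this file is saved as main.py and served locally on
# port 8000; adjust the module name, host, and port to match your setup.
#
# Start the API:
#   uvicorn main:app --host 0.0.0.0 --port 8000
#
# Send a test request:
#   curl -X POST http://localhost:8000/summarize-email/ \
#        -H "Content-Type: application/json" \
#        -d '{"subject": "Q3 planning", "sender": "alice@example.com", "recipients": "team@example.com", "body": "Hi team, here are the items we need to cover before the offsite..."}'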