astro21 committed
Commit 1ea6737 · verified · 1 Parent(s): 719948f

Create main.py

Files changed (1)
  1. main.py +48 -0
main.py ADDED
@@ -0,0 +1,48 @@
+ from fastapi import FastAPI, Request
+ from pydantic import BaseModel
+ import transformers
+ import torch
+ from fastapi.middleware.cors import CORSMiddleware
+
+
+ # Define the FastAPI app
+ app = FastAPI()
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_methods=["*"],
+     allow_headers=["*"],
+ )
+
+ # Load the model and tokenizer from Hugging Face
+ model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"  # Replace with an appropriate model
+ tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
+ model = transformers.AutoModelForCausalLM.from_pretrained(
+     model_id, device_map="auto", torch_dtype=torch.bfloat16
+ )
+ pipeline = transformers.pipeline(
+     "text-generation",
+     model=model,
+     tokenizer=tokenizer,
+     max_new_tokens=150,
+     temperature=0.7, do_sample=True,  # sampling must be enabled for temperature to take effect
+     device_map="auto",
+ )
+
+ # Define the request model for email input
+ class EmailRequest(BaseModel):
+     subject: str
+     sender: str
+     recipients: str
+     body: str
+
+ # Define the FastAPI endpoint for email summarization
+ @app.post("/summarize-email/")
+ async def summarize_email(email: EmailRequest):
+     prompt = create_email_prompt(email.subject, email.sender, email.recipients, email.body)  # helper not defined in this commit; sketched below
+
+     # Use the pipeline to generate the summary
+     summary = pipeline(prompt)[0]["generated_text"]
+
+     return {"summary": summary}