Anwar11234 committed on
Commit 8ceb2a8
1 Parent(s): 8cd45e7

first commit

Files changed (3)
  1. Dockerfile +13 -0
  2. app.py +49 -0
  3. requirements.txt +5 -0
Dockerfile ADDED
@@ -0,0 +1,13 @@
+ FROM python:3.10.9
+ COPY . .
+
+ WORKDIR /
+
+ RUN mkdir -p /.cache/huggingface/hub
+
+ # Separate RUN command for chmod
+ RUN chmod -R 777 /.cache/huggingface
+
+ RUN pip install --no-cache-dir --upgrade -r /requirements.txt
+
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
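
For quick local testing outside the container, the CMD line above can be mirrored with uvicorn's Python API. The following is a minimal sketch, not part of this commit: it assumes app.py sits in the working directory and that the packages from requirements.txt are installed.

    # Local-run sketch mirroring the Dockerfile CMD above (not part of the commit)
    import uvicorn

    if __name__ == "__main__":
        # Same application path, host and port as the container entrypoint
        uvicorn.run("app:app", host="0.0.0.0", port=7860)
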
app.py ADDED
@@ -0,0 +1,49 @@
+ from fastapi import FastAPI, Body, HTTPException
+ from typing import Dict
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+ from peft import PeftModel, PeftConfig
+ from fastapi.middleware.cors import CORSMiddleware
+
+ app = FastAPI()
+
+ app.add_middleware(
+     CORSMiddleware,
+     allow_origins=["*"],
+     allow_credentials=True,
+     allow_methods=["*"],
+     allow_headers=["*"]
+ )
+
+ def load_model():
+     peft_model_id = "ANWAR101/lora-bart-base-youtube-cnn"
+     config = PeftConfig.from_pretrained(peft_model_id)
+     model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
+     tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
+     model = PeftModel.from_pretrained(model, peft_model_id)
+     return model, tokenizer
+
+
+ model, tokenizer = load_model()
+
+ @app.post("/summarize")
+ async def summarize(data: Dict[str, str] = Body(...)):
+     """Summarize a text using the loaded PEFT model."""
+
+     text = data.get("text")
+
+     # Check for missing text
+     if not text:
+         raise HTTPException(status_code=400, detail="Missing text in request body")
+
+     # Preprocess the text
+     inputs = tokenizer(text, truncation=True, return_tensors="pt")
+
+     # Generate summary using the model
+     outputs = model.generate(
+         **inputs, max_length=300, min_length=50, do_sample=True, num_beams=3,
+         no_repeat_ngram_size=2, temperature=0.6, length_penalty=1.0
+     )
+
+     summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     response = {"summary": summary}
+     return response
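
Once the container (or a local server) is running, the /summarize route accepts a JSON body with a "text" field and returns a "summary" field. Below is a hedged client sketch: it assumes the server is reachable at localhost:7860 (the port from the Dockerfile CMD) and that the requests package is installed on the client side, which is not part of requirements.txt.

    # Hypothetical client call for the /summarize endpoint defined above
    import requests

    payload = {"text": "Paste the transcript or article to be summarized here."}
    resp = requests.post("http://localhost:7860/summarize", json=payload)
    resp.raise_for_status()        # surface HTTP errors (e.g. the 400 for missing text)
    print(resp.json()["summary"])  # the generated summary string
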
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ transformers==4.38.2
+ peft
+ fastapi
+ uvicorn
+ huggingface_hub