Update app.py
app.py CHANGED
@@ -1,38 +1,16 @@
-import os
-
-# Fix PyTorch Inductor cache directory and HF cache permission issues on Hugging Face Spaces
-os.environ["TORCHINDUCTOR_CACHE_DIR"] = "/tmp"
-os.environ["HF_HOME"] = "/tmp"
-
 from fastapi import FastAPI
 from pydantic import BaseModel
-from transformers import
-from transformers.models.vits.modeling_vits import VitsForConditionalGeneration
-import torch
-import io
-from fastapi.responses import StreamingResponse
-import soundfile as sf
+from transformers import pipeline
 
 app = FastAPI()
 
-
-
-model = VitsForConditionalGeneration.from_pretrained(model_name)
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model.to(device)
+# Load your model pipeline once on startup
+summarizer = pipeline("text2text-generation", model="zakihassan04/tacab_ai_beero")
 
-class
+class TextRequest(BaseModel):
     inputs: str
 
-@app.post("/
-async def
-
-
-    audio = model.generate(**inputs)
-    audio = audio.squeeze().cpu().numpy()
-
-    buf = io.BytesIO()
-    sf.write(buf, audio, samplerate=22050, format="WAV")
-    buf.seek(0)
-
-    return StreamingResponse(buf, media_type="audio/wav")
+@app.post("/generate")
+async def generate_text(request: TextRequest):
+    out = summarizer(request.inputs, max_length=200, do_sample=False)
+    return {"generated_text": out[0]["generated_text"]}
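For a quick smoke test of the new `/generate` endpoint, a client call might look like this (a minimal sketch assuming the app is served locally, e.g. via `uvicorn app:app --port 8000`, and that the requests library is available):

import requests

# POST a prompt to the endpoint defined above; the JSON body matches the TextRequest model
resp = requests.post(
    "http://localhost:8000/generate",
    json={"inputs": "Your prompt text here"},
)
resp.raise_for_status()
print(resp.json()["generated_text"])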