nurfarah57 committed on
Commit
b103320
·
verified ·
1 Parent(s): 1a0b3f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -30
app.py CHANGED
@@ -1,38 +1,16 @@
1
- import os
2
-
3
- # Fix PyTorch Inductor cache directory and HF cache permission issues on Hugging Face Spaces
4
- os.environ["TORCHINDUCTOR_CACHE_DIR"] = "/tmp"
5
- os.environ["HF_HOME"] = "/tmp"
6
-
7
  from fastapi import FastAPI
8
  from pydantic import BaseModel
9
- from transformers import AutoProcessor
10
- from transformers.models.vits.modeling_vits import VitsForConditionalGeneration
11
- import torch
12
- import io
13
- from fastapi.responses import StreamingResponse
14
- import soundfile as sf
15
 
16
  app = FastAPI()
17
 
18
- model_name = "Somali-tts/somali_tts_model"
19
- processor = AutoProcessor.from_pretrained(model_name)
20
- model = VitsForConditionalGeneration.from_pretrained(model_name)
21
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
22
- model.to(device)
23
 
24
- class TextInput(BaseModel):
25
  inputs: str
26
 
27
- @app.post("/synthesize")
28
- async def synthesize_tts(data: TextInput):
29
- inputs = processor(data.inputs, return_tensors="pt").to(device)
30
- with torch.no_grad():
31
- audio = model.generate(**inputs)
32
- audio = audio.squeeze().cpu().numpy()
33
-
34
- buf = io.BytesIO()
35
- sf.write(buf, audio, samplerate=22050, format="WAV")
36
- buf.seek(0)
37
-
38
- return StreamingResponse(buf, media_type="audio/wav")
 
 
 
 
 
 
 
import asyncio

from fastapi import FastAPI
from pydantic import BaseModel
from transformers import pipeline
 
 
 
 
 
4
 
5
  app = FastAPI()
6
 
7
+ # Load your model pipeline once on startup
8
+ summarizer = pipeline("text2text-generation", model="zakihassan04/tacab_ai_beero")
 
 
 
9
 
10
+ class TextRequest(BaseModel):
11
  inputs: str
12
 
13
@app.post("/generate")
async def generate_text(request: TextRequest):
    """Generate text from `request.inputs` using the module-level pipeline.

    The transformers pipeline call is synchronous and CPU/GPU-heavy; calling
    it directly inside an `async def` handler would block the event loop and
    stall every other concurrent request. Off-load it to a worker thread so
    the server stays responsive. The HTTP interface (route, request body,
    response shape) is unchanged.

    Returns:
        dict: {"generated_text": <model output string>}
    """
    # do_sample=False keeps output deterministic; max_length bounds generation.
    out = await asyncio.to_thread(
        summarizer, request.inputs, max_length=200, do_sample=False
    )
    # The pipeline returns a list of result dicts; a single input yields one.
    return {"generated_text": out[0]["generated_text"]}