Update app.py
Browse files
app.py
CHANGED
@@ -1,32 +1,4 @@
|
|
1 |
-
|
2 |
-
# from pydantic import BaseModel
|
3 |
-
# from transformers import pipeline
|
4 |
-
# import os
|
5 |
-
# from huggingface_hub import login
|
6 |
-
|
7 |
-
# # Log in to Hugging Face
|
8 |
-
# access_token = os.environ.get("ACCESS_TOKEN_1")
|
9 |
-
# login(token=access_token, add_to_git_credential=True)
|
10 |
-
|
11 |
-
# app = FastAPI()
|
12 |
-
|
13 |
-
# # Load the model and tokenizer from the Hugging Face Hub
|
14 |
-
# model_name = "MHULO/yembaner"
|
15 |
-
# nlp = pipeline("ner", model=model_name, tokenizer=model_name)
|
16 |
-
|
17 |
-
# class TextRequest(BaseModel):
|
18 |
-
# text: str
|
19 |
-
|
20 |
-
# @app.post("/predict/")
|
21 |
-
# def predict(request: TextRequest):
|
22 |
-
# ner_results = nlp(request.text)
|
23 |
-
# return ner_results
|
24 |
-
|
25 |
-
# if __name__ == "__main__":
|
26 |
-
# import uvicorn
|
27 |
-
# uvicorn.run(app, host="0.0.0.0", port=80)
|
28 |
-
|
29 |
-
|
30 |
from fastapi import FastAPI
|
31 |
from pydantic import BaseModel
|
32 |
from transformers import pipeline
|
@@ -34,6 +6,9 @@ import os
|
|
34 |
|
35 |
app = FastAPI()
|
36 |
|
|
|
|
|
|
|
37 |
# Load the model and tokenizer from the Hugging Face Hub
|
38 |
model_name = "MHULO/yembaner"
|
39 |
nlp = pipeline("ner", model=model_name, tokenizer=model_name)
|
@@ -46,7 +21,7 @@ def predict(request: TextRequest):
|
|
46 |
ner_results = nlp(request.text)
|
47 |
return ner_results
|
48 |
|
49 |
-
if __name__ == "__main__":
|
50 |
-
import uvicorn
|
51 |
-
uvicorn.run(app, host="0.0.0.0", port=8000)
|
52 |
|
|
|
|
|
|
|
|
1 |
+
import os

from fastapi import FastAPI
from huggingface_hub import login
from pydantic import BaseModel
from transformers import pipeline

app = FastAPI()

# Authenticate with the Hugging Face Hub so the model can be downloaded
# (required if the repo is gated/private). The token is injected through
# the ACCESS_TOKEN_1 environment variable by the hosting platform.
# Guard against a missing token: login(token=None) falls back to an
# interactive credential prompt, which would hang a headless server
# container at startup instead of failing over to anonymous access.
access_token = os.environ.get("ACCESS_TOKEN_1")
if access_token:
    login(token=access_token, add_to_git_credential=True)

# Load the NER model and tokenizer from the Hugging Face Hub once at
# module import time; the pipeline object is reused for every request.
model_name = "MHULO/yembaner"
nlp = pipeline("ner", model=model_name, tokenizer=model_name)
|
|
|
21 |
ner_results = nlp(request.text)
|
22 |
return ner_results
|
23 |
|
|
|
|
|
|
|
24 |
|
25 |
+
@app.get("/")
def root():
    """Landing route: tells API consumers where the prediction endpoint lives."""
    payload = {"prediction url": "/predict/"}
    return payload