File size: 1,578 Bytes
db8fc47
a851bc3
 
c36717c
 
 
 
 
 
 
db8fc47
 
 
c36717c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
db8fc47
c36717c
db8fc47
 
12b19b8
c36717c
12b19b8
 
c36717c
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
from fastapi import FastAPI
import uvicorn
import spaces
import torch
from pydantic import BaseModel
from transformers import RobertaTokenizer, RobertaForSequenceClassification

# Load the tokenizer and classifier once at startup.
#
# BUG FIX: the original guarded this load with `if torch.cuda.is_available():`,
# but the /predict/ endpoint references `tokenizer` and `fc_model`
# unconditionally — on a CPU-only host both names were never bound and every
# request raised NameError. The models load and run fine on CPU, so the
# guard is removed; `@spaces.GPU` handles GPU allocation when one exists.
tokenizer = RobertaTokenizer.from_pretrained('Dzeniks/roberta-fact-check')
fc_model = RobertaForSequenceClassification.from_pretrained('Dzeniks/roberta-fact-check')

app = FastAPI()

# ------------------------------------------------------------------------
class Item(BaseModel):
    """Request body for /predict/: a claim and the evidence to judge it against."""
    # Statement to be fact-checked.
    claim: str
    # Supporting/refuting text the model conditions on.
    evidence: str


@app.post("/predict/")
@spaces.GPU
def fact_checking(item: Item):
    """Classify whether `item.evidence` supports `item.claim`.

    Runs the RoBERTa fact-check model on the (claim, evidence) sequence
    pair and returns ``{"Verdict": label}`` where ``label`` is the argmax
    class index (presumably 0 = supported, 1 = refuted per the
    'Dzeniks/roberta-fact-check' model card — TODO confirm mapping).
    """
    # Encode claim + evidence as a single sequence-pair input for RoBERTa.
    # Renamed from `input` — the original shadowed the builtin.
    inputs = tokenizer.encode_plus(item.claim, item.evidence, return_tensors="pt")

    # Inference only: disable dropout and gradient tracking.
    fc_model.eval()
    with torch.no_grad():
        outputs = fc_model(**inputs)

    # outputs[0] is the logits tensor; argmax gives the predicted class id.
    label = torch.argmax(outputs[0]).item()

    return {"Verdict": label}


@app.get("/")
@spaces.GPU
def greet_json():
    """Root endpoint: static JSON greeting, usable as a liveness check."""
    greeting = {"Hello": "World!"}
    return greeting


if __name__ == "__main__":
    # Serve on all interfaces at 7860, the conventional HF Spaces port.
    uvicorn.run(app=app, host="0.0.0.0", port=7860)


# if __name__ == "__main__":
#     item = {
#         "claim": "Albert Einstein work in the field of computer science.",
#         "evidence": "Albert Einstein was a German-born theoretical physicist, widely acknowledged to be one of the greatest and most influential physicists of all time.",
#     }
#
#     results = fact_checking(item)
#
#     print(results)