# Run with: uvicorn main:app --reload
import os

# Point the HuggingFace / sentence-transformers caches at a local directory;
# these must be set before the model libraries are imported.
os.environ['HF_HOME'] = 'src/cache'
os.environ['SENTENCE_TRANSFORMERS_HOME'] = 'src/cache'

from fastapi import FastAPI, status
from fastapi.responses import Response, JSONResponse
from pydantic import BaseModel

from typing import List

import time

from src.myNLI import FactChecker
from src.crawler import MyCrawler


# Request body models
class Claim(BaseModel):
    claim: str

class ScrapeBase(BaseModel):
    id: int
    name: str
    scraping_url: str

class ScrapeList(BaseModel):
    data: List[ScrapeBase]
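
# Example request payloads (illustrative values only):
#   Claim      -> {"claim": "The Eiffel Tower is located in Berlin."}
#   ScrapeList -> {"data": [{"id": 1, "name": "example-site",
#                            "scraping_url": "https://example.com"}]}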

app = FastAPI()

# Load the fact-checking model once at startup
t_0 = time.time()
fact_checker = FactChecker()
t_load = time.time() - t_0
print(f"Model load time: {t_load:.2f} s")

crawler = MyCrawler()

# Map model output labels to the numeric codes used in API responses
label_code = {
    "REFUTED": 0,
    "SUPPORTED": 1,
    "NEI": 2
}

@app.get("/")
async def root():
    return {"msg": "This is for interacting with Fact-checking AI Model"}

@app.post("/ai-fact-check")
async def get_claim(req: Claim):
    claim = req.claim
    result = fact_checker.predict(claim)
    print(result)  # debug: raw model output

    if not result:
        return Response(status_code=status.HTTP_204_NO_CONTENT)

    return { "claim": claim,
            "final_label": label_code[result["label"]],
            "evidence": result["evidence"],
            "provider": result["provider"],
            "url": result["url"]
        }
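
# Illustrative call (hypothetical claim text):
#   curl -X POST http://127.0.0.1:8000/ai-fact-check \
#        -H "Content-Type: application/json" \
#        -d '{"claim": "The Eiffel Tower is located in Berlin."}'
# A matched claim returns the numeric label from label_code plus the
# evidence, provider, and url fields produced by FactChecker.predict.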

@app.post("/scraping-check")
async def check_scraping(req: ScrapeList):
    response = []
    for ele in req.data:
        response.append({
            "id": ele.id,
            "name": ele.name,
            "scraping_url": ele.scraping_url,
            "status": crawler.scraping(ele.scraping_url)
        })

    return JSONResponse({
        "list": response
    })
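
# Optional entry point (a sketch mirroring the run comment at the top of
# this file) so the server can also be started with `python main.py`.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("main:app", reload=True)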