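# Gradio demo: "Tweets Disaster" classifier.
# Loads a pre-trained Keras model and its fitted Keras tokenizer, cleans the
# input tweet with spaCy, and returns the probability that the tweet reports
# a real disaster.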
import json
import gradio as gr
from keras.models import load_model
from keras.preprocessing.text import tokenizer_from_json
from keras.preprocessing.sequence import pad_sequences
import spacy
from string import punctuation
import re

# spaCy English pipeline, used below for tokenisation and lemmatisation.
nlp = spacy.load('en_core_web_sm')

# spaCy's default English stop-word list.
stopwords = nlp.Defaults.stop_words

def clean_text(text):
    # Strip punctuation, then keep only basic ASCII word characters.
    text = text.translate(str.maketrans('', '', punctuation))
    text = re.sub(r"[^\w\s]", " ", text)
    text = re.sub(r"[^A-Za-z0-9^,!.\/'+\-=]", " ", text)

    # Tokenise with spaCy, drop stop words and lemmatise.
    doc = nlp(text)
    tokens = [token for token in doc if str(token) not in stopwords]
    lemmatized = [token.lemma_ for token in tokens]

    return ' '.join(lemmatized)
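# Illustrative example: clean_text("Fires are spreading near the coast!!")
# strips the punctuation, drops spaCy stop words such as "are", "near" and
# "the", and returns the lemmas of the remaining tokens (e.g. "fire",
# "spread", "coast") joined by spaces; the exact output depends on the
# spaCy model version.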

# Assumption: tokenizer.json stores the JSON string returned by
# Tokenizer.to_json() (i.e. it was saved with json.dump(tokenizer.to_json(), f)),
# so json.load() yields the string that tokenizer_from_json() expects.
with open("tokenizer.json", "r") as read_file:
    tokenizer = json.load(read_file)

tokenizer = tokenizer_from_json(tokenizer)

# Pre-trained Keras binary classifier; its single sigmoid output is treated
# below as the probability that the tweet reports a disaster.
model = load_model('tweets_disaster_model.h5')

def tweets_predictions(text):
    # Clean the raw tweet, then tokenise and pad to the model's input length of 50.
    text = clean_text(text)
    text = tokenizer.texts_to_sequences([text])
    text = pad_sequences(text, padding='post', maxlen=50)
    # Single probability for "Disaster"; its complement is "No disaster".
    pred = model.predict(text).tolist()[0]
    return {'No disaster': 1 - pred[0], 'Disaster': pred[0]}
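# Illustrative call (the probabilities are hypothetical and depend entirely on
# the trained model):
#   tweets_predictions("A forest fire is burning near the town")
#   -> {'No disaster': 0.08, 'Disaster': 0.92}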

interface = gr.Interface(fn=tweets_predictions, inputs='textbox', outputs='label', theme='darkdefault',
                title='Tweets Disaster', description='Write a sentence in English and click "Submit". The model returns the probability that the message reports a disaster.')
# share=True also exposes the demo through a temporary public link.
interface.launch(share=True)