import json
import pickle
import re
from datetime import datetime

import gradio as gr
import numpy as np
import pandas as pd
import requests
import tensorflow as tf
from bs4 import BeautifulSoup
from transformers import pipeline

# Load the trained intent classifier, including its weights and the optimizer state
model = tf.keras.models.load_model('core4.h5')
# Load the tokenizer that was fitted on the training data
with open('tokenizer.pickle', 'rb') as handle:
    tokenize = pickle.load(handle)
# Label order must match the label encoding used when the model was trained
text_labels = ['How to apply', 'how much can I get', 'who can apply']
# model.summary()  # uncomment to inspect the model architecture
def greet(string):
  # Vectorise the question and predict its intent
  tokenizedText = tokenize.texts_to_matrix([string])
  prediction = model.predict(np.array([tokenizedText[0]]))
  predicted_label = text_labels[np.argmax(prediction)]
  print(prediction[0][np.argmax(prediction)])  # confidence of the predicted class
  print("Predicted label: " + predicted_label + "\n")

  ###################
  # Log the question, the predicted label and a timestamp to a Google Sheet.
  spreadsheet_id = '1vjWnYsnGc0J6snT67NVbA-NWSGZ5b0eDBVHmg9lbf9s'  # Please set the Spreadsheet ID.
  csv_url = ('https://docs.google.com/spreadsheets/d/' + spreadsheet_id
             + '/export?format=csv&id=' + spreadsheet_id + '&gid=0')

  # Download the sheet as CSV to work out the next free row
  res = requests.get(url=csv_url)
  with open('google.csv', 'wb') as f:
    f.write(res.content)
  df = pd.read_csv('google.csv')
  next_row = len(df) + 2  # +1 for the header row, +1 to move past the last filled row

  # Write one cell at a time through the Apps Script web app
  url = ('https://script.google.com/macros/s/AKfycbwXP5fsDgOXJ9biZQC293o6bTBL3kDOJ07PlmxKjabzdTej6WYdC8Yos6NpDVqAJeVM/exec'
         '?spreadsheetId=' + spreadsheet_id)

  def write_cell(cell_range, value):
    body = {
        "arguments": {"range": cell_range, "valueInputOption": "USER_ENTERED"},
        "body": {"values": [[value]]}
    }
    return requests.post(url, json.dumps(body), headers={'Content-Type': 'application/json'})

  write_cell("Sheet1!A" + str(next_row), string)               # column A: the question
  write_cell("Sheet1!B" + str(next_row), predicted_label)      # column B: the predicted intent
  write_cell("Sheet1!C" + str(next_row), str(datetime.now()))  # column C: timestamp
  #######################
  return predicted_label
# One testing case:
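# A minimal sketch of such a test (assumes core4.h5 and tokenizer.pickle are present;
# note the call also appends a row to the Google Sheet):
# greet("How do I apply for this benefit?")  # expected to return e.g. 'How to apply'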

###################################################
# Question answering over GOV.UK: scrape the relevant pages for each
# benefit/intent pair, then answer free-text questions with a QA pipeline.
# Each entry maps a benefit/intent pair to one or more GOV.UK pages;
# multiple links are stored comma-separated and split before scraping.
benefits = [
  {"benefitName": "Universal Credit", "coreName": "what is this benefit", "link": "https://www.gov.uk/universal-credit/"},
  {"benefitName": "Universal Credit", "coreName": "who can apply", "link": "https://www.gov.uk/universal-credit/eligibility"},
  {"benefitName": "Universal Credit", "coreName": "how much can I get", "link": "https://www.gov.uk/universal-credit/what-youll-get,https://www.gov.uk/universal-credit/how-youre-paid"},
  {"benefitName": "Universal Credit", "coreName": "How to apply", "link": "https://www.gov.uk/universal-credit/how-to-claim"}
]
def requestPage(link):
  # Fetch a page and parse it into a BeautifulSoup tree
  page = requests.get(link)
  soup = BeautifulSoup(page.content, "html.parser")
  return soup
  
def scrapeTable(table):
  # Flatten an HTML table into "header: cell" pairs, one line per body row
  columns = [col.text.strip() for col in table.thead.tr.find_all(recursive=False)]
  rows = table.tbody.find_all(recursive=False)
  clean_rows = ""
  for row in rows:
    elements = ["{}: {}".format(columns[index], element.text.strip())
                for index, element in enumerate(row.find_all(recursive=False))]
    clean_rows += " ".join(elements) + "\n"
  return clean_rows
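# Illustrative output, one line per body row (actual headers come from the page):
# "<header 1>: <cell 1> <header 2>: <cell 2>"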
def scrapePage(page):
  # Collect the page text into one corpus string, starting from the guide contents
  corpus = ""
  content = page.find('div', {"id": "guide-contents"})
  title = content.find('h1', {"class": "part-title"}).text.strip()
  corpus += title + "\n\n"
  print(title)
  content = content.find('div', {"class": "gem-c-govspeak"})
  fragments = content.find_all(recursive=False)
  for frag in fragments:
    text = frag.text.strip()
    if frag.name == 'ul':
      # Join bullet-list items with a "{;}" delimiter instead of newlines
      clean = re.sub('\n+', "{;}", text)
      corpus += "{;}" + clean
    elif frag.name == 'table':
      corpus += scrapeTable(frag)
    else:
      corpus += text
    corpus += "\n"
  return corpus
  
  
# Build the QA context for every benefit/intent pair by scraping its pages
for benefit in benefits:
  links = benefit['link'].split(',')
  print(benefit['benefitName'], benefit['coreName'], len(links))
  context = ""
  for link in links:
    page = requestPage(link)
    context += scrapePage(page)
  benefit['context'] = context
  benefit['contextLen'] = len(context)
  print("--------------------------------")
# Distinct benefit names and core intent names
benefitsClasses = list({x['benefitName'] for x in benefits})
core4Classes = list({x['coreName'] for x in benefits})

# Extractive question-answering pipeline
question_answerer = pipeline("question-answering")
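# No checkpoint is pinned above, so transformers downloads its default QA model on
# first use (distilbert-base-cased-distilled-squad at the time of writing). Pinning
# it explicitly would keep the app reproducible, e.g.:
# question_answerer = pipeline("question-answering", model="distilbert-base-cased-distilled-squad")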


def testQA(question):
  # Only Universal Credit pages are scraped above, so the benefit is fixed for now
  predictedBenefit = "Universal Credit"
  # Classify the question into a core intent (this also logs it to the Google Sheet)
  predictedCore = greet(question)
  start = datetime.now()
  context = list(filter(lambda x: x['benefitName'] == predictedBenefit and x['coreName'] == predictedCore, benefits))[0]
  answer = question_answerer(question=question, context=context['context'])['answer']
  qa_seconds = (datetime.now() - start).total_seconds()  # QA latency, currently unused
  return predictedCore + ': ' + answer
  
# Expose the QA function as a simple text-in/text-out Gradio app
iface = gr.Interface(fn=testQA, inputs="text", outputs="text")
iface.launch()
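# When running outside Hugging Face Spaces, a temporary public URL can be created with:
# iface.launch(share=True)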