walter1 committed on
Commit
25357b7
·
1 Parent(s): 5e848f1

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -177
app.py DELETED
@@ -1,177 +0,0 @@
1
- import itertools
2
- import os
3
-
4
- #%matplotlib inline
5
- import matplotlib.pyplot as plt
6
- import numpy as np
7
- import pandas as pd
8
- import tensorflow as tf
9
-
10
- from sklearn.preprocessing import LabelBinarizer, LabelEncoder
11
- from sklearn.metrics import confusion_matrix
12
-
13
- from tensorflow import keras
14
- from keras.models import Sequential
15
- from keras.layers import Dense, Activation, Dropout
16
- from keras.preprocessing import text, sequence
17
- from keras import utils
18
- from tensorflow.keras.utils import to_categorical
19
-
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
-
28
-
29
-
30
-
31
-
32
# Authenticate the Colab user, then build an authorized gspread client.
from google.colab import auth
auth.authenticate_user()
import gspread
from oauth2client.client import GoogleCredentials

gc = gspread.authorize(GoogleCredentials.get_application_default())

# Fetch every row of Sheet1 from the source spreadsheet; row 0 is the header,
# the remaining rows become the DataFrame body.
spreadsheet = gc.open_by_url('https://docs.google.com/spreadsheets/d/15XNk8vY1pL6bzUo16AHWrx7Gws1Mz5JxOCGvkTnAczA/edit#gid=0')
rows = spreadsheet.worksheet('Sheet1').get_all_values()
df = pd.DataFrame(rows[1:], columns=rows[0])
44
-
45
-
46
-
47
-
48
-
49
-
50
-
51
-
52
-
53
-
54
-
55
-
56
-
57
# 70/30 chronological split of the sheet into train and held-out test rows.
train_size = int(len(df) * 0.7)
train_posts = df['post'][:train_size]
test_posts = df['post'][train_size:]
train_tags = df['tags'][:train_size]
test_tags = df['tags'][train_size:]

# Bag-of-words features over the 1000 most frequent tokens.
max_words = 1000
tokenize = text.Tokenizer(num_words=max_words, char_level=False)
# Fit the vocabulary on the training posts only, then vectorize both splits.
tokenize.fit_on_texts(train_posts)
x_train = tokenize.texts_to_matrix(train_posts)
x_test = tokenize.texts_to_matrix(test_posts)

# Map tag strings to integer ids (fit on train, reuse for test), then one-hot.
encoder = LabelEncoder()
y_train = encoder.fit_transform(train_tags)
y_test = encoder.transform(test_tags)

num_classes = np.max(y_train) + 1
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
79
-
80
# Normally batch size would be 32 or 64; with this tiny sample size a batch of
# 2 predicts better and still trains very fast.
batch_size = 2
epochs = 10  # was 2

# Three hidden stacks of Dense -> Activation -> Dropout, then a softmax head.
model = Sequential([
    Dense(512, input_shape=(max_words,)),
    Activation('relu'),
    Dropout(0.5),
    Dense(512),
    Activation('sigmoid'),
    Dropout(0.5),
    Dense(512),
    Activation('relu'),
    Dropout(0.5),
    Dense(num_classes),
    Activation('softmax'),
])

model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',
    metrics=['accuracy'],
)

# Hold out 10% of the training data for validation during fitting.
history = model.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_split=0.1,
)
109
-
110
-
111
-
112
-
113
-
114
-
115
-
116
-
117
-
118
-
119
-
120
-
121
# Accumulators for the evaluation pass over the held-out posts.
predicted = []
score = []
questn = []

print(f'Number of training {train_size} rows is loaded')

# Classify every held-out post.
# BUGFIX: the original called `classifyTest`, which is defined nowhere in this
# file (NameError at runtime); `classify` is the helper actually defined below —
# it records the question in `questn` and returns the predicted label.
for i in range(len(test_posts)):
    predicted.append(classify(test_posts.iloc[i]))
    # `classify` prints the confidence but does not return it; append a
    # placeholder so all columns stay the same length for the DataFrame below.
    score.append(None)

# Summary table: question, prediction, confidence placeholder, ground truth,
# and whether prediction matched ground truth.
# NOTE(review): `test_tags` keeps its original index (train_size..), which may
# misalign with the 0-based lists when pandas aligns columns — confirm intended.
df = pd.DataFrame({'Question': questn, 'Predicted Answer': predicted, 'Score': score, 'Real Answer': test_tags})
ansIs = df['Predicted Answer'] == df['Real Answer']
df = pd.DataFrame({'Question': questn, 'Predicted Answer': predicted, 'Score': score, 'Real Answer': test_tags, 'Answer is ': ansIs})
133
-
134
-
135
-
136
-
137
-
138
-
139
-
140
-
141
-
142
-
143
-
144
-
145
-
146
-
147
def classify(string):
    """Predict the tag for a single post and return the winning label.

    Side effects: appends the question to the module-level `questn` list and
    prints the top confidence score followed by the predicted label.
    """
    questn.append(string)

    # Vectorize the single post with the tokenizer fitted on the training set.
    matrix = tokenize.texts_to_matrix([string])

    # Run the model on this one example and pick the highest-scoring class.
    scores = model.predict(np.array([matrix[0]]))
    best = np.argmax(scores)
    label = encoder.classes_[best]

    print(scores[0][best])
    print("Predicted label: " + label + "\n")
    return label
168
-
169
-
170
-
171
import gradio as gr

def greet(name):
    """Gradio entry point: delegate the typed question to the classifier."""
    return classify(name)

# Simple text-in / text-out web UI around the classifier.
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()