Update app.py
app.py CHANGED

@@ -59,19 +59,40 @@ def predict_for_example(sentence, tags, model):
 
     return model.predict_tags(x)
 
-def
-
-
-
-
-
-
-
-
-
-
-
-
+def get_noun_chunks(tokens, pos_tags, chunk_tags):
+    # Collect noun chunks from a tagged sentence. A chunk tag of 1 opens a
+    # chunk; the 0s that follow it are treated as the chunk's continuation.
+    sequences = []
+    noun_chunks = []
+    noun_chunks_pos_tags = []
+    noun_chunks_tags = []
+    start = None
+    i = 0
+
+    while i < len(chunk_tags):
+        # When a chunk opens on a noun ('NN'), that noun is also
+        # recorded as a chunk of its own.
+        if chunk_tags[i] == 1:
+            start = i
+            if pos_tags[i] == 'NN':
+                noun_chunks.append([tokens[i]])
+                noun_chunks_pos_tags.append([pos_tags[i]])
+                noun_chunks_tags.append([chunk_tags[i]])
+
+        # Absorb the continuation tags that follow the chunk opener.
+        while i + 1 < len(chunk_tags) and chunk_tags[i + 1] == 0 and (start is not None):
+            i += 1
+
+        if (start is not None) and i > start:
+            noun_chunks.append(tokens[start:i + 1])
+            noun_chunks_pos_tags.append(pos_tags[start:i + 1])
+            noun_chunks_tags.append(chunk_tags[start:i + 1])
+            start = None
+
+        i += 1
+    noun_chunks = [" ".join(chunk) for chunk in noun_chunks]
+    sequences = [noun_chunks, noun_chunks_pos_tags, noun_chunks_tags]
+    return sequences
+
 
 
 model2 = PRNN()  # Instantiate a model
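The helper can be sanity-checked in isolation before it is wired into the demo. A minimal sketch on hand-made inputs (the tokens, POS tags, and chunk tags below are invented for illustration; in the app they come from tokens_and_tags and the model predictions), following the convention the function assumes, where a 1 opens a chunk and the 0s that follow extend it:

# Hypothetical inputs for "the quick fox jumped over the lazy dog"
tokens = ["the", "quick", "fox", "jumped", "over", "the", "lazy", "dog"]
pos_tags = ["DT", "JJ", "NN", "VBD", "IN", "DT", "JJ", "NN"]
chunk_tags = [1, 0, 0, 1, 1, 1, 0, 0]

chunks, chunk_pos, chunk_labels = get_noun_chunks(tokens, pos_tags, chunk_tags)
print(chunks)  # ['the quick fox', 'the lazy dog']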
@@ -94,13 +115,37 @@ P_best4, W_best4 = process_CVresults(CVresults_dict=model_dict4, summarize=False
 model4.params = P_best4
 model4.w = W_best4
 
+model1 = PRNN()  # Instantiate a model
+
+# Load the cross-validation results dictionary from file using pickle
+with open('CVresults_data.pkl', 'rb') as f:
+    model_dict1 = pickle.load(f)
+
+P_best1, W_best1 = process_CVresults(CVresults_dict=model_dict1, summarize=False)
+model1.params = P_best1
+model1.w = W_best1
+
+model3 = PRNN()  # Instantiate a model
+
+# Load the cross-validation results dictionary from file using pickle
+with open('CVresults_data_sigmoid.pkl', 'rb') as f:
+    model_dict3 = pickle.load(f)
+
+P_best3, W_best3 = process_CVresults(CVresults_dict=model_dict3, summarize=False)
+model3.params = P_best3
+model3.w = W_best3
+
+
 def demo_(sentence):
     sentence, tags = tokens_and_tags(sentence)
+    preds1 = predict_for_example(sentence=sentence, tags=tags, model=model1)
+    preds3 = predict_for_example(sentence=sentence, tags=tags, model=model3)
     preds2 = predict_for_example(sentence=sentence, tags=tags, model=model2)
     preds4 = predict_for_example(sentence=sentence, tags=tags, model=model4)
-    return str(preds2),str(preds4),str(
+    return ("predicted labels:\t" + str(preds2) + "\npredicted noun chunks:\t" + str(get_noun_chunks(tokens=sentence, pos_tags=tags, chunk_tags=preds2)),
+            "predicted labels:\t" + str(preds4) + "\npredicted noun chunks:\t" + str(get_noun_chunks(tokens=sentence, pos_tags=tags, chunk_tags=preds4)),
+            "predicted labels:\t" + str(preds1) + "\npredicted noun chunks:\t" + str(get_noun_chunks(tokens=sentence, pos_tags=tags, chunk_tags=preds1)),
+            "predicted labels:\t" + str(preds3) + "\npredicted noun chunks:\t" + str(get_noun_chunks(tokens=sentence, pos_tags=tags, chunk_tags=preds3)),
+            str(tags))
 
 title = "POS-Tagged Corpus Analysis: Training a Recurrent Perceptron for Noun Chunk Identification"
-demo = gr.Interface(fn=demo_, inputs=gr.Textbox(label="sentence for which you want noun chunks"), outputs=[gr.Textbox(label="
+demo = gr.Interface(
+    fn=demo_,
+    inputs=gr.Textbox(label="sentence for which you want noun chunks", lines=1, interactive=True, show_copy_button=True),
+    outputs=[
+        gr.Textbox(label="prediction on conditioned data with step activation function", lines=2, interactive=True, show_copy_button=True),
+        gr.Textbox(label="prediction on conditioned data with sigmoid activation function", lines=2, interactive=True, show_copy_button=True),
+        gr.Textbox(label="prediction on all data with step activation function", lines=2, interactive=True, show_copy_button=True),
+        gr.Textbox(label="prediction on whole data with sigmoid activation function", lines=2, interactive=True, show_copy_button=True),
+        gr.Textbox(label="POS tag labels given by the nltk library", lines=1, interactive=True, show_copy_button=True),
+    ],
+    title=title,
+)
 
 demo.launch(share=True)
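Both with open(...) blocks assume that CVresults_data.pkl and CVresults_data_sigmoid.pkl are present in the Space repository and that pickle is already imported at the top of app.py. A small guard along these lines (a hypothetical helper, not part of the diff) would surface a missing file as a clear error instead of an opaque traceback at startup:

import os
import pickle

def load_cv_results(path):
    # Hypothetical: fail fast with a readable message if the pickle is absent
    if not os.path.exists(path):
        raise FileNotFoundError(f"expected CV-results pickle at {path}")
    with open(path, 'rb') as f:
        return pickle.load(f)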
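gr.Interface maps the tuple returned by demo_ onto the outputs list positionally: the first string fills the first Textbox, and so on, so the return order in demo_ has to stay in sync with the label order. That also makes it easy to smoke-test the function directly before launching, as in this sketch with an arbitrary test sentence:

for text in demo_("the quick fox jumped over the lazy dog"):
    print(text)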