Upload folder using huggingface_hub
- LogRegBuzzer.py +93 -0
- QBModelConfig.py +11 -0
- QBModelWrapperCopy.py +20 -0
- QBpipeline.py +61 -0
- README.md +199 -0
- config.json +23 -0
- pytorch_model.bin +3 -0
- qbmodel.py +61 -0
- tfidf.py +124 -0
LogRegBuzzer.py
ADDED
@@ -0,0 +1,93 @@
from sklearn.linear_model import LogisticRegression
import joblib
from huggingface_hub import hf_hub_download
from transformers import pipeline
import pandas as pd


class LogisticRegressionBuzzer:

    def __init__(self) -> None:
        self.model = self.load_from_hf_pkl()
        self.features = BuzzerFeatures()

    def load_from_hf_pkl(self) -> LogisticRegression:
        REPO_ID = "nes470/pipeline-as-repo"
        FILENAME = "logreg_buzzer_model.pkl"

        model = joblib.load(
            hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
        )

        return model

    def predict_buzz(self, question, guess):
        X = self.features.get_features(question, guess)

        X_formatted = pd.DataFrame(X, index=[0])
        pred = self.model.predict(X_formatted)
        print(pred)

        # use predict_proba to get confidence probabilities
        prob_pred = self.model.predict_proba(X_formatted)
        print(prob_pred)

        # return the raw prediction array plus the hard 0/1 label cast to float;
        # prob_pred is only printed for inspection
        return (pred, float(pred[0]))


class BuzzerFeatures:
    def __init__(self) -> None:
        self.ner = pipeline("ner")

    # returns dict with all the features
    def get_features(self, question, guess):
        sent_count = self.sentence_count(question)
        guess_word_count = self.guess_word_count(guess)
        guess_has_paren = self.guess_has_paren(guess)
        guess_length = self.guess_length(guess)
        guess_entity = self.guess_entity(guess)

        feats = {'sentence_count': sent_count, 'guess_word_count': guess_word_count,
                 'guess_has_paren': guess_has_paren, 'guess_length': guess_length}

        X = feats | guess_entity

        return X

    def sentence_count(self, text):
        return len(text.split("."))

    def guess_word_count(self, text):
        # guesses are Wikipedia page titles, so words are separated by underscores
        return len(text.split("_"))

    def guess_has_paren(self, text):
        return int("(" in text or ")" in text)

    def guess_length(self, text):
        return len(text)

    def guess_entity(self, text):
        entities = self.ner(text)
        if len(entities) == 0:
            ent_type = ""  # <-- use "None" instead TODO
        else:
            ent_type = entities[0]["entity"]

        if ent_type == "":
            return {'': 1, 'I-LOC': 0, 'I-MISC': 0, 'I-ORG': 0, 'I-PER': 0}
        if ent_type == "I-LOC":
            return {'': 0, 'I-LOC': 1, 'I-MISC': 0, 'I-ORG': 0, 'I-PER': 0}
        if ent_type == "I-MISC":
            return {'': 0, 'I-LOC': 0, 'I-MISC': 1, 'I-ORG': 0, 'I-PER': 0}
        if ent_type == "I-ORG":
            return {'': 0, 'I-LOC': 0, 'I-MISC': 0, 'I-ORG': 1, 'I-PER': 0}
        if ent_type == "I-PER":
            return {'': 0, 'I-LOC': 0, 'I-MISC': 0, 'I-ORG': 0, 'I-PER': 1}
        # fallback for any other entity tag so the caller never merges with None
        return {'': 1, 'I-LOC': 0, 'I-MISC': 0, 'I-ORG': 0, 'I-PER': 0}
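A minimal usage sketch for the buzzer above (not part of the committed file): it assumes LogRegBuzzer.py is importable from the working directory and that logreg_buzzer_model.pkl can be downloaded from nes470/pipeline-as-repo; the question and guess strings are invented for illustration.

    # Hypothetical example: score one question/guess pair with the buzzer.
    from LogRegBuzzer import LogisticRegressionBuzzer

    buzzer = LogisticRegressionBuzzer()  # downloads the pickled model from the Hub
    question = "This author of Pride and Prejudice also wrote Emma."
    guess = "Jane_Austen"  # guesses are Wikipedia page titles with underscores

    pred, confidence = buzzer.predict_buzz(question, guess)
    print(pred, confidence)  # e.g. [1.] 1.0 -> buzz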
QBModelConfig.py
ADDED
@@ -0,0 +1,11 @@
from transformers import PretrainedConfig
import torch

class QBModelConfig(PretrainedConfig):
    model_type = 'QA-umd-quizbowl'

    def __init__(self, **kwargs):
        self.torch_dtype = torch.float16
        super().__init__(**kwargs)
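For context, a short sketch (not from this repo) of how a custom PretrainedConfig subclass like this is usually made discoverable: locally it can be registered against its model_type with AutoConfig.register, while on the Hub the same mapping is expressed through the "auto_map" entry in config.json below.

    # Illustrative only; assumes QBModelConfig.py is importable.
    from transformers import AutoConfig
    from QBModelConfig import QBModelConfig

    AutoConfig.register("QA-umd-quizbowl", QBModelConfig)
    config = QBModelConfig()
    print(config.model_type)  # QA-umd-quizbowl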
QBModelWrapperCopy.py
ADDED
@@ -0,0 +1,20 @@
from typing import List
from transformers import PreTrainedModel
from transformers import PretrainedConfig
from .QBModelConfig import QBModelConfig
from .qbmodel import QuizBowlModel

class QBModelWrapper(PreTrainedModel):
    config_class = QBModelConfig

    def __init__(self, config):
        super().__init__(config)

        self.model = QuizBowlModel(use_hf_pkl=True)
        self.tfmodel = self.model.guesser

    def forward(self, question, context):
        output = self.model.guess_and_buzz([question])
        return output[0]
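A hedged usage sketch for the wrapper (not part of the commit): the relative imports above mean these modules must live inside an importable package, so the package name below is hypothetical, and constructing the wrapper downloads the guesser and buzzer pickles from the Hub.

    # Illustrative only; "qb_repo" is a placeholder package name.
    from qb_repo.QBModelConfig import QBModelConfig
    from qb_repo.QBModelWrapperCopy import QBModelWrapper

    model = QBModelWrapper(QBModelConfig())
    guess, confidence = model.forward(
        "This city on the Seine is the capital of France.", context=""
    )
    print(guess, confidence)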
QBpipeline.py
ADDED
@@ -0,0 +1,61 @@
from typing import Any, Dict
from transformers import Pipeline, PreTrainedModel
from transformers.utils import ModelOutput

class QApipeline(Pipeline):
    def __init__(
        self,
        model: PreTrainedModel,
        **kwargs
    ):
        super().__init__(
            model=model,
            **kwargs
        )

        print("in __init__")

    def __call__(self, question: str, context: str, **kwargs) -> Dict[str, Any]:
        inputs = {
            "question": question,
            "context": context
        }

        outputs = self.model(**inputs)

        answer = self._process_output(outputs)

        print("in __call__")

        return answer

    def _process_output(self, outputs: Any) -> Dict[str, Any]:
        print("in _process_output")

        return {'guess': outputs[0], 'confidence': outputs[1]}

    def _sanitize_parameters(self, **kwargs):
        print("in _sanitize_parameters")

        return {}, {}, {}

    def preprocess(self, inputs):
        print("in preprocess")

        return inputs

    def postprocess(self, outputs):
        print("in postprocess")
        return {'guess': outputs[0], 'confidence': float(outputs[1])}

    def _forward(self, input_tensors, **forward_parameters: Dict) -> ModelOutput:
        print("in _forward")
        return super()._forward(input_tensors, **forward_parameters)
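For reference, a sketch (assumptions noted in the comments) of how a custom pipeline class like QApipeline is normally registered so that pipeline("qa-pipeline-qb", ...) resolves to it. In this repo the same wiring is done declaratively through the "custom_pipelines" block in config.json below rather than in code.

    # PIPELINE_REGISTRY.register_pipeline is a standard transformers API;
    # the surrounding usage is illustrative, not taken from this commit.
    from transformers import AutoModelForQuestionAnswering
    from transformers.pipelines import PIPELINE_REGISTRY
    from QBpipeline import QApipeline

    PIPELINE_REGISTRY.register_pipeline(
        "qa-pipeline-qb",
        pipeline_class=QApipeline,
        pt_model=AutoModelForQuestionAnswering,
    )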
README.md
ADDED
@@ -0,0 +1,199 @@
---
library_name: transformers
license: mit
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
config.json
ADDED
@@ -0,0 +1,23 @@
{
  "architectures": [
    "QBModelWrapper"
  ],
  "auto_map": {
    "AutoConfig": "QBModelConfig.QBModelConfig",
    "AutoModelForQuestionAnswering": "QBModelWrapperCopy.QBModelWrapper"
  },
  "custom_pipelines": {
    "qa-pipeline-qb": {
      "impl": "QBpipeline.QApipeline",
      "pt": [
        "AutoModelForQuestionAnswering"
      ],
      "tf": [
        "TFAutoModelForQuestionAnswering"
      ]
    }
  },
  "model_type": "QA-umd-quizbowl",
  "torch_dtype": "float16",
  "transformers_version": "4.40.1"
}
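The "auto_map" and "custom_pipelines" entries above are what let the Hub resolve this repo's own Python classes. A hedged end-to-end sketch follows; exact behavior depends on the repo's code and pickles downloading and importing cleanly.

    # Illustrative only; trust_remote_code=True is required because the model,
    # config, and pipeline classes live in this repo rather than in transformers.
    from transformers import pipeline

    qa = pipeline(
        "qa-pipeline-qb",
        model="nes470/pipeline-as-repo",
        trust_remote_code=True,
    )
    print(qa(question="This is the longest river in Africa.", context=""))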
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cfa0eae177d84185f121910835717582c0712dbfb8e767adc1857812ae957854
size 128
qbmodel.py
ADDED
@@ -0,0 +1,61 @@
from typing import List, Tuple
import nltk
import sklearn
from .tfidf import TfidfWikiGuesser
import numpy as np
import pandas as pd
from .LogRegBuzzer import LogisticRegressionBuzzer


class QuizBowlModel:

    def __init__(self, use_hf_pkl=False):
        """
        Load your model(s) and whatever else you need in this function.

        Do NOT load your model or resources in the guess_and_buzz() function,
        as it will increase latency severely.
        """
        # best accuracy when using wiki_page_text.json
        self.guesser = TfidfWikiGuesser(wikidump=None, use_hf_pkl=use_hf_pkl)  # can specify a different wikidump if needed
        print("guesser model loaded")

        self.buzzer = LogisticRegressionBuzzer()
        print("buzzer model loaded")

    def guess_and_buzz(self, question_text: List[str]) -> List[Tuple[str, bool]]:
        """
        This function accepts a list of question strings, and returns a list of tuples containing
        strings representing the guess and corresponding booleans representing
        whether or not to buzz.

        So, guess_and_buzz(["This is a question"]) should return [("answer", False)]

        If you are using a deep learning model, try to use batched prediction instead of
        iterating using a for loop.
        """

        answers = []
        top_guesses = 3  # the guesser returns this many guesses per question (sorted by confidence)

        for question in question_text:
            guesses = self.guesser.make_guess(question, num_guesses=top_guesses)
            # print(f"\n\n\n answered {len(answers)} questions so far \n\n")
            # print(f"left to answer {len(question_text)-len(answers)} questions \n\n ")
            # print(f"progress: {(len(answers)/len(question_text)) * 100} \n\n")

            # do the buzzing
            buzz = self.buzzer.predict_buzz(question, guesses[0])

            # make a tuple and add it to the answers list
            tup = (guesses[0], buzz[1])
            print(tup)
            answers.append(tup)
            # might need to format the guess, e.g. replace _ with spaces

        return answers
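A minimal sketch of the intended entry point (questions are invented; the module's relative imports mean it must sit inside an importable package, and loading pulls the guesser and buzzer pickles from the Hub):

    # Illustrative only; "qb_repo" is a placeholder package name.
    from qb_repo.qbmodel import QuizBowlModel

    model = QuizBowlModel(use_hf_pkl=True)
    questions = [
        "This physicist developed the theory of general relativity.",
        "This novel by Herman Melville follows Captain Ahab's hunt for a white whale.",
    ]
    for guess, buzz in model.guess_and_buzz(questions):
        print(guess, buzz)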
tfidf.py
ADDED
@@ -0,0 +1,124 @@
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import json
import zipfile
import pickle
import os
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from huggingface_hub import hf_hub_download
import joblib


class TfidfWikiGuesser:
    def __init__(self, wikidump='resources/wiki_text_16.json', use_hf_pkl=False) -> None:
        self.tfidf = None
        self.corpus = None
        self.titles = None
        self.vectorizer = None
        self.lemmatizer = WordNetLemmatizer()
        model_file = "processed_tfidf_wiki_page_text_model.pkl"  # <--- has best acc so far (using wiki_page_text.json from gdrive folder)
        # model_file = "processed_large_wiki_text_model.pkl"
        # model_file = "processed_tfidf_wiki_16_model.pkl"
        # full_model_path = model_file
        full_model_path = os.path.join("./models", model_file)

        if use_hf_pkl:
            REPO_ID = "nes470/pipeline-as-repo"
            FILENAME = "processed_tfidf_wiki_page_text_model.pkl"

            model = joblib.load(
                hf_hub_download(repo_id=REPO_ID, filename=FILENAME)
            )

            print("loading from huggingface pkl file")
            self.load_from_pk_direct(model)
        else:
            if os.path.exists(full_model_path):
                print("Loading model from pickle...")
                self.load_from_pkl(full_model_path)
            else:
                if wikidump:
                    print("No pre-trained model found, loading data from dump...")
                    self.load_model(wikidump)
                    self.save_model(full_model_path)
                # self.load_model(wikidump)

    def load_model(self, wikidump):
        # the wiki dump is a JSON array of objects with "page" and "text" fields
        with open(wikidump) as f:
            doc = json.load(f)
        # with zipfile.ZipFile('resources/wiki_text_8.json.zip', 'r') as z:
        #     with z.open('wiki_text_8.json') as f:
        #         doc = json.load(f)

        self.corpus, self.titles = self.create_corpus(doc)

        self.vectorizer = TfidfVectorizer(stop_words='english')
        self.tfidf = self.vectorizer.fit_transform(self.corpus)

    def preprocess_text(self, text):
        if type(text) == float:
            return str(text)
        tokens = word_tokenize(text.lower())
        filtered_tokens = [token for token in tokens if token not in stopwords.words('english')]
        lemmatized_tokens = [self.lemmatizer.lemmatize(token) for token in filtered_tokens]
        processed_text = ' '.join(lemmatized_tokens)
        return processed_text

    def create_corpus(self, json_file):
        corpus = []
        page_titles = []

        for json_obj in json_file:
            # corpus.append(self.preprocess_text(json_obj['text']))
            corpus.append(json_obj['text'])
            page_titles.append(json_obj['page'])

        return (corpus, page_titles)

    def make_guess(self, question, num_guesses=1):
        tfidf_question = self.vectorizer.transform([question])

        sim = cosine_similarity(self.tfidf, tfidf_question)

        # get indices of the best-matching documents and keep the top num_guesses
        sim_indices = np.argsort(sim.flatten())[::-1]
        best_indices = sim_indices[:num_guesses]

        # best_docs = []
        best_guesses = []
        for i in best_indices:
            # best_docs.append(self.corpus[i])
            best_guesses.append(self.titles[i])

        return best_guesses

    def save_model(self, file_name):
        with open(file_name, 'wb') as f:
            pickle.dump({
                'vectorizer': self.vectorizer,
                'tfidf_matrix': self.tfidf,
                'titles': self.titles,
                # 'corpus': self.corpus
            }, f)

    def load_from_pkl(self, file_name):
        with open(file_name, 'rb') as f:
            data = pickle.load(f)
            self.vectorizer = data['vectorizer']
            self.tfidf = data['tfidf_matrix']
            self.titles = data['titles']
            # self.corpus = data['corpus']

    def load_from_pk_direct(self, pkl):
        # the object is already unpickled by joblib.load, so use it directly
        data = pkl
        self.vectorizer = data['vectorizer']
        self.tfidf = data['tfidf_matrix']
        self.titles = data['titles']
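Finally, a usage sketch for the guesser on its own (assumptions: tfidf.py is importable, the pickled TF-IDF matrix downloads from the Hub, and the NLTK data packages used by preprocess_text are available, although that helper is not exercised on the pickle path):

    # Illustrative only.
    import nltk
    from tfidf import TfidfWikiGuesser

    for pkg in ("punkt", "stopwords", "wordnet"):
        nltk.download(pkg, quiet=True)

    guesser = TfidfWikiGuesser(wikidump=None, use_hf_pkl=True)
    print(guesser.make_guess("This author wrote Crime and Punishment.", num_guesses=3))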