repo (string, 358 classes) | pull_number (int64, 6 to 67.9k) | instance_id (string, length 12 to 49) | issue_numbers (sequence, length 1 to 7) | base_commit (string, length 40) | patch (string, length 87 to 101M) | test_patch (string, length 72 to 22.3M) | problem_statement (string, length 3 to 256k) | hints_text (string, length 0 to 545k) | created_at (string, length 20) | PASS_TO_PASS (sequence, length 0) | FAIL_TO_PASS (sequence, length 0) |
---|---|---|---|---|---|---|---|---|---|---|---|
mindsdb/mindsdb | 452 | mindsdb__mindsdb-452 | [
"435"
] | 51701d605e294d1c0309d706af762dbc9a77797d | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.17.9'
+__version__ = '1.18.0'
__description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py
--- a/mindsdb/libs/controllers/predictor.py
+++ b/mindsdb/libs/controllers/predictor.py
@@ -215,6 +215,9 @@ def get_model_data(self, model_name=None, lmd=None):
# ADAPTOR CODE
amd = {}
+ if 'stats_v2' in lmd:
+ amd['data_analysis_v2'] = lmd['stats_v2']
+
if lmd['current_phase'] == MODEL_STATUS_TRAINED:
amd['status'] = 'complete'
elif lmd['current_phase'] == MODEL_STATUS_ERROR:
diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py
--- a/mindsdb/libs/controllers/transaction.py
+++ b/mindsdb/libs/controllers/transaction.py
@@ -140,8 +140,11 @@ def _execute_analyze(self):
self._call_phase_module(module_name='DataExtractor')
self.save_metadata()
+ self._call_phase_module(module_name='DataCleaner', stage=0)
+ self.save_metadata()
+
self.lmd['current_phase'] = MODEL_STATUS_DATA_ANALYSIS
- self._call_phase_module(module_name='StatsGenerator', input_data=self.input_data, modify_light_metadata=True, hmd=self.hmd)
+ self._call_phase_module(module_name='StatsGenerator', input_data=self.input_data, hmd=self.hmd)
self.save_metadata()
self.lmd['current_phase'] = MODEL_STATUS_DONE
@@ -159,14 +162,16 @@ def _execute_learn(self):
self._call_phase_module(module_name='DataExtractor')
self.save_metadata()
+ self._call_phase_module(module_name='DataCleaner', stage=0)
+ self.save_metadata()
+
self.lmd['current_phase'] = MODEL_STATUS_DATA_ANALYSIS
- if 'skip_stats_generation' in self.lmd and self.lmd['skip_stats_generation'] == True:
- self.load_metadata()
- else:
- self.save_metadata()
- self._call_phase_module(module_name='StatsGenerator', input_data=self.input_data, modify_light_metadata=True, hmd=self.hmd)
- self.save_metadata()
+ self._call_phase_module(module_name='StatsGenerator', input_data=self.input_data, hmd=self.hmd)
+ self.save_metadata()
+ self._call_phase_module(module_name='DataCleaner', stage=0)
+ self.save_metadata()
+
self._call_phase_module(module_name='DataSplitter')
self._call_phase_module(module_name='DataTransformer', input_data=self.input_data)
diff --git a/mindsdb/libs/phases/data_cleaner/__init__.py b/mindsdb/libs/phases/data_cleaner/__init__.py
new file mode 100644
diff --git a/mindsdb/libs/phases/data_cleaner/data_cleaner.py b/mindsdb/libs/phases/data_cleaner/data_cleaner.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/libs/phases/data_cleaner/data_cleaner.py
@@ -0,0 +1,27 @@
+from mindsdb.libs.phases.base_module import BaseModule
+from mindsdb.libs.data_types.mindsdb_logger import log
+
+
+class DataCleaner(BaseModule):
+ def _cleanup_w_missing_targets(self, df):
+ initial_len = len(df)
+ df = df.dropna(subset=self.transaction.lmd['predict_columns'])
+ no_dropped = len(df) - initial_len
+ if no_dropped > 0:
+ self.log.warning(f'Dropped {no_dropped} rows because they had null values in one or more of the columns that we are trying to predict. Please always provide non-null values in the columns you want to predict !')
+ return df
+
+ def _cleanup_ignored(self, df):
+ for col_name in df.columns.values:
+ if len(df[col_name].dropna()) < 1:
+ self.transaction.lmd['columns_to_ignore'].append(col_name)
+ self.transaction.lmd['empty_columns'].append(col_name)
+ self.log.warning(f'Column "{col_name}" is empty ! We\'ll go ahead and ignore it, please make sure you gave mindsdb the correct data.')
+
+ df = df.drop(columns=self.transaction.lmd['columns_to_ignore'])
+ return df
+
+ def run(self, stage):
+ if stage == 0:
+ self.transaction.input_data.data_frame = self._cleanup_w_missing_targets(self.transaction.input_data.data_frame)
+ self.transaction.input_data.data_frame = self._cleanup_ignored(self.transaction.input_data.data_frame)
diff --git a/mindsdb/libs/phases/data_splitter/data_splitter.py b/mindsdb/libs/phases/data_splitter/data_splitter.py
--- a/mindsdb/libs/phases/data_splitter/data_splitter.py
+++ b/mindsdb/libs/phases/data_splitter/data_splitter.py
@@ -5,17 +5,7 @@
class DataSplitter(BaseModule):
- def _cleanup_w_missing_targets(self, df):
- initial_len = len(df)
- df = df.dropna(subset=self.transaction.lmd['predict_columns'])
- no_dropped = len(df) - initial_len
- if no_dropped > 0:
- self.log.warning(f'Dropped {no_dropped} rows because they had null values in one or more of the columns that we are trying to predict. Please always provide non-null values in the columns you want to predict !')
- return df
-
def run(self):
- self.transaction.input_data.data_frame = self._cleanup_w_missing_targets(self.transaction.input_data.data_frame)
-
group_by = self.transaction.lmd['model_group_by']
if group_by is None or len(group_by) == 0:
group_by = []
diff --git a/mindsdb/libs/phases/stats_generator/data_preparation.py b/mindsdb/libs/phases/stats_generator/data_preparation.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/libs/phases/stats_generator/data_preparation.py
@@ -0,0 +1,36 @@
+import random
+import numpy as np
+
+from mindsdb.external_libs.stats import calculate_sample_size
+from mindsdb.libs.constants.mindsdb import *
+from mindsdb.libs.helpers.text_helpers import clean_float
+
+def sample_data(df, sample_margin_of_error, sample_confidence_level, log):
+ population_size = len(df)
+
+ sample_size = int(calculate_sample_size(population_size, sample_margin_of_error, sample_confidence_level)) if population_size > 50 else population_size
+ sample_size_pct = sample_size*100/population_size
+
+ # get the indexes of randomly selected rows given the population size
+ input_data_sample_indexes = random.sample(range(population_size), sample_size)
+
+ log.info(f'Analyzing a sample of {sample_size} from a total population of {population_size}, this is equivalent to {sample_size_pct}% of your data.')
+
+ return df.iloc[input_data_sample_indexes]
+
+
+def clean_int_and_date_data(col_data, log):
+ cleaned_data = []
+
+ for ele in col_data:
+ if str(ele) not in ['', str(None), str(False), str(np.nan), 'NaN', 'nan', 'NA', 'null'] and (not ele or not ele.isspace()):
+ try:
+ cleaned_data.append(clean_float(ele))
+ except Exception as e1:
+ try:
+ cleaned_data.append(parse_datetime(str(ele)).timestamp())
+ except Exception as e2:
+ log.warning(f'Failed to parser numerical value with error chain:\n {e1} -> {e2}\n')
+ cleaned_data.append(0)
+
+ return cleaned_data
diff --git a/mindsdb/libs/phases/stats_generator/scores.py b/mindsdb/libs/phases/stats_generator/scores.py
--- a/mindsdb/libs/phases/stats_generator/scores.py
+++ b/mindsdb/libs/phases/stats_generator/scores.py
@@ -169,12 +169,13 @@ def compute_lof_score(stats, columns, col_name):
np_col_data = np.array(columns[col_name]).reshape(-1, 1)
lof = LocalOutlierFactor(contamination='auto')
outlier_scores = lof.fit_predict(np_col_data)
- outlier_indexes = [i for i in range(len(columns[col_name])) if outlier_scores[i] < -0.8]
+
+ outliers = [columns[col_name][i] for i in range(len(columns[col_name])) if outlier_scores[i] < -0.8]
return {
- 'lof_outliers': outlier_indexes
- ,'lof_based_outlier_score': round(10 * (1 - len(outlier_indexes)/len(columns[col_name])))
- ,'percentage_of_log_based_outliers': (len(outlier_indexes)/len(columns[col_name])) * 100
+ 'lof_outliers': outliers
+ ,'lof_based_outlier_score': round(10 * (1 - len(outliers)/len(columns[col_name])))
+ ,'percentage_of_log_based_outliers': (len(outliers)/len(columns[col_name])) * 100
,'lof_based_outlier_score_description':"""
The higher this score, the more outliers your dataset has. This is based on distance from the center of 20 clusters as constructed via KNN.
"""
diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py
--- a/mindsdb/libs/phases/stats_generator/stats_generator.py
+++ b/mindsdb/libs/phases/stats_generator/stats_generator.py
@@ -5,10 +5,9 @@
import sndhdr
import logging
from collections import Counter
-#import multiprocessing
import numpy as np
-import scipy.stats as st
+from scipy.stats import entropy
from dateutil.parser import parse as parse_datetime
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import MiniBatchKMeans
@@ -18,11 +17,10 @@
from mindsdb.config import CONFIG
from mindsdb.libs.constants.mindsdb import *
from mindsdb.libs.phases.base_module import BaseModule
-from mindsdb.libs.helpers.text_helpers import splitRecursive, clean_float, cast_string_to_python_type
+from mindsdb.libs.helpers.text_helpers import splitRecursive, clean_float
from mindsdb.libs.helpers.debugging import *
-from mindsdb.external_libs.stats import calculate_sample_size
from mindsdb.libs.phases.stats_generator.scores import *
-
+from mindsdb.libs.phases.stats_generator.data_preparation import sample_data, clean_int_and_date_data
class StatsGenerator(BaseModule):
"""
@@ -271,25 +269,6 @@ def _get_column_data_type(self, data, data_frame, col_name):
return curr_data_type, curr_data_subtype, type_dist, subtype_dist, additional_info, 'Column ok'
- @staticmethod
- def clean_int_and_date_data(col_data):
- cleaned_data = []
-
- for value in col_data:
- if value != '' and value != '\r' and value != '\n':
- cleaned_data.append(value)
-
- cleaned_data_new = []
-
- for ele in cleaned_data:
- if str(ele) not in ['', str(None), str(False), str(np.nan), 'NaN', 'nan', 'NA', 'null']:
- try:
- cleaned_data_new.append(clean_float(ele))
- except:
- cleaned_data_new.append(parse_datetime(str(ele)).timestamp())
-
- return cleaned_data_new
-
@staticmethod
def get_words_histogram(data, is_full_text=False):
""" Returns an array of all the words that appear in the dataset and the number of times each word appears in the dataset """
@@ -318,22 +297,25 @@ def get_histogram(data, data_type=None, data_subtype=None, full_text=None, hmd=N
is_full_text = True if data_subtype == DATA_SUBTYPES.TEXT else False
return StatsGenerator.get_words_histogram(data, is_full_text), None
elif data_type == DATA_TYPES.NUMERIC or data_subtype == DATA_SUBTYPES.TIMESTAMP:
- data = StatsGenerator.clean_int_and_date_data(data)
- y, x = np.histogram(data, bins=50, range=(min(data),max(data)), density=False)
- x = x[:-1]
- #x = (x + np.roll(x, -1))[:-1] / 2.0 <--- original code, was causing weird bucket values when we had outliers
- x = x.tolist()
- y = y.tolist()
+ Y, X = np.histogram(data, bins=min(50,len(set(data))), range=(min(data),max(data)), density=False)
+ if data_subtype == DATA_SUBTYPES.INT:
+ Y, X = np.histogram(data, bins=[int(round(x)) for x in X], density=False)
+
+ X = X[:-1].tolist()
+ Y = Y.tolist()
+
return {
- 'x': x
- ,'y': y
- }, None
+ 'x': X
+ ,'y': Y
+ }, X
elif data_type == DATA_TYPES.CATEGORICAL or data_subtype == DATA_SUBTYPES.DATE :
histogram = Counter(data)
+ X = list(map(str,histogram.keys()))
+ Y = list(histogram.values())
return {
- 'x': list(map(str,histogram.keys())),
- 'y': list(histogram.values())
- }, None
+ 'x': X,
+ 'y': Y
+ }, Y
elif data_subtype == DATA_SUBTYPES.IMAGE:
image_hashes = []
for img_path in data:
@@ -369,7 +351,7 @@ def get_histogram(data, data_type=None, data_subtype=None, full_text=None, hmd=N
return {
'x': x,
'y': y
- }, kmeans.cluster_centers_
+ }, list(kmeans.cluster_centers_)
else:
return None, None
@@ -524,200 +506,87 @@ def _log_interesting_stats(self, stats):
# Functionality is specific to mindsdb logger
pass
- def run(self, input_data, modify_light_metadata, hmd=None, print_logs=True):
+ def run(self, input_data, hmd=None, print_logs=True):
"""
# Runs the stats generation phase
# This shouldn't alter the columns themselves, but rather provide the `stats` metadata object and update the types for each column
# A lot of information about the data distribution and quality will also be logged to the server in this phase
"""
- ''' @TODO Uncomment when we need multiprocessing, possibly disable on OSX
- no_processes = multiprocessing.cpu_count() - 2
- if no_processes < 1:
- no_processes = 1
- pool = multiprocessing.Pool(processes=no_processes)
- '''
+ stats = {}
+ stats_v2 = {}
+ col_data_dict = {}
+
if print_logs == False:
self.log = logging.getLogger('null-logger')
self.log.propagate = False
- # we dont need to generate statistic over all of the data, so we subsample, based on our accepted margin of error
- population_size = len(input_data.data_frame)
+ sample_df = sample_data(input_data.data_frame, self.transaction.lmd['sample_margin_of_error'], self.transaction.lmd['sample_confidence_level'], self.log)
- if population_size < 50:
- sample_size = population_size
- else:
- sample_size = int(calculate_sample_size(population_size=population_size, margin_error=self.transaction.lmd['sample_margin_of_error'], confidence_level=self.transaction.lmd['sample_confidence_level']))
- #if sample_size > 3000 and sample_size > population_size/8:
- # sample_size = min(round(population_size/8),3000)
+ for col_name in self.transaction.lmd['empty_columns']:
+ stats_v2[col_name]['empty'] = {'is_empty': True}
- # get the indexes of randomly selected rows given the population size
- input_data_sample_indexes = random.sample(range(population_size), sample_size)
- self.log.info('population_size={population_size}, sample_size={sample_size} {percent:.2f}%'.format(population_size=population_size, sample_size=sample_size, percent=(sample_size/population_size)*100))
+ for col_name in sample_df.columns.values:
+ stats_v2[col_name] = {}
+ stats[col_name] = {}
- all_sampled_data = input_data.data_frame.iloc[input_data_sample_indexes]
+ len_wo_nulls = len(input_data.data_frame[col_name].dropna())
+ len_w_nulls = len(input_data.data_frame[col_name])
+ len_unique = len(set(input_data.data_frame[col_name]))
+ stats_v2[col_name]['empty'] = {
+ 'empty_cells': len_w_nulls - len_wo_nulls
+ ,'empty_percentage': 100 * round((len_w_nulls - len_wo_nulls)/len_w_nulls,3)
+ ,'is_empty': False
+ }
- stats = {}
- col_data_dict = {}
+ col_data = sample_df[col_name].dropna()
- for col_name in all_sampled_data.columns.values:
- if col_name in self.transaction.lmd['columns_to_ignore']:
- continue
+ data_type, data_subtype, data_type_dist, data_subtype_dist, additional_info, column_status = self._get_column_data_type(col_data, input_data.data_frame, col_name)
- col_data = all_sampled_data[col_name].dropna()
- full_col_data = all_sampled_data[col_name]
+ stats_v2[col_name]['typing'] = {
+ 'data_type': data_type
+ ,'data_subtype': data_subtype
+ ,'data_type_dist': data_type_dist
+ ,'data_subtype_dist': data_subtype_dist
+ }
- data_type, curr_data_subtype, data_type_dist, data_subtype_dist, additional_info, column_status = self._get_column_data_type(col_data, input_data.data_frame, col_name)
+ for k in stats_v2[col_name]['typing']: stats[col_name][k] = stats_v2[col_name]['typing'][k]
- if column_status == 'Column empty':
- if modify_light_metadata:
- self.transaction.lmd['empty_columns'].append(col_name)
- logging.warning(f'The "{col_name}" column is empty, it will be ignored, please make sure the data in the column is correct !')
- self.transaction.lmd['columns_to_ignore'].append(col_name)
- continue
+ # Do some temporary processing for timestamp and numerical values
+ if data_type == DATA_TYPES.NUMERIC or data_subtype == DATA_SUBTYPES.TIMESTAMP:
+ col_data = clean_int_and_date_data(col_data, self.log)
- new_col_data = []
-
- if curr_data_subtype == DATA_SUBTYPES.TIMESTAMP: #data_type == DATA_TYPES.DATE:
- for element in col_data:
- if str(element) in [str(''), str(None), str(False), str(np.nan), 'NaN', 'nan', 'NA', 'null']:
- new_col_data.append(None)
- else:
- try:
- new_col_data.append(int(parse_datetime(element).timestamp()))
- except:
- self.log.warning(f'Could not convert string from col "{col_name}" to date and it was expected, instead got: {element}')
- new_col_data.append(None)
- col_data = new_col_data
- if data_type == DATA_TYPES.NUMERIC or curr_data_subtype == DATA_SUBTYPES.TIMESTAMP:
- histogram, _ = StatsGenerator.get_histogram(col_data, data_type=data_type, data_subtype=curr_data_subtype)
- x = histogram['x']
- y = histogram['y']
-
- col_data = StatsGenerator.clean_int_and_date_data(col_data)
- # This means the column is all nulls, which we don't handle at the moment
- if len(col_data) < 1:
- return None
-
- if len(col_data) > 0:
- max_value = max(col_data)
- min_value = min(col_data)
- mean = np.mean(col_data)
- median = np.median(col_data)
- var = np.var(col_data)
- skew = st.skew(col_data)
- kurtosis = st.kurtosis(col_data)
- else:
- max_value = 0
- min_value = 0
- mean = 0
- median = 0
- var = 0
- skew = 0
- kurtosis = 0
-
- is_float = True if max([1 if int(i) != i else 0 for i in col_data]) == 1 else False
-
- col_stats = {
- 'data_type': data_type,
- 'data_subtype': curr_data_subtype,
- "mean": mean,
- "median": median,
- "variance": var,
- "skewness": skew,
- "kurtosis": kurtosis,
- "max": max_value,
- "min": min_value,
- "is_float": is_float,
- "histogram": {
- "x": x,
- "y": y
- },
- "percentage_buckets": histogram['x']#xp
+ hist_data = col_data
+ if data_type == DATA_TYPES.CATEGORICAL:
+ hist_data = input_data.data_frame[col_name]
+ stats_v2[col_name]['unique'] = {
+ 'unique_values': len_unique
+ ,'unique_percentage': 100 * round((len_w_nulls - len_unique)/len_w_nulls,8)
}
- elif data_type == DATA_TYPES.CATEGORICAL or curr_data_subtype == DATA_SUBTYPES.DATE:
- histogram, _ = StatsGenerator.get_histogram(input_data.data_frame[col_name], data_type=data_type, data_subtype=curr_data_subtype)
+ histogram, percentage_buckets = StatsGenerator.get_histogram(hist_data, data_type=data_type, data_subtype=data_subtype)
- col_stats = {
- 'data_type': data_type,
- 'data_subtype': curr_data_subtype,
- "histogram": histogram,
- "percentage_buckets": histogram['x']
- }
+ stats[col_name]['histogram'] = histogram
+ stats[col_name]['percentage_buckets'] = percentage_buckets
+ stats_v2[col_name]['histogram'] = histogram
+ stats_v2[col_name]['percentage_buckets'] = percentage_buckets
- elif curr_data_subtype == DATA_SUBTYPES.IMAGE:
- histogram, percentage_buckets = StatsGenerator.get_histogram(col_data, data_subtype=curr_data_subtype)
+ stats[col_name]['empty_cells'] = stats_v2[col_name]['empty']['empty_cells']
+ stats[col_name]['empty_percentage'] = stats_v2[col_name]['empty']['empty_percentage']
- col_stats = {
- 'data_type': data_type,
- 'data_subtype': curr_data_subtype,
- 'percentage_buckets': percentage_buckets,
- 'histogram': histogram
- }
-
- elif curr_data_subtype == DATA_SUBTYPES.ARRAY:
- col_stats = {
- 'data_type': data_type,
- 'data_subtype': curr_data_subtype,
- 'percentage_buckets': None,
- 'histogram': None
- }
-
- # @TODO This is probably wrong, look into it a bit later
- else:
- # see if its a sentence or a word
- histogram, _ = StatsGenerator.get_histogram(col_data, data_type=data_type, data_subtype=curr_data_subtype)
- dictionary = list(histogram.keys())
-
- # if no words, then no dictionary
- if len(col_data) == 0:
- dictionary_available = False
- dictionary_lenght_percentage = 0
- dictionary = []
- else:
- dictionary_available = True
- dictionary_lenght_percentage = len(
- dictionary) / len(col_data) * 100
- # if the number of uniques is too large then treat is a text
- is_full_text = True if curr_data_subtype == DATA_SUBTYPES.TEXT else False
- if dictionary_lenght_percentage > 10 and len(col_data) > 50 and is_full_text==False:
- dictionary = []
- dictionary_available = False
-
- col_stats = {
- 'data_type': data_type,
- 'data_subtype': curr_data_subtype,
- "dictionary": dictionary,
- "dictionaryAvailable": dictionary_available,
- "dictionaryLenghtPercentage": dictionary_lenght_percentage,
- "histogram": histogram
- }
- stats[col_name] = col_stats
- stats[col_name]['data_type_dist'] = data_type_dist
- stats[col_name]['data_subtype_dist'] = data_subtype_dist
- stats[col_name]['column'] = col_name
-
- empty_count = len(full_col_data) - len(col_data)
-
- stats[col_name]['empty_cells'] = empty_count
- stats[col_name]['empty_percentage'] = empty_count * 100 / len(full_col_data)
+ stats_v2[col_name]['additional_info'] = additional_info
for k in additional_info:
stats[col_name][k] = additional_info[k]
col_data_dict[col_name] = col_data
- for col_name in all_sampled_data.columns:
- if col_name in self.transaction.lmd['columns_to_ignore']:
- continue
+ for col_name in sample_df.columns:
+ data_type = stats_v2[col_name]['typing']['data_type']
+ data_subtype = stats_v2[col_name]['typing']['data_subtype']
- # Use the multiprocessing pool for computing scores which take a very long time to compute
# For now there's only one and computing it takes way too long, so this is not enabled
scores = []
- '''
- scores.append(pool.apply_async(compute_clf_based_correlation_score, args=(stats, all_sampled_data, col_name)))
- '''
for score_promise in scores:
# Wait for function on process to finish running
score = score_promise.get()
@@ -730,7 +599,7 @@ def run(self, input_data, modify_light_metadata, hmd=None, print_logs=True):
if 'compute_z_score' in str(score_func) or 'compute_lof_score' in str(score_func):
stats[col_name].update(score_func(stats, col_data_dict, col_name))
else:
- stats[col_name].update(score_func(stats, all_sampled_data, col_name))
+ stats[col_name].update(score_func(stats, sample_df, col_name))
except Exception as e:
self.log.warning(e)
@@ -747,20 +616,34 @@ def run(self, input_data, modify_light_metadata, hmd=None, print_logs=True):
if stats[col_name]['is_foreign_key'] and self.transaction.lmd['handle_foreign_keys']:
self.transaction.lmd['columns_to_ignore'].append(col_name)
- total_rows = len(input_data.data_frame)
+ # New logic
+ col_data = sample_df[col_name]
- if modify_light_metadata:
- self.transaction.lmd['column_stats'] = stats
+ if data_type in (DATA_TYPES.NUMERIC,DATA_TYPES.DATE,DATA_TYPES.CATEGORICAL) or data_subtype in (DATA_SUBTYPES.IMAGE):
+ nr_values = sum(stats_v2[col_name]['histogram']['y'])
+ S = entropy([x/nr_values for x in stats_v2[col_name]['histogram']['y']],base=max(2,len(stats_v2[col_name]['histogram']['y'])))
+ stats_v2[col_name]['bias'] = {
+ 'entropy': S
+ }
+ if S < 0.25:
+ pick_nr = -max(1, int(len(stats_v2[col_name]['histogram']['y'])/10))
+ stats_v2[col_name]['bias']['biased_buckets'] = [stats_v2[col_name]['histogram']['x'][i] for i in np.array(stats_v2[col_name]['histogram']['y']).argsort()[pick_nr:]]
+
+ if 'lof_outliers' in stats[col_name]:
+ if data_subtype in (DATA_SUBTYPES.INT):
+ stats[col_name]['lof_outliers'] = [int(x) for x in stats[col_name]['lof_outliers']]
+
+ stats_v2[col_name]['outliers'] = {
+ 'outlier_values': stats[col_name]['lof_outliers']
+ ,'outlier_score': stats[col_name]['lof_based_outlier_score']
+ }
- self.transaction.lmd['data_preparation']['accepted_margin_of_error'] = self.transaction.lmd['sample_margin_of_error']
+ self.transaction.lmd['column_stats'] = stats
+ self.transaction.lmd['stats_v2'] = stats_v2
- self.transaction.lmd['data_preparation']['total_row_count'] = total_rows
- self.transaction.lmd['data_preparation']['used_row_count'] = sample_size
+ self.transaction.lmd['data_preparation']['accepted_margin_of_error'] = self.transaction.lmd['sample_margin_of_error']
- ''' @TODO Uncomment when we need multiprocessing, possibly disable on OSX
- pool.close()
- pool.join()
- '''
+ self.transaction.lmd['data_preparation']['total_row_count'] = len(input_data.data_frame)
+ self.transaction.lmd['data_preparation']['used_row_count'] = len(sample_df)
self._log_interesting_stats(stats)
- return stats
| diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py
--- a/tests/ci_tests/tests.py
+++ b/tests/ci_tests/tests.py
@@ -20,9 +20,8 @@ def test_model_analysis(amd, to_predict):
for column, importance in zip(input_importance["x"], input_importance["y"]):
assert isinstance(column, str)
assert (len(column) > 0)
-
assert isinstance(importance, (float, int))
- assert (importance > 0)
+ assert (importance >= 0 and importance <= 10)
def test_force_vectors(amd, to_predict):
force_vectors = amd['force_vectors']
@@ -65,7 +64,7 @@ def basic_test(backend='lightwood',use_gpu=True, run_extra=False, IS_CI_TEST=Fal
ctn = True
if ctn:
continue
-
+
code = os.system(f'python3 ../functional_testing/{py_file}')
if code != 0:
raise Exception(f'Test failed with status code: {code} !')
| Various improvements to the Data Analysis
There will be various improvements to the Data Analysis phase for version 1.20.0 (compatible with scout version 2.0).
For now I will keep track of all of them here; if some of them require their own issue, we can open one later.
We are currently still discussing these changes internally; the two big items would be:
* Re-work some of the scores
* Add a predictive-power-based correlation score, i.e. how correlated/similar two columns are based on whether or not they can predict each other. This could be expanded further into building some sort of causal graph between certain columns or column combinations (a rough sketch of the basic idea follows this list).
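As a purely illustrative aside (not part of this issue or of the eventual MindsDB implementation), a predictive-power score between two columns could be sketched as below. The function name, model choice, and scoring are assumptions; only pandas and scikit-learn are assumed to be available.

```python
# Hypothetical sketch of a predictive-power score between two columns:
# train a small classifier to predict target_col from source_col and use
# cross-validated accuracy as the score. Not MindsDB's actual implementation.
import pandas as pd
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier


def predictive_power(df: pd.DataFrame, source_col: str, target_col: str) -> float:
    """Mean 3-fold accuracy of predicting target_col from source_col (0..1)."""
    data = df[[source_col, target_col]].dropna()
    X = LabelEncoder().fit_transform(data[source_col].astype(str)).reshape(-1, 1)
    y = LabelEncoder().fit_transform(data[target_col].astype(str))
    model = DecisionTreeClassifier(max_depth=4)
    return cross_val_score(model, X, y, cv=3).mean()


if __name__ == '__main__':
    df = pd.DataFrame({
        'color': ['red', 'red', 'blue', 'blue'] * 25,
        'label': ['a', 'a', 'b', 'b'] * 25,
    })
    # A score close to 1.0 suggests 'color' can predict 'label' well.
    print(predictive_power(df, 'color', 'label'))
```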
| 2020-05-28T07:06:07Z | [] | [] |
|
mindsdb/mindsdb | 456 | mindsdb__mindsdb-456 | [
"451"
] | e55a1628f672996fe5b0fa4d0faf2240b2c54907 | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.18.3'
+__version__ = '1.18.5'
__description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/libs/constants/mindsdb.py b/mindsdb/libs/constants/mindsdb.py
--- a/mindsdb/libs/constants/mindsdb.py
+++ b/mindsdb/libs/constants/mindsdb.py
@@ -29,9 +29,6 @@ class DATA_SUBTYPES:
VIDEO = 'Video'
AUDIO = 'Audio'
- # URL
- # How do we detect the tpye here... maybe setup async download for random sample an stats ?
-
# SEQUENTIAL
TEXT = 'Text'
ARRAY = 'Array' # Do we even want to support arrays / structs / nested ... etc ?
@@ -41,7 +38,6 @@ class DATA_TYPES:
DATE = 'Date'
CATEGORICAL = 'Categorical'
FILE_PATH = 'File Path'
- URL = 'Url'
SEQUENTIAL = 'Sequential'
class DATA_TYPES_SUBTYPES:
@@ -50,7 +46,6 @@ class DATA_TYPES_SUBTYPES:
,DATA_TYPES.DATE:(DATA_SUBTYPES.DATE, DATA_SUBTYPES.TIMESTAMP)
,DATA_TYPES.CATEGORICAL:(DATA_SUBTYPES.SINGLE, DATA_SUBTYPES.MULTIPLE)
,DATA_TYPES.FILE_PATH:(DATA_SUBTYPES.IMAGE, DATA_SUBTYPES.VIDEO, DATA_SUBTYPES.AUDIO)
- ,DATA_TYPES.URL:()
,DATA_TYPES.SEQUENTIAL:(DATA_SUBTYPES.TEXT, DATA_SUBTYPES.ARRAY)
}
diff --git a/mindsdb/libs/controllers/predictor.py b/mindsdb/libs/controllers/predictor.py
--- a/mindsdb/libs/controllers/predictor.py
+++ b/mindsdb/libs/controllers/predictor.py
@@ -25,7 +25,6 @@ def __init__(self, name, root_folder=CONFIG.MINDSDB_STORAGE_PATH, log_level=CONF
:param name: the namespace you want to identify this mind instance with
:param root_folder: the folder where you want to store this mind or load from
:param log_level: the desired log level
-
"""
# initialize variables
@@ -567,6 +566,9 @@ def analyse_dataset(self, from_data, sample_margin_of_error=0.005):
light_transaction_metadata['force_categorical_encoding'] = []
light_transaction_metadata['handle_text_as_categorical'] = False
+ light_transaction_metadata['data_types'] = {}
+ light_transaction_metadata['data_subtypes'] = {}
+
Transaction(session=self, light_transaction_metadata=light_transaction_metadata, heavy_transaction_metadata=heavy_transaction_metadata, logger=self.log)
return self.get_model_data(model_name=None, lmd=light_transaction_metadata)
@@ -680,7 +682,9 @@ def learn(self, to_predict, from_data, test_from_data=None, group_by=None, windo
light_transaction_metadata['weight_map'] = {}
light_transaction_metadata['confusion_matrices'] = {}
light_transaction_metadata['empty_columns'] = []
-
+ light_transaction_metadata['data_types'] = {}
+ light_transaction_metadata['data_subtypes'] = {}
+
light_transaction_metadata['equal_accuracy_for_all_output_categories'] = equal_accuracy_for_all_output_categories
light_transaction_metadata['output_categories_importance_dictionary'] = output_categories_importance_dictionary if output_categories_importance_dictionary is not None else {}
@@ -724,7 +728,6 @@ def learn(self, to_predict, from_data, test_from_data=None, group_by=None, windo
else:
light_transaction_metadata['use_selfaware_model'] = True
-
if rebuild_model is False:
old_lmd = {}
for k in light_transaction_metadata: old_lmd[k] = light_transaction_metadata[k]
diff --git a/mindsdb/libs/controllers/transaction.py b/mindsdb/libs/controllers/transaction.py
--- a/mindsdb/libs/controllers/transaction.py
+++ b/mindsdb/libs/controllers/transaction.py
@@ -171,7 +171,7 @@ def _execute_learn(self):
self._call_phase_module(module_name='DataCleaner', stage=0)
self.save_metadata()
-
+
self._call_phase_module(module_name='DataSplitter')
self._call_phase_module(module_name='DataTransformer', input_data=self.input_data)
diff --git a/mindsdb/libs/data_types/data_source.py b/mindsdb/libs/data_types/data_source.py
--- a/mindsdb/libs/data_types/data_source.py
+++ b/mindsdb/libs/data_types/data_source.py
@@ -1,17 +1,23 @@
from mindsdb.libs.data_types.mindsdb_logger import log
+from mindsdb.libs.constants.mindsdb import DATA_TYPES_SUBTYPES, DATA_TYPES, DATA_SUBTYPES
+
class DataSource:
def __init__(self, *args, **kwargs):
self.log = log
+ self.data_types = {}
+ self.data_subtypes = {}
df, col_map = self._setup(*args, **kwargs)
- self.setDF(df, col_map)
+ self._set_df(df, col_map)
self._cleanup()
- def _setup(self, df):
+ def _setup(self, df, **kwargs):
col_map = {}
+
for col in df.columns:
col_map[col] = col
+
return df, col_map
def _cleanup(self):
@@ -21,7 +27,25 @@ def _cleanup(self):
def df(self):
return self._df
- def setDF(self, df, col_map):
+ def set_subtypes(self, data_subtypes):
+ if data_subtypes is not None:
+ for col in data_subtypes:
+ if col not in self._col_map:
+ del data_subtypes[col]
+ log.warning(f'Column {col} not present in your data, ignoring the "{data_subtypes[col]}" subtype you specified for it')
+
+ self.data_subtypes = data_subtypes
+ for col in self.data_subtypes:
+ col_subtype = self.data_subtypes[col]
+ if col_subtype not in [getattr(DATA_SUBTYPES,x) for x in DATA_SUBTYPES.__dict__ if '__' not in x]:
+ raise Exception(f'Invalid data subtype: {col_subtype}')
+
+ for col_type in DATA_TYPES_SUBTYPES.subtypes:
+ if col_subtype in DATA_TYPES_SUBTYPES.subtypes[col_type]:
+ self.data_types[col] = col_type
+
+ def _set_df(self, df, col_map):
+
self._df = df
self._col_map = col_map
diff --git a/mindsdb/libs/phases/data_extractor/data_extractor.py b/mindsdb/libs/phases/data_extractor/data_extractor.py
--- a/mindsdb/libs/phases/data_extractor/data_extractor.py
+++ b/mindsdb/libs/phases/data_extractor/data_extractor.py
@@ -114,12 +114,6 @@ def _validate_input_data_integrity(self):
self.log.error(error)
raise ValueError(error)
- # make sure that the column we are trying to predict is on the input_data
- # else fail, because we cannot predict data we dont have
-
- #if self.transaction.lmd['model_is_time_series'] or self.transaction.lmd['type'] == TRANSACTION_LEARN:
- # ^ How did this even make sense before ? Why did it not crash tests ? Pressumably because the predict col was loaded into `input_data` as an empty col
-
if self.transaction.lmd['type'] == TRANSACTION_LEARN:
for col_target in self.transaction.lmd['predict_columns']:
if col_target not in self.transaction.input_data.columns:
@@ -130,6 +124,12 @@ def _validate_input_data_integrity(self):
raise ValueError(err)
return
+ def _set_user_data_subtypes(self):
+ if 'from_data' in self.transaction.hmd and self.transaction.hmd['from_data'] is not None:
+ for col in self.transaction.hmd['from_data'].data_subtypes:
+ self.transaction.lmd['data_types'][col] = self.transaction.hmd['from_data'].data_types[col]
+ self.transaction.lmd['data_subtypes'][col] = self.transaction.hmd['from_data'].data_subtypes[col]
+
def run(self):
# --- Dataset gets randomized or sorted (if timeseries) --- #
result = self._get_prepared_input_df()
@@ -141,6 +141,8 @@ def run(self):
self.transaction.input_data.data_frame = result
# --- Some information about the dataset gets transplanted into transaction level variables --- #
+ self._set_user_data_subtypes()
+
# --- Some preliminary dataset integrity checks --- #
self._validate_input_data_integrity()
# --- Some preliminary dataset integrity checks --- #
diff --git a/mindsdb/libs/phases/stats_generator/stats_generator.py b/mindsdb/libs/phases/stats_generator/stats_generator.py
--- a/mindsdb/libs/phases/stats_generator/stats_generator.py
+++ b/mindsdb/libs/phases/stats_generator/stats_generator.py
@@ -133,6 +133,14 @@ def _get_column_data_type(self, data, data_frame, col_name):
subtype_dist = {}
additional_info = {'other_potential_subtypes': [], 'other_potential_types': []}
+ if col_name in self.transaction.lmd['data_subtypes']:
+ curr_data_type = self.transaction.lmd['data_types'][col_name]
+ curr_data_subtype = self.transaction.lmd['data_subtypes'][col_name]
+ type_dist[curr_data_type] = len(data)
+ subtype_dist[curr_data_subtype] = len(data)
+ self.log.info(f'Manually setting the types for column {col_name} to {curr_data_type}->{curr_data_subtype}')
+ return curr_data_type, curr_data_subtype, type_dist, subtype_dist, additional_info, 'Column ok'
+
# calculate type_dist
if len(data) < 1:
self.log.warning(f'Column {col_name} has no data in it. Please remove {col_name} from the training file or fill in some of the values !')
| diff --git a/tests/ci_tests/tests.py b/tests/ci_tests/tests.py
--- a/tests/ci_tests/tests.py
+++ b/tests/ci_tests/tests.py
@@ -58,11 +58,7 @@ def basic_test(backend='lightwood',use_gpu=True, run_extra=False, IS_CI_TEST=Fal
# Skip data source tests since installing dependencies is annoying
# @TODO: Figure out a way to make travis install required dependencies on osx
- ctn = False
- for name in ['all_data_sources', 'custom_model']:
- if name in py_file:
- ctn = True
- if ctn:
+ if any(x in py_file for x in ['all_data_sources', 'custom_model']):
continue
code = os.system(f'python3 ../functional_testing/{py_file}')
diff --git a/tests/functional_testing/data_source_setting.py b/tests/functional_testing/data_source_setting.py
new file mode 100644
--- /dev/null
+++ b/tests/functional_testing/data_source_setting.py
@@ -0,0 +1,31 @@
+from mindsdb.libs.data_sources.file_ds import FileDS
+from mindsdb.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
+
+
+data_source = FileDS('https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/german_credit_data/processed_data/test.csv')
+data_source.set_subtypes({})
+
+data_source_mod = FileDS('https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/german_credit_data/processed_data/test.csv')
+data_source_mod.set_subtypes({'credit_usage': 'Int', 'Average_Credit_Balance': 'Text','existing_credits': 'Binary Category'})
+
+import mindsdb
+analysis = mindsdb.Predictor('analyzer1').analyse_dataset(data_source)
+analysis_mod = mindsdb.Predictor('analyzer2').analyse_dataset(data_source_mod)
+
+a1 = analysis['data_analysis_v2']
+a2 = analysis_mod['data_analysis_v2']
+assert(len(a1) == len(a2))
+assert(a1['over_draft']['typing']['data_type'] == a2['over_draft']['typing']['data_type'])
+
+assert(a1['credit_usage']['typing']['data_type'] == a2['credit_usage']['typing']['data_type'])
+assert(a1['credit_usage']['typing']['data_subtype'] != a2['credit_usage']['typing']['data_subtype'])
+assert(a2['credit_usage']['typing']['data_subtype'] == DATA_SUBTYPES.INT)
+
+assert(a1['Average_Credit_Balance']['typing']['data_type'] != a2['Average_Credit_Balance']['typing']['data_type'])
+assert(a1['Average_Credit_Balance']['typing']['data_subtype'] != a2['Average_Credit_Balance']['typing']['data_subtype'])
+assert(a2['Average_Credit_Balance']['typing']['data_subtype'] == DATA_SUBTYPES.TEXT)
+assert(a2['Average_Credit_Balance']['typing']['data_type'] == DATA_TYPES.SEQUENTIAL)
+
+assert(a1['existing_credits']['typing']['data_type'] == a2['existing_credits']['typing']['data_type'])
+assert(a1['existing_credits']['typing']['data_subtype'] != a2['existing_credits']['typing']['data_subtype'])
+assert(a2['existing_credits']['typing']['data_subtype'] == DATA_SUBTYPES.SINGLE)
| Support user-decided type
Allow the user to configure the data type/subtype. For now, do it via an unstable param; in the future, data skillet might be responsible for providing these (so they will essentially be part of the data source).
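The functional test added by this PR (`tests/functional_testing/data_source_setting.py`, visible in the test patch for this row) shows what the unstable param looks like in practice; the snippet below is a lightly condensed sketch of that test, with the predictor name shortened for illustration.

```python
# Condensed from tests/functional_testing/data_source_setting.py in this PR:
# the user overrides detected column subtypes before analysing the dataset.
import mindsdb
from mindsdb.libs.data_sources.file_ds import FileDS

data_source = FileDS('https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/german_credit_data/processed_data/test.csv')
data_source.set_subtypes({
    'credit_usage': 'Int',
    'Average_Credit_Balance': 'Text',
    'existing_credits': 'Binary Category',
})

analysis = mindsdb.Predictor('analyzer').analyse_dataset(data_source)
# The user-chosen subtype now shows up in the analysis output:
print(analysis['data_analysis_v2']['credit_usage']['typing']['data_subtype'])  # 'Int'
```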
| 2020-06-02T11:55:54Z | [] | [] |
|
mindsdb/mindsdb | 505 | mindsdb__mindsdb-505 | [
"487"
] | 9f4703418a2f4f5de8563ede32e3fdb232a61b85 | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.26.1'
+__version__ = '1.26.2'
__description__ = "MindsDB's goal is to make it very simple for developers to use the power of artificial neural networks in their projects."
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/libs/data_types/transaction_data.py b/mindsdb/libs/data_types/transaction_data.py
--- a/mindsdb/libs/data_types/transaction_data.py
+++ b/mindsdb/libs/data_types/transaction_data.py
@@ -6,4 +6,5 @@ def __init__(self):
self.train_df = None
self.test_df = None
self.validation_df = None
+ self.sample_df = None
self.columns = []
diff --git a/mindsdb/libs/phases/data_analyzer/data_analyzer.py b/mindsdb/libs/phases/data_analyzer/data_analyzer.py
--- a/mindsdb/libs/phases/data_analyzer/data_analyzer.py
+++ b/mindsdb/libs/phases/data_analyzer/data_analyzer.py
@@ -356,10 +356,7 @@ def run(self, input_data):
stats_v2 = self.transaction.lmd['stats_v2']
col_data_dict = {}
- sample_df = sample_data(input_data.data_frame,
- self.transaction.lmd['sample_margin_of_error'],
- self.transaction.lmd['sample_confidence_level'],
- self.log)
+ sample_df = input_data.sample_df
for col_name in self.transaction.lmd['empty_columns']:
stats_v2[col_name] = {}
diff --git a/mindsdb/libs/phases/type_deductor/type_deductor.py b/mindsdb/libs/phases/type_deductor/type_deductor.py
--- a/mindsdb/libs/phases/type_deductor/type_deductor.py
+++ b/mindsdb/libs/phases/type_deductor/type_deductor.py
@@ -244,13 +244,14 @@ def run(self, input_data):
stats = defaultdict(dict)
stats_v2 = defaultdict(dict)
- # Really bad that these parameters are implicitly passed through lmd
- # Perhaps sampling can be moved somewhere upwards,
- # so that it can be reused by all downstream phases?
- sample_df = sample_data(input_data.data_frame,
- self.transaction.lmd['sample_margin_of_error'],
- self.transaction.lmd['sample_confidence_level'],
- self.log)
+ self.transaction.input_data.sample_df = sample_data(
+ input_data.data_frame,
+ self.transaction.lmd['sample_margin_of_error'],
+ self.transaction.lmd['sample_confidence_level'],
+ self.log
+ )
+
+ sample_df = self.transaction.input_data.sample_df
for col_name in sample_df.columns.values:
col_data = sample_df[col_name].dropna()
| diff --git a/tests/unit_tests/libs/phases/data_analyzer/test_data_analyzer.py b/tests/unit_tests/libs/phases/data_analyzer/test_data_analyzer.py
--- a/tests/unit_tests/libs/phases/data_analyzer/test_data_analyzer.py
+++ b/tests/unit_tests/libs/phases/data_analyzer/test_data_analyzer.py
@@ -76,6 +76,7 @@ def test_data_analysis(self, transaction, lmd):
input_data = TransactionData()
input_data.data_frame = input_dataframe
+ input_data.sample_df = input_dataframe.iloc[n_points // 2:]
data_analyzer.run(input_data)
stats_v2 = lmd['stats_v2']
@@ -114,6 +115,7 @@ def test_empty_values(self, transaction, lmd):
input_dataframe['numeric_int'].iloc[::2] = None
input_data = TransactionData()
input_data.data_frame = input_dataframe
+ input_data.sample_df = input_dataframe.iloc[n_points // 2:]
data_analyzer.run(input_data)
stats_v2 = lmd['stats_v2']
| Sampling of data is done twice in `TypeDeductor` and `DataAnalyzer`
**Is your feature request related to a problem? Please describe.**
Both phases do a step of sampling data. Some time could be saved by computing it only once.
E.g. here:
https://github.com/mindsdb/mindsdb/blob/master/mindsdb/libs/phases/type_deductor/type_deductor.py#L249
**Describe the solution you'd like**
I am not familiar enough with the codebase to see which solution is best.
**Describe alternatives you've considered**
I see the following options:
* Cache the sample if it was already computed (can be done inside the sampling function or using a decorator)
* Do it as a separate phase and keep the sample in `transaction.heavy_meta_data`
**Additional context**
@maximlopin made an interesting suggestion on caching it inside a wrapper over `pd.DataFrame`:
https://github.com/mindsdb/mindsdb/pull/475#issuecomment-641952668
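For illustration only, here is a minimal, self-contained sketch of the general "compute the sample once and reuse it" pattern behind the options above; the class and attribute names are made up and are not MindsDB's actual API.

```python
# Toy sketch of "sample once, reuse in later phases"; names are illustrative.
import random
import pandas as pd


class TransactionData:
    """Stand-in for a container shared by the pipeline phases."""
    def __init__(self, data_frame: pd.DataFrame):
        self.data_frame = data_frame
        self.sample_df = None  # filled in by the first phase that needs a sample

    def get_sample(self, fraction: float = 0.1) -> pd.DataFrame:
        if self.sample_df is None:
            n = max(1, int(len(self.data_frame) * fraction))
            idx = random.sample(range(len(self.data_frame)), n)
            self.sample_df = self.data_frame.iloc[idx]
        return self.sample_df


if __name__ == '__main__':
    data = TransactionData(pd.DataFrame({'a': range(1000)}))
    s1 = data.get_sample()
    s2 = data.get_sample()   # a second phase reuses the cached sample
    assert s1 is s2
```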
| Hmh, we have an entity called `input_data` which already acts as a joint container for the dataframes (test, validation and train) that get created later on.
We could add a fourth element to `input_data`, called `sample_set`, and populate it during the `TypeInference` phase; that is probably the easiest and cleanest solution. | 2020-06-18T07:19:21Z | [] | [] |
mindsdb/mindsdb | 554 | mindsdb__mindsdb-554 | [
"550",
"550"
] | 398a3ed2c1320cbf8842b05e53b8651d46c7078b | diff --git a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
--- a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
+++ b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
@@ -76,8 +76,7 @@ def get(self):
break
count_header = int(packet_string[3])
body += self.mysql_socket.recv(len_header)
- self.session.logging.info('Got packet')
- self.session.logging.info(body)
+ self.session.logging.info(f'Got packet: {str(body)}')
self.proxy.count = int(count_header) + 1
self.setup(len(body), count_header, body)
return True
diff --git a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py
--- a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py
+++ b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py
@@ -6,8 +6,10 @@ def setup(self, length=0, count_header=1, body=''):
length = len(body)
if length == 0:
+ self.password = b''
return
self.enc_password = Datum('string<EOF>') # 0x04
buffer = body
buffer = self.enc_password.setFromBuff(buffer)
+ self.password = self.enc_password.value
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -97,17 +97,17 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
res = self.mindsdb_native.predict(name=table, when=new_where, when_data=where_data)
predicted_columns = self.mindsdb_native.get_model_data(name=table)['predict']
- length = len(res.data[predicted_columns[0]])
+ length = len(res._data[predicted_columns[0]])
data = []
- keys = [x for x in list(res.data.keys()) if x in columns]
+ keys = [x for x in list(res._data.keys()) if x in columns]
confidence_keys = [f'{x}_confidence' for x in predicted_columns]
for i in range(length):
row = {}
for key in keys:
- row[key] = res.data[key][i]
+ row[key] = res._data[key][i]
for key in confidence_keys:
- row['$' + key] = res.data[key][i]
+ row['$' + key] = res._data[key][i]
data.append(row)
if select_data_query is not None:
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -137,7 +137,10 @@ def switch_auth(method='mysql_native_password'):
self.packet(SwitchOutPacket, seed=self.salt, method=method).send()
switch_out_answer = self.packet(SwitchOutResponse)
switch_out_answer.get()
- return switch_out_answer.enc_password.value
+ password = switch_out_answer.password
+ if method == 'mysql_native_password' and len(password) == 0:
+ password = handshake_resp.scramble_func('', self.salt)
+ return password
def get_fast_auth_password():
log.info('Asking for fast auth password')
@@ -191,36 +194,37 @@ def get_fast_auth_password():
username = handshake_resp.username.value.decode()
- if orig_username == username and HARDCODED_PASSWORD == '':
- log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'empty password')
- password = ''
- elif (DEFAULT_AUTH_METHOD not in client_auth_plugin) or \
- self.session.is_ssl is False and 'caching_sha2_password' in client_auth_plugin:
- new_method = 'caching_sha2_password' if 'caching_sha2_password' in client_auth_plugin else 'mysql_native_password'
-
- if new_method == 'caching_sha2_password' and self.session.is_ssl is False:
- log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'error: cant switch to caching_sha2_password without SSL')
- self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'caching_sha2_password without SSL not supported').send()
- return False
+ if client_auth_plugin != DEFAULT_AUTH_METHOD:
+ if client_auth_plugin == 'mysql_native_password' and \
+ orig_password == '' and len(handshake_resp.enc_password.value) == 0:
+ password = orig_password
+ else:
+ new_method = 'caching_sha2_password' if client_auth_plugin == 'caching_sha2_password' else 'mysql_native_password'
- log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- f'switch auth method to {new_method}')
- password = switch_auth(new_method)
+ if new_method == 'caching_sha2_password' and self.session.is_ssl is False:
+ log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
+ 'error: cant switch to caching_sha2_password without SSL')
+ self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'caching_sha2_password without SSL not supported').send()
+ return False
- if new_method == 'caching_sha2_password':
- password = get_fast_auth_password()
- else:
- orig_password = orig_password_hash
+ log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
+ f'switch auth method to {new_method}')
+ password = switch_auth(new_method)
+ if new_method == 'caching_sha2_password':
+ password = get_fast_auth_password()
+ else:
+ orig_password = orig_password_hash
+ elif orig_username == username and HARDCODED_PASSWORD == '':
+ log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
+ 'empty password')
+ password = ''
elif 'caching_sha2_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
'check auth using caching_sha2_password')
password = get_fast_auth_password()
orig_password = HARDCODED_PASSWORD
-
elif 'mysql_native_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
'check auth using mysql_native_password')
@@ -892,7 +896,3 @@ def startProxy(config):
# interrupt the program with Ctrl-C
log.info('Waiting for incoming connections...')
server.serve_forever()
-
-
-if __name__ == "__main__":
- MysqlProxy.startProxy()
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -56,10 +56,13 @@ def _query(self, query):
response = requests.post(f'http://{host}:{port}', data=query, params=params)
+ if response.status_code != 200:
+ raise Exception(f'Error: {response.content}\nQuery:{query}')
+
return response
def setup(self):
- self._query('DROP DATABASE IF EXISTS MINDSB')
+ self._query('DROP DATABASE IF EXISTS mindsdb')
self._query('CREATE DATABASE IF NOT EXISTS mindsdb')
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
--- a/mindsdb/interfaces/database/database.py
+++ b/mindsdb/interfaces/database/database.py
@@ -25,9 +25,8 @@ def unregister_predictor(self, name):
for integration in self.integration_arr: integration.unregister_predictor(name)
def check_connections(self):
- broken_connections = []
+ connections = {}
for integration in self.integration_arr:
- if not integration.check_connection():
- broken_connections.append(integration.name)
+ connections[integration.name] = integration.check_connection()
- return broken_connections
+ return connections
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -68,12 +68,12 @@ def get_models(self, status='any'):
def delete_model(self, name):
F.delete_model(name)
- dbw.unregister_predictor(name)
+ self.dbw.unregister_predictor(name)
def rename_model(self, name, new_name):
- dbw.unregister_predictor(name)
+ self.dbw.unregister_predictor(name)
F.rename_model(name, new_name)
- dbw.register_predictors(new_name)
+ self.dbw.register_predictors(new_name)
def load_model(self, fpath):
F.load_model(model_archive_path=fpath)
| diff --git a/tests/docker/cli.sh b/tests/docker/cli.sh
--- a/tests/docker/cli.sh
+++ b/tests/docker/cli.sh
@@ -14,9 +14,9 @@ if [ $1 = "mariadb" ]; then
wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1uH2yKnvLBCpDpQnhOmm_Us988-AcKkLw' -O mariadb/jars/wrapper/JdbcInterface.jar
fi
mkdir -p mariadb/connectData/
- docker-compose up mariadb-test
+ docker-compose up mariadb
elif [ $1 = "mariadb-stop" ]; then
- docker-compose stop mariadb-test
+ docker-compose stop mariadb
elif [ $1 = "clickhouse" ]; then
mkdir -p clickhouse/database/
docker-compose up clickhouse
diff --git a/tests/docker/docker-compose.yml b/tests/docker/docker-compose.yml
--- a/tests/docker/docker-compose.yml
+++ b/tests/docker/docker-compose.yml
@@ -4,10 +4,10 @@
version: "3"
services:
mariadb:
- build: .
+ build: mariadb/.
network_mode: host
- image: mariadb-connect:1
- container_name: connect
+ image: mariadb-test:1.0
+ container_name: mariadb-test
command: --default-authentication-plugin=caching_sha2_password
ports:
- 3306:3306
@@ -15,11 +15,13 @@ services:
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
MYSQL_ROOT_PASSWORD: "root"
volumes:
- - ./connectData:/var/lib/mysql
- - ./connect.cnf:/etc/mysql/mariadb.conf.d/connect.cnf
+ - ./mariadb/connectData:/var/lib/mysql
+ - ./mariadb/connect.cnf:/etc/mysql/mariadb.conf.d/connect.cnf
clickhouse:
network_mode: host
image: yandex/clickhouse-server:20.1
+ volumes:
+ - ./clickhouse/database:/var/lib/clickhouse
ports:
- 8123:8123
- 9000:9000
diff --git a/tests/integration_tests/common.py b/tests/integration_tests/common.py
--- a/tests/integration_tests/common.py
+++ b/tests/integration_tests/common.py
@@ -1,5 +1,8 @@
import psutil
import time
+import pathlib
+import os
+import json
def is_port_in_use(port_num):
portsinuse = []
@@ -17,3 +20,27 @@ def wait_port(port_num, timeout):
in_use = is_port_in_use(port_num)
return in_use
+
+def prepare_config(config):
+ for key in config._config['integrations'].keys():
+ config._config['integrations'][key]['enabled'] = key == 'default_mariadb'
+
+ TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../temp/').resolve()
+ TEMP_DIR.mkdir(parents=True, exist_ok=True)
+
+ config.merge({
+ 'interface': {
+ 'datastore': {
+ 'storage_dir': str(TEMP_DIR.joinpath('datastore/'))
+ },
+ 'mindsdb_native': {
+ 'storage_dir': str(TEMP_DIR.joinpath('predictors/'))
+ }
+ }
+ })
+
+ temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
+ with open(temp_config_path, 'wt') as f:
+ f.write(json.dumps(config._config))
+
+ return temp_config_path
diff --git a/tests/integration_tests/test_mariadb.py b/tests/integration_tests/test_mariadb.py
--- a/tests/integration_tests/test_mariadb.py
+++ b/tests/integration_tests/test_mariadb.py
@@ -13,9 +13,9 @@
from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb.utilities.config import Config
-from mindsdb.interfaces.mariadb.mariadb import Mariadb
+from mindsdb.interfaces.database.database import DatabaseWrapper
-from common import wait_port
+from common import wait_port, prepare_config
TEST_CONFIG = '/path_to/config.json'
@@ -132,6 +132,7 @@ def setUpClass(cls):
test_tables = query('show tables from test')
test_tables = [x[0] for x in test_tables]
if TEST_DATA_TABLE not in test_tables:
+ print('creating test data table...')
query(f'''
CREATE TABLE test.{TEST_DATA_TABLE} (
number_of_rooms int,
@@ -174,6 +175,7 @@ def setUpClass(cls):
{rental_price}
)''')
i += 1
+ print('done')
def test_1_initial_state(self):
print(f'\nExecuting {inspect.stack()[0].function}')
@@ -297,14 +299,14 @@ def test_7_delete_predictor_by_delete_statement(self):
def wait_mysql(timeout):
global config
- m = Mariadb(config)
+ m = DatabaseWrapper(config)
start_time = time.time()
- connected = m.check_connection()
- while connected is False and (time.time() - start_time) < timeout:
+ connected = m.check_connections()['default_mariadb']
+ while not connected and (time.time() - start_time) < timeout:
time.sleep(2)
- connected = m.check_connection()
+ connected = m.check_connections()['default_mariadb']
return connected
@@ -318,31 +320,7 @@ def stop_mariadb():
maria_sp.wait()
if __name__ == "__main__":
- for key in config._config['integrations'].keys():
- config._config['integrations'][key]['enabled'] = key == 'default_mariadb'
-
- TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../temp/').resolve()
-
- config.merge({
- 'interface': {
- 'datastore': {
- 'storage_dir': str(pathlib.Path(TEMP_DIR).joinpath('datastore/'))
- },
- 'mindsdb_native': {
- 'storage_dir': str(pathlib.Path(TEMP_DIR).joinpath('predictors/'))
- }
- }
- })
-
- if not os.path.isdir(config['interface']['datastore']['storage_dir']):
- os.makedirs(config['interface']['datastore']['storage_dir'])
-
- if not os.path.isdir(config['interface']['mindsdb_native']['storage_dir']):
- os.makedirs(config['interface']['mindsdb_native']['storage_dir'])
-
- temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
- with open(temp_config_path, 'wt') as f:
- f.write(json.dumps(config._config))
+ temp_config_path = prepare_config(config)
maria_sp = subprocess.Popen(
['./cli.sh', 'mariadb'],
@@ -353,20 +331,21 @@ def stop_mariadb():
atexit.register(stop_mariadb)
maria_ready = wait_mysql(START_TIMEOUT)
- sp = subprocess.Popen(
- ['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', temp_config_path],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL
- )
- atexit.register(sp.kill)
- port_num = config['api']['mysql']['port']
- api_ready = wait_port(port_num, START_TIMEOUT)
+ if maria_ready:
+ sp = subprocess.Popen(
+ ['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', temp_config_path],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL
+ )
+ atexit.register(sp.kill)
+ port_num = config['api']['mysql']['port']
+ api_ready = maria_ready and wait_port(port_num, START_TIMEOUT)
try:
if maria_ready is False or api_ready is False:
print(f'Failed by timeout. MariaDB started={maria_ready}, MindsDB started={api_ready}')
raise Exception()
- unittest.main()
+ unittest.main(failfast=True)
print('Tests passed !')
except Exception as e:
print('Tests Failed !')
| Issues connection from Mariadb
Inserting a predictor causes the following error:
```
2020-07-01 00:32:38,356 - INFO - handle new incoming connection
2020-07-01 00:32:38,356 - INFO - New connection [127.0.0.1:43208]
2020-07-01 00:32:38,356 - INFO - send HandshakePacket
2020-07-01 00:32:38,357 - INFO - Sending packet: HandshakePacket
2020-07-01 00:32:38,357 - INFO - Get packet: HandshakeResponsePacket
2020-07-01 00:32:38,357 - INFO - Got packet
2020-07-01 00:32:38,357 - INFO - b'\r\xa2:\x80\x00\x00\x00\x01-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00mindsdb\x00\x00mindsdb\x00mysql_native_password\x00u\x03_os\x10debian-linux-gnu\x0c_client_name\x08libmysql\x04_pid\x041425\x0f_client_version\x0710.3.22\x0c_server_host\t127.0.0.1\t_platform\x06x86_64'
2020-07-01 00:32:38,357 - INFO - Check auth, user=mindsdb, ssl=False, auth_method=mysql_native_password: empty password
2020-07-01 00:32:38,357 - INFO - Check auth, user=mindsdb, ssl=False, auth_method=mysql_native_password: connecting to database mindsdb
2020-07-01 00:32:38,357 - INFO - Check auth, user=mindsdb: Ok
2020-07-01 00:32:38,357 - INFO - Sending packet: OkPacket
2020-07-01 00:32:38,357 - INFO - Get packet: CommandPacket
2020-07-01 00:32:38,357 - WARNING - Packet with less than 4 bytes in length: b''
2020-07-01 00:32:38,357 - INFO - Session closed by client
```
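For context, each MySQL packet the proxy reads begins with a 4-byte header: a 3-byte little-endian payload length followed by a 1-byte sequence number, so a read shorter than 4 bytes (as in the warning above) usually means the client sent nothing or closed the socket. Below is a minimal sketch of that header parse, as an illustrative helper rather than code from the repository:
```python
import struct

def parse_packet_header(header: bytes):
    # 3 bytes little-endian payload length + 1 byte sequence number.
    if len(header) < 4:
        # Matches the situation logged above: nothing left to parse.
        raise ValueError(f'Packet with less than 4 bytes in length: {header!r}')
    length = struct.unpack('<I', header[:3] + b'\x00')[0]
    sequence_id = header[3]
    return length, sequence_id
```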
| 2020-07-01T16:30:46Z | [] | [] |
|
mindsdb/mindsdb | 559 | mindsdb__mindsdb-559 | [
"547"
] | 449b356c46ecac197bd91ba2e0c830e86708ad28 | diff --git a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
--- a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
+++ b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
@@ -72,9 +72,9 @@ def get(self):
return False
break
len_header = struct.unpack('i', packet_string[:3] + b'\x00')[0]
+ count_header = int(packet_string[3])
if len_header == 0:
break
- count_header = int(packet_string[3])
body += self.mysql_socket.recv(len_header)
self.session.logging.info(f'Got packet: {str(body)}')
self.proxy.count = int(count_header) + 1
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -1,3 +1,5 @@
+import json
+
import pandas
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
@@ -30,9 +32,14 @@ def getTableColumns(self, table):
columns = []
columns += [x['column_name'] for x in model['data_analysis']['input_columns_metadata']]
columns += [x['column_name'] for x in model['data_analysis']['target_columns_metadata']]
- columns += [f"${x['column_name']}_confidence" for x in model['data_analysis']['target_columns_metadata']]
+ for col in model['predict']:
+ columns += [f"{col}_confidence"]
+ if model['data_analysis_v2'][col]['typing']['data_type'] == 'Numeric':
+ columns += [f"{col}_min", f"{col}_max"]
+ columns += [f"{col}_explain"]
+
# TODO this should be added just for clickhouse queries
- columns += ['$select_data_query']
+ columns += ['select_data_query']
return columns
def _select_predictors(self):
@@ -54,9 +61,9 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
return self._select_predictors()
select_data_query = None
- if came_from is not None and '$select_data_query' in where:
- select_data_query = where['$select_data_query']['$eq']
- del where['$select_data_query']
+ if came_from is not None and 'select_data_query' in where:
+ select_data_query = where['select_data_query']['$eq']
+ del where['select_data_query']
'''
@TODO (Urgent~ish)
@@ -96,23 +103,32 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
res = self.mindsdb_native.predict(name=table, when=new_where, when_data=where_data)
- predicted_columns = self.mindsdb_native.get_model_data(name=table)['predict']
- length = len(res._data[predicted_columns[0]])
+ model = self.mindsdb_native.get_model_data(name=table)
+ predicted_columns = model['predict']
data = []
keys = [x for x in list(res._data.keys()) if x in columns]
- confidence_keys = [f'{x}_confidence' for x in predicted_columns]
+ min_max_keys = []
+ for col in predicted_columns:
+ if model['data_analysis_v2'][col]['typing']['data_type'] == 'Numeric':
+ min_max_keys.append(col)
+
+ length = len(res._data[predicted_columns[0]])
for i in range(length):
row = {}
for key in keys:
row[key] = res._data[key][i]
- for key in confidence_keys:
- row['$' + key] = res._data[key][i]
+ for key in predicted_columns:
+ row[key + '_confidence'] = res[i].explanation[key]['confidence']
+ row[key + '_explain'] = json.dumps(res[i].explanation[key])
+ for key in min_max_keys:
+ row[key + '_min'] = res[i].explanation[key]['confidence_interval'][0]
+ row[key + '_max'] = res[i].explanation[key]['confidence_interval'][-1]
data.append(row)
if select_data_query is not None:
for row in data:
- row['$select_data_query'] = select_data_query
+ row['select_data_query'] = select_data_query
if new_where is not None and len(new_where.keys()) > 0:
columns = self.getTableColumns(table)
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -194,11 +194,11 @@ def get_fast_auth_password():
username = handshake_resp.username.value.decode()
-
if client_auth_plugin != DEFAULT_AUTH_METHOD:
if client_auth_plugin == 'mysql_native_password' and \
orig_password == '' and len(handshake_resp.enc_password.value) == 0:
- password = orig_password
+ switch_auth('mysql_native_password')
+ password = ''
else:
new_method = 'caching_sha2_password' if client_auth_plugin == 'caching_sha2_password' else 'mysql_native_password'
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -98,9 +98,13 @@ def register_predictors(self, model_data_arr):
name = model_meta['name']
stats = model_meta['data_analysis']
columns_sql = ','.join(self._to_clickhouse_table(stats))
- columns_sql += ',`$select_data_query` Nullable(String)'
+ columns_sql += ',`select_data_query` Nullable(String)'
for col in model_meta['predict_cols']:
- columns_sql += f',`${col}_confidence` Nullable(Float64)'
+ columns_sql += f',`{col}_confidence` Nullable(Float64)'
+ if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
+ columns_sql += f',`{col}_min` Nullable(Float64)'
+ columns_sql += f',`{col}_max` Nullable(Float64)'
+ columns_sql += f',`{col}_explain` Nullable(String)'
msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
msqyl_user = self.config['api']['mysql']['user']
diff --git a/mindsdb/integrations/mariadb/mariadb.py b/mindsdb/integrations/mariadb/mariadb.py
--- a/mindsdb/integrations/mariadb/mariadb.py
+++ b/mindsdb/integrations/mariadb/mariadb.py
@@ -105,9 +105,13 @@ def register_predictors(self, model_data_arr):
name = model_meta['name']
stats = model_meta['data_analysis']
columns_sql = ','.join(self._to_mariadb_table(stats))
- columns_sql += ',`$select_data_query` varchar(500)'
+ columns_sql += ',`select_data_query` varchar(500)'
for col in model_meta['predict_cols']:
- columns_sql += f',`${col}_confidence` double'
+ columns_sql += f',`{col}_confidence` double'
+ if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
+ columns_sql += f',`{col}_min` double'
+ columns_sql += f',`{col}_max` double'
+ columns_sql += f',`{col}_explain` varchar(500)'
connect = self._get_connect_string(f'{name}_mariadb')
| diff --git a/tests/integration_tests/test_mariadb.py b/tests/integration_tests/test_mariadb.py
--- a/tests/integration_tests/test_mariadb.py
+++ b/tests/integration_tests/test_mariadb.py
@@ -228,7 +228,8 @@ def test_3_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
select
- rental_price, location, sqft, $rental_price_confidence, number_of_rooms
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
from
mindsdb.{TEST_PREDICTOR_NAME} where sqft=1000;
""", as_dict=True)
@@ -241,7 +242,10 @@ def test_3_query_predictor(self):
self.assertTrue(res['rental_price'] is not None and res['rental_price'] != 'None')
self.assertTrue(res['location'] is not None and res['location'] != 'None')
self.assertTrue(res['sqft'] == 1000)
- self.assertIsInstance(res['$rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_min'], float)
+ self.assertIsInstance(res['rental_price_max'], float)
+ self.assertIsInstance(res['rental_price_explain'], str)
self.assertTrue(res['number_of_rooms'] == 'None' or res['number_of_rooms'] is None)
def test_4_range_query(self):
@@ -249,9 +253,10 @@ def test_4_range_query(self):
results = query(f"""
select
- rental_price, location, sqft, $rental_price_confidence, number_of_rooms
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
from
- mindsdb.{TEST_PREDICTOR_NAME} where $select_data_query='select * from test.{TEST_DATA_TABLE} limit 3';
+ mindsdb.{TEST_PREDICTOR_NAME} where select_data_query='select * from test.{TEST_DATA_TABLE} limit 3';
""", as_dict=True)
print('check result')
@@ -259,7 +264,10 @@ def test_4_range_query(self):
for res in results:
self.assertTrue(res['rental_price'] is not None and res['rental_price'] != 'None')
self.assertTrue(res['location'] is not None and res['location'] != 'None')
- self.assertIsInstance(res['$rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_min'], float)
+ self.assertIsInstance(res['rental_price_max'], float)
+ self.assertIsInstance(res['rental_price_explain'], str)
def test_5_delete_predictor_by_command(self):
print(f'\nExecuting {inspect.stack()[0].function}')
| Allow selecting from `.explain`
Discussed during a call
Gist of it is, assume you are predicting two columns `foo` and `bar`. Assume `foo` is numerical and `bar` is categorical.
1. All the values (including the predicted value) should come from the `.explain` property of the prediction
2. The values we expose should be:
`foo`
`bar`
`foo_confidence`
`bar_confidence`
`foo_max`
`foo_min`
Where min and max are the first and last values of the `confidence_interval` found in `.explain->foo`, and `confidence` is also a property of `.explain`
In addition to this, maybe expose two more properties:
`foo_explain` and `bar_explain` that are just strings containing a json representation of `.explain` (which is just a python dictionary).
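A minimal sketch of how those extra columns could be built from the `.explain` dictionary; the helper name is an assumption, while the `confidence` and `confidence_interval` keys follow the description above:
```python
import json

def explain_to_columns(explanation: dict, target: str) -> dict:
    # `explanation` is the prediction's `.explain` dict, `target` is e.g. 'foo'.
    exp = explanation[target]
    row = {
        f'{target}_confidence': exp['confidence'],
        f'{target}_explain': json.dumps(exp),  # whole dict as a JSON string
    }
    interval = exp.get('confidence_interval')
    if interval:  # only numerical targets such as 'foo' carry an interval
        row[f'{target}_min'] = interval[0]
        row[f'{target}_max'] = interval[-1]
    return row
```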
| 2020-07-03T16:41:53Z | [] | [] |
|
mindsdb/mindsdb | 569 | mindsdb__mindsdb-569 | [
"521"
] | a378325012fc90648988e82bd3871964bcdc427c | diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -37,7 +37,7 @@ def close_api_gracefully(p_arr):
print(f'Using configuration file: {config_path}')
config = Config(config_path)
-
+
if args.api is None:
api_arr = [api for api in config['api']]
else:
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -110,7 +110,7 @@ def on_file(file):
file_path = os.path.join(temp_dir_path, data['file'])
else:
file_path = None
-
+
ca.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -129,7 +129,7 @@ def put(self, name):
retrain = None
ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data')
- from_data = ca.default_store.get_datasource_obj(ds_name)
+ from_data = ca.default_store.get_datasource_obj(ds_name, raw=True)
if retrain is True:
original_name = name
@@ -217,7 +217,7 @@ def post(self, name):
global model_swapping_map
data = request.json
- from_data = ca.default_store.get_datasource_obj(data.get('data_source_name'))
+ from_data = ca.default_store.get_datasource_obj(data.get('data_source_name'), raw=True)
try:
format_flag = data.get('format_flag')
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
--- a/mindsdb/interfaces/database/database.py
+++ b/mindsdb/interfaces/database/database.py
@@ -16,7 +16,15 @@ def __init__(self, config, setup=False):
self.integration_arr.append(Mariadb(config,db_alias))
# Doesn't really matter if we call this multiple times, but it will waste time so ideally don't
if setup:
- for integration in self.integration_arr: integration.setup()
+ working_integrations = []
+ try:
+ for integration in self.integration_arr:
+ integration.setup()
+ working_integrations.append(integration)
+ except Exception as e:
+ print(f'Failed to integrate with a database, error: {e}')
+
+ self.integration_arr = working_integrations
def register_predictors(self, model_data_arr):
for integration in self.integration_arr: integration.register_predictors(model_data_arr)
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -129,7 +129,6 @@ def save_datasource(self, name, source_type, source, file_path=None):
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
create_sqlite_db(os.path.join(ds_dir, 'sqlite.db'), df_with_types)
- print(picklable)
with open(os.path.join(ds_dir,'ds.pickle'), 'wb') as fp:
pickle.dump(picklable, fp)
@@ -144,9 +143,9 @@ def save_datasource(self, name, source_type, source, file_path=None):
'columns': [dict(name=x) for x in list(df.keys())]
}, fp)
- return self.get_datasource_obj(name, avoid_crash=True)
+ return self.get_datasource_obj(name, raw=True)
- def get_datasource_obj(self, name, avoid_crash=False):
+ def get_datasource_obj(self, name, raw=False):
ds_meta_dir = os.path.join(self.dir, name)
ds_dir = os.path.join(ds_meta_dir, 'datasource')
ds = None
@@ -155,7 +154,7 @@ def get_datasource_obj(self, name, avoid_crash=False):
#sys.setrecursionlimit(0x100000)
with open(os.path.join(ds_dir,'ds.pickle'), 'rb') as fp:
picklable = pickle.load(fp)
- if avoid_crash:
+ if raw:
return picklable
try:
ds = eval(picklable['class'])(*picklable['args'],**picklable['kwargs'])
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -12,13 +12,16 @@ def __init__(self, config):
self.dbw = DatabaseWrapper(self.config)
def learn(self, name, from_data, to_predict, kwargs={}):
- join_learn_process = kwargs.get('join_learn_process')
+ join_learn_process = kwargs.get('join_learn_process', False)
if 'join_learn_process' in kwargs:
del kwargs['join_learn_process']
+
p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')
p.start()
if join_learn_process is True:
- model_data = p.join()
+ p.join()
+ if p.exitcode != 0:
+ raise Exception('Learning process failed !')
def predict(self, name, when=None, when_data=None, kwargs={}):
| diff --git a/requirements_test.txt b/requirements_test.txt
new file mode 100644
--- /dev/null
+++ b/requirements_test.txt
@@ -0,0 +1,3 @@
+pytest>=3.3.2
+pytest-randomly>=3.3.1
+pytest-ordering >= 0.6
diff --git a/tests/ci_tests/__init__.py b/tests/ci_tests/__init__.py
deleted file mode 100644
diff --git a/tests/ci_tests/test_endpoints.py b/tests/ci_tests/test_endpoints.py
deleted file mode 100644
--- a/tests/ci_tests/test_endpoints.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import subprocess
-import unittest
-import requests
-import time
-
-class PredictorTest(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_put_ds_put_pred(self):
- PRED_NAME = 'test_predictor_12'
- DS_NAME = 'test_ds_12'
-
- DS_URL = 'https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/home_rentals/dataset/train.csv'
-
- # PUT datasource
- params = {
- 'name': DS_NAME,
- 'source_type': 'url',
- 'source': DS_URL
- }
- url = f'http://localhost:47334/datasources/{DS_NAME}'
- res = requests.put(url, json=params)
- print(res)
- #assert res.status_code == 200
-
- # PUT predictor
- params = {
- 'data_source_name': DS_NAME,
- 'to_predict': 'rental_price'
- }
- url = f'http://localhost:47334/predictors/{PRED_NAME}'
- res = requests.put(url, json=params)
- assert res.status_code == 200
- time.sleep(50)
-
- # HTTP clickhouse interface: try to make a prediction
- where = {
- '`initial_price`': 2200,
- }
-
- query = "SELECT rental_price FROM {} WHERE {} FORMAT JSON".format(
- f'mindsdb.{PRED_NAME}',
- ' AND '.join('{} = {}'.format(k, v) for k, v in where.items())
- )
-
- print(query)
-
- res = requests.post('http://{}:{}'.format(
- 'localhost',
- 8123
- ), data=query)
- assert res.status_code == 200
-
- data = res.json()
- print(data)
- print(data['data'][0])
- assert 'rental_price' in data['data'][0] and data['data'][0]['rental_price'] is not None
-
-'''
-@TODO: Fix these
- def test_predictors(self):
- """
- Call list predictors endpoint
- THEN check the response is success
- """
- response = self.app.get('/predictors/')
- assert response.status_code == 200
-
- def test_columns_predictor_not_found(self):
- """
- Call unexisting predictor to analyse_dataset
- then check the response is NOT FOUND
- """
- response = self.app.get('/predictors/dummy_predictor/columns')
- assert response.status_code == 404
-
- def test_predictor_not_found(self):
- """
- Call unexisting predictor
- then check the response is NOT FOUND
- """
- response = self.app.get('/predictors/dummy_predictor')
- assert response.status_code == 404
-
- def test_analyse_invalid_datasource(self):
- """
- Don't provide datasource
- then check the response is No valid datasource given
- """
- response = self.app.get('/predictors/dummy_predictor/analyse_dataset')
- assert response.status_code == 400
-
- def test_analyse_valid_datasource(self):
- """
- Add valid datasource as parameter
- then check the response is 200
- """
- from_data = 'https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/heart_disease/processed_data/train.csv'
- response = self.app.get('/predictors/dummy_predictor/analyse_dataset?from_data='+ from_data)
- assert response.status_code == 200
-
-class DatasourceTest(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_datasources(self):
- """
- Call list datasources endpoint
- THEN check the response is success
- """
- response = self.app.get('/datasources/')
- assert response.status_code == 200
-
- def test_datasource_not_found(self):
- """
- Call unexisting datasource
- then check the response is NOT FOUND
- """
- response = self.app.get('/datasource/dummy_source')
- assert response.status_code == 404
-
-
-class UtilTest(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_ping(self):
- """
- Call utilities ping endpoint
- THEN check the response is success
- """
- response = self.app.get('/util/ping')
- assert response.status_code == 200
-
- def test_shotdown_throws_error(self):
- """
- Call shutdown endpoint
- localhost is not started raise error
- """
- response = self.app.get('/util/shutdown')
- assert response.status_code == 500
-'''
-
-if __name__ == "__main__":
- HOST = 'localhost'
- PORT = 47334
- sp = subprocess.Popen(['python3', '-m', 'mindsdb', '--api', 'mysql,http', '--config', 'mindsdb/default_config.json'])
-
- # less fancy
- try:
- time.sleep(12)
- unittest.main()
-
- '''
- t_0 = time.time()
- while True:
- try:
- res = requests.get('http://{}:{}/util/ping'.format(HOST, PORT), timeout=0.1)
- res.raise_for_status()
- unittest.main()
- break
- except requests.exceptions.ConnectionError:
- if (time.time() - t_0) > 15:
- print('Failed to connect to server')
- break
- time.sleep(1)
- '''
- print('Tests passed !')
- except:
- print('Tests Failed !')
- pass
- finally:
- print('Shutting Down Server !')
- time.sleep(2)
- sp.terminate()
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,4 @@
+# content of conftest.py
+import sys
+
+collect_ignore_glob = ['docker/*']
diff --git a/tests/docker/docker-compose.yml b/tests/docker/docker-compose.yml
old mode 100644
new mode 100755
--- a/tests/docker/docker-compose.yml
+++ b/tests/docker/docker-compose.yml
@@ -20,6 +20,7 @@ services:
clickhouse:
network_mode: host
image: yandex/clickhouse-server:20.1
+ container_name: clickhouse-test
volumes:
- ./clickhouse/database:/var/lib/clickhouse
ports:
diff --git a/tests/docker/mariadb/Dockerfile b/tests/docker/mariadb/Dockerfile
old mode 100644
new mode 100755
--- a/tests/docker/mariadb/Dockerfile
+++ b/tests/docker/mariadb/Dockerfile
@@ -5,12 +5,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
git \
mariadb-plugin-connect
-RUN apt-get update && apt-get install -y openjdk-8-jdk && export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
-
-COPY ./jars/wrapper/* /usr/lib/mysql/plugin/
-
-COPY ./jars/jdbc/* /usr/lib/jvm/java-1.8.0-openjdk-amd64/jre/lib/ext/
-
RUN chmod 0444 /etc/mysql/mariadb.conf.d/connect.cnf
VOLUME /var/lib/mysql
diff --git a/tests/docker/mariadb/connect.cnf b/tests/docker/mariadb/connect.cnf
old mode 100644
new mode 100755
diff --git a/tests/integration_tests/test_clickhouse.py b/tests/integration_tests/_test_clickhouse.py
old mode 100644
new mode 100755
similarity index 100%
rename from tests/integration_tests/test_clickhouse.py
rename to tests/integration_tests/_test_clickhouse.py
diff --git a/tests/integration_tests/test_mariadb.py b/tests/integration_tests/_test_mariadb.py
old mode 100644
new mode 100755
similarity index 100%
rename from tests/integration_tests/test_mariadb.py
rename to tests/integration_tests/_test_mariadb.py
diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
new file mode 100755
--- /dev/null
+++ b/tests/integration_tests/api/test_http.py
@@ -0,0 +1,128 @@
+import pytest
+import requests
+import runpy
+
+from subprocess import Popen
+import time
+import os
+import signal
+from random import randint
+
+
[email protected](scope="module")
+def ds_name():
+ rand = randint(0,pow(10,12))
+ return f'hr_ds_{rand}'
+
[email protected](scope="module")
+def pred_name():
+ rand = randint(0,pow(10,12))
+ return f'hr_predictor_{rand}'
+
+root = 'http://localhost:47334'
+class TestPredictor:
+ @classmethod
+ def setup_class(cls):
+ cls.sp = Popen(['python3', '-m', 'mindsdb', '--api', 'http'], close_fds=True)
+
+ for i in range(20):
+ try:
+ res = requests.get(f'{root}/ping')
+ if res.status != 200:
+ raise Exception('')
+ except:
+ time.sleep(1)
+
+ @classmethod
+ def teardown_class(cls):
+ try:
+ pgrp = os.getpgid(cls.sp.pid)
+ os.killpg(pgrp, signal.SIGINT)
+ except:
+ pass
+
+ @pytest.mark.order1
+ def test_put_ds(self, ds_name):
+ # PUT datasource
+ params = {
+ 'name': ds_name,
+ 'source_type': 'url',
+ 'source': 'https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/home_rentals/dataset/train.csv'
+ }
+ url = f'{root}/datasources/{ds_name}'
+ res = requests.put(url, json=params)
+ assert res.status_code == 200
+
+ @pytest.mark.order2
+ def test_analyze(self, ds_name):
+ response = requests.get(f'{root}/datasources/{ds_name}/analyze')
+ assert response.status_code == 200
+
+ @pytest.mark.order2
+ def test_put_predictor(self, ds_name, pred_name):
+ # PUT predictor
+ params = {
+ 'data_source_name': ds_name,
+ 'to_predict': 'rental_price',
+ 'kwargs': {
+ 'stop_training_in_x_seconds': 5,
+ 'join_learn_process': True
+ }
+ }
+ url = f'{root}/predictors/{pred_name}'
+ res = requests.put(url, json=params)
+ assert res.status_code == 200
+
+ # POST predictions
+ params = {
+ 'when': {'sqft':500}
+ }
+ url = f'{root}/predictors/{pred_name}/predict'
+ res = requests.post(url, json=params)
+ assert isinstance(res.json()[0]['rental_price']['predicted_value'],float)
+ assert res.status_code == 200
+
+ @pytest.mark.order3
+ def test_datasources(self):
+ """
+ Call list datasources endpoint
+ THEN check the response is success
+ """
+ response = requests.get(f'{root}/datasources/')
+ assert response.status_code == 200
+
+ @pytest.mark.order3
+ def test_datasource_not_found(self):
+ """
+ Call unexisting datasource
+ then check the response is NOT FOUND
+ """
+ response = requests.get(f'{root}/datasource/dummy_source')
+ assert response.status_code == 404
+
+ @pytest.mark.order3
+ def test_ping(self):
+ """
+ Call utilities ping endpoint
+ THEN check the response is success
+ """
+ response = requests.get(f'{root}/util/ping')
+ assert response.status_code == 200
+
+ @pytest.mark.order3
+ def test_predictors(self):
+ """
+ Call list predictors endpoint
+ THEN check the response is success
+ """
+ response = requests.get(f'{root}/predictors/')
+ assert response.status_code == 200
+
+ @pytest.mark.order3
+ def test_predictor_not_found(self):
+ """
+ Call unexisting predictor
+ then check the response is NOT FOUND
+ """
+ response = requests.get(f'{root}/predictors/dummy_predictor')
+ assert response.status_code == 404
diff --git a/tests/integration_tests/common.py b/tests/integration_tests/common.py
old mode 100644
new mode 100755
diff --git a/tests/plugin.py b/tests/plugin.py
new file mode 100644
--- /dev/null
+++ b/tests/plugin.py
@@ -0,0 +1,6 @@
+import pytest
+
+
+def pytest_configure(config):
+ if config.getoption("randomly_seed") == 'default':
+ config.option.randomly_seed = 420
| Compile installers using Travis and deploy them to S3
The installers should be compiled (for those which require it) via Travis and deployed to a public S3 bucket every time we release the `stable` branch.
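A minimal sketch of what that deploy step could look like, assuming a boto3 upload gated on Travis' `TRAVIS_BRANCH` variable; the bucket name and artifact paths are placeholders, not the project's actual configuration:
```python
import os
import boto3

def upload_installers(paths, bucket='example-installers-bucket'):
    # Only publish artifacts when Travis is building the stable branch.
    if os.environ.get('TRAVIS_BRANCH') != 'stable':
        return
    s3 = boto3.client('s3')
    for path in paths:
        # Upload each compiled installer and make it publicly readable.
        s3.upload_file(path, bucket, os.path.basename(path),
                       ExtraArgs={'ACL': 'public-read'})
```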
| 2020-07-06T08:09:41Z | [] | [] |
|
mindsdb/mindsdb | 581 | mindsdb__mindsdb-581 | [
"530",
"550"
] | 22eecb7f48a871f347add9a44784eacd382a5956 | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.99.9'
+__version__ = '1.99.10'
__description__ = "MindsDB server, provides server capabilities to mindsdb native python library"
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py
--- a/mindsdb/__init__.py
+++ b/mindsdb/__init__.py
@@ -5,6 +5,7 @@
from mindsdb.utilities.wizards import cli_config
config_dir, predictor_dir, datasource_dir = get_or_create_dir_struct()
+
config_path = os.path.join(config_dir,'config.json')
if not os.path.exists(config_path):
_ = cli_config(None,None,predictor_dir,datasource_dir,config_dir,use_default=True)
diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -8,10 +8,11 @@
from torch.multiprocessing import Process
from mindsdb.utilities.config import Config
+from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb.api.http.start import start as start_http
from mindsdb.api.mysql.start import start as start_mysql
from mindsdb.utilities.fs import get_or_create_dir_struct
-
+from mindsdb.interfaces.database.database import DatabaseWrapper
def close_api_gracefully(p_arr):
for p in p_arr:
@@ -47,6 +48,20 @@ def close_api_gracefully(p_arr):
'mysql': start_mysql
}
+ mdb = MindsdbNative(config)
+ model_data_arr = [
+ {
+ 'name': x['name'],
+ 'predict_cols': x['predict'],
+ 'data_analysis': mdb.get_model_data(x['name'])['data_analysis_v2']
+ } for x in mdb.get_models()
+ ]
+ dbw = DatabaseWrapper(config)
+ dbw.register_predictors(model_data_arr)
+
+ for broken_name in [name for name, connected in dbw.check_connections().items() if connected is False]:
+ print(f'Error failed to integrate with database aliased: {broken_name}')
+
p_arr = []
ctx = mp.get_context('spawn')
for api in api_arr:
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -110,7 +110,7 @@ def on_file(file):
file_path = os.path.join(temp_dir_path, data['file'])
else:
file_path = None
-
+
ca.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
diff --git a/mindsdb/api/http/namespaces/entitites/predictor_metadata.py b/mindsdb/api/http/namespaces/entitites/predictor_metadata.py
--- a/mindsdb/api/http/namespaces/entitites/predictor_metadata.py
+++ b/mindsdb/api/http/namespaces/entitites/predictor_metadata.py
@@ -14,6 +14,9 @@
# other attributes
'data_preparation': fields.Nested(data_preparation_metadata, required=False, description='The metadata used in the preparation stage, in which we break the data into train, test, validation'),
'accuracy': fields.Float(description='The current accuracy of the model'),
+ 'train_data_accuracy': fields.Float(description='The current accuracy of the model',required=False),
+ 'test_data_accuracy': fields.Float(description='The current accuracy of the model', required=False),
+ 'valid_data_accuracy': fields.Float(description='The current accuracy of the model', required=False),
'data_analysis': fields.Nested(data_analysis_metadata, required=False, description='The metadata used in the analysis stage, in which we extract statistical information from the input data'),
'model_analysis': fields.List(fields.Nested(target_column_metadata), required=False, description='The model analysis stage, in which we extract statistical information from the input data for each target variable, thus, this is a list; one item per target column')
,'data_analysis_v2': fields.Raw(default={})
diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -113,14 +113,11 @@ def put(self, name):
if 'equal_accuracy_for_all_output_categories' not in kwargs:
kwargs['equal_accuracy_for_all_output_categories'] = True
- if 'sample_margin_of_error' not in kwargs:
- kwargs['sample_margin_of_error'] = 0.005
+ if 'advanced_args' not in kwargs:
+ kwargs['advanced_args'] = {}
- if 'unstable_parameters_dict' not in kwargs:
- kwargs['unstable_parameters_dict'] = {}
-
- if 'use_selfaware_model' not in kwargs['unstable_parameters_dict']:
- kwargs['unstable_parameters_dict']['use_selfaware_model'] = False
+ if 'use_selfaware_model' not in kwargs['advanced_args']:
+ kwargs['advanced_args']['use_selfaware_model'] = False
try:
retrain = data.get('retrain')
@@ -132,7 +129,7 @@ def put(self, name):
retrain = None
ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data')
- from_data = ca.default_store.get_datasource_obj(ds_name)
+ from_data = ca.default_store.get_datasource_obj(ds_name, raw=True)
if retrain is True:
original_name = name
@@ -207,7 +204,7 @@ def post(self, name):
while name in model_swapping_map and model_swapping_map[name] is True:
time.sleep(1)
- results = ca.mindsdb_native.predict(name, when=when, **kwargs)
+ results = ca.mindsdb_native.predict(name, when_data=when, **kwargs)
# return '', 500
return preparse_results(results, format_flag)
@@ -220,7 +217,7 @@ def post(self, name):
global model_swapping_map
data = request.json
- from_data = ca.default_store.get_datasource_obj(data.get('data_source_name'))
+ from_data = ca.default_store.get_datasource_obj(data.get('data_source_name'), raw=True)
try:
format_flag = data.get('format_flag')
diff --git a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
--- a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
+++ b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py
@@ -48,14 +48,14 @@ def setBody(self, body_string):
def loadFromPacketString(self, packet_string):
len_header = struct.unpack('>i', struct.pack('1s', '') + packet_string[:3])[0]
- count_header = struct.unpack('b', packet_string[3])[0]
+ count_header = struct.unpack('B', packet_string[3])[0]
body = packet_string[4:]
self.loadFromParams(length=len_header, seq=count_header, body=body)
def getPacketString(self):
body = self.body
len_header = struct.pack('<i', self.length)[:3] # keep it 3 bytes
- count_header = struct.pack('b', self.seq)
+ count_header = struct.pack('B', self.seq)
packet = len_header + count_header + body
return packet
@@ -72,13 +72,12 @@ def get(self):
return False
break
len_header = struct.unpack('i', packet_string[:3] + b'\x00')[0]
+ count_header = int(packet_string[3])
if len_header == 0:
break
- count_header = int(packet_string[3])
body += self.mysql_socket.recv(len_header)
- self.session.logging.info('Got packet')
- self.session.logging.info(body)
- self.proxy.count = int(count_header) + 1
+ self.session.logging.info(f'Got packet: {str(body)}')
+ self.proxy.count = (int(count_header) + 1) % 256
self.setup(len(body), count_header, body)
return True
diff --git a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py
--- a/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py
+++ b/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packets/switch_auth_response_packet.py
@@ -6,8 +6,10 @@ def setup(self, length=0, count_header=1, body=''):
length = len(body)
if length == 0:
+ self.password = b''
return
self.enc_password = Datum('string<EOF>') # 0x04
buffer = body
buffer = self.enc_password.setFromBuff(buffer)
+ self.password = self.enc_password.value
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -1,9 +1,11 @@
+import json
+
import pandas
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
from mindsdb.interfaces.native.mindsdb import MindsdbNative
-from mindsdb.interfaces.clickhouse.clickhouse import Clickhouse
-from mindsdb.interfaces.mariadb.mariadb import Mariadb
+from mindsdb.integrations.clickhouse.clickhouse import Clickhouse
+from mindsdb.integrations.mariadb.mariadb import Mariadb
class MindsDBDataNode(DataNode):
type = 'mindsdb'
@@ -30,8 +32,14 @@ def getTableColumns(self, table):
columns = []
columns += [x['column_name'] for x in model['data_analysis']['input_columns_metadata']]
columns += [x['column_name'] for x in model['data_analysis']['target_columns_metadata']]
+ for col in model['predict']:
+ columns += [f"{col}_confidence"]
+ if model['data_analysis_v2'][col]['typing']['data_type'] == 'Numeric':
+ columns += [f"{col}_min", f"{col}_max"]
+ columns += [f"{col}_explain"]
+
# TODO this should be added just for clickhouse queries
- columns += ['$select_data_query']
+ columns += ['select_data_query']
return columns
def _select_predictors(self):
@@ -42,27 +50,39 @@ def _select_predictors(self):
'accuracy': x['accuracy'],
'predict_cols': ', '.join(x['predict']),
'select_data_query': x['data_source'],
- 'training_options': '' # TODEL ?
+ 'training_options': '' # TODO ?
} for x in models]
def delete_predictor(self, name):
self.mindsdb_native.delete_model(name)
def select(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, came_from=None):
+ ''' NOTE WHERE statements can be just $eq joined with 'and'
+ '''
if table == 'predictors':
return self._select_predictors()
select_data_query = None
- if came_from is not None and '$select_data_query' in where:
- select_data_query = where['$select_data_query']['$eq']
- del where['$select_data_query']
+ if came_from is not None and 'select_data_query' in where:
+ select_data_query = where['select_data_query']['$eq']
+ del where['select_data_query']
+
+ '''
+ @TODO (Urgent~ish)
+ This is a horrible but function hack, however the proper way to do this is:
+ 1. Figure out the alias of the database sending the query
+ 2. Lookup the connection information in the config
+ 3. Send that information + the query + a name (maybe the hash of the query or the query itself) to the Datastore API and ask it to create a datasource
+
+ That way we also avoid making the same query twice and we don't use the database integrations (meant to sync predictors) in order to query data (the role of the mindsdb_native datasources / the datastore / data skillet)
+ '''
if came_from == 'clickhouse':
- ch = Clickhouse(self.config)
+ ch = Clickhouse(self.config, 'default_clickhouse')
res = ch._query(select_data_query.strip(' ;') + ' FORMAT JSON')
data = res.json()['data']
elif came_from == 'mariadb':
- maria = Mariadb(self.config)
+ maria = Mariadb(self.config, 'default_mariadb')
data = maria._query(select_data_query)
if where_data is None:
@@ -70,42 +90,44 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
else:
where_data += data
- # NOTE WHERE statements can be just $eq joined with 'and'
new_where = {}
- for key, value in where.items():
- if isinstance(value, dict) is False or len(value.keys()) != 1 or list(value.keys())[0] != '$eq':
- # TODO value should be just string or number
- raise Exception()
- new_where[key] = value['$eq']
- if len(new_where) == 0:
- new_where = None
-
if where_data is not None:
where_data = pandas.DataFrame(where_data)
+ else:
+ for key, value in where.items():
+ if isinstance(value, dict) is False or len(value.keys()) != 1 or list(value.keys())[0] != '$eq':
+ # TODO value should be just string or number
+ raise Exception()
+ new_where[key] = value['$eq']
+ if len(new_where) == 0:
+ new_where = None
- res = self.mindsdb_native.predict(name=table, when=new_where, when_data=where_data)
+ where_data = [new_where]
- predicted_columns = self.mindsdb_native.get_model_data(name=table)['predict']
- length = len(res.data[predicted_columns[0]])
+ res = self.mindsdb_native.predict(name=table, when_data=where_data)
+
+ model = self.mindsdb_native.get_model_data(name=table)
+ predicted_columns = model['predict']
data = []
- keys = [x for x in list(res.data.keys()) if x in columns]
+ keys = [x for x in list(res._data.keys()) if x in columns]
+ min_max_keys = []
+ for col in predicted_columns:
+ if model['data_analysis_v2'][col]['typing']['data_type'] == 'Numeric':
+ min_max_keys.append(col)
+
+ length = len(res._data[predicted_columns[0]])
for i in range(length):
row = {}
for key in keys:
- row[key] = res.data[key][i]
+ row[key] = res._data[key][i]
+ for key in predicted_columns:
+ row[key + '_confidence'] = res[i].explanation[key]['confidence']
+ row[key + '_explain'] = json.dumps(res[i].explanation[key])
+ for key in min_max_keys:
+ row[key + '_min'] = res[i].explanation[key]['confidence_interval'][0]
+ row[key + '_max'] = res[i].explanation[key]['confidence_interval'][-1]
+ row['select_data_query'] = select_data_query
data.append(row)
- if select_data_query is not None:
- for row in data:
- row['$select_data_query'] = select_data_query
-
- if new_where is not None and len(new_where.keys()) > 0:
- columns = self.getTableColumns(table)
- for row in data:
- for column in columns:
- if column not in row:
- row[column] = None
- row.update(new_where)
-
return data
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -131,13 +131,16 @@ def isAuthOk(self, user, orig_user, password, orig_password):
log.error(traceback.format_exc())
def handshake(self):
- global HARDCODED_PASSWORD, HARDCODED_USER
+ global HARDCODED_PASSWORD, HARDCODED_USER, CERT_PATH
def switch_auth(method='mysql_native_password'):
self.packet(SwitchOutPacket, seed=self.salt, method=method).send()
switch_out_answer = self.packet(SwitchOutResponse)
switch_out_answer.get()
- return switch_out_answer.enc_password.value
+ password = switch_out_answer.password
+ if method == 'mysql_native_password' and len(password) == 0:
+ password = handshake_resp.scramble_func('', self.salt)
+ return password
def get_fast_auth_password():
log.info('Asking for fast auth password')
@@ -178,12 +181,14 @@ def get_fast_auth_password():
if handshake_resp.type == 'SSLRequest':
log.info('switch to SSL')
self.session.is_ssl = True
+
ssl_socket = ssl.wrap_socket(
self.socket,
server_side=True,
certfile=CERT_PATH,
do_handshake_on_connect=True
)
+
self.socket = ssl_socket
handshake_resp = self.packet(HandshakeResponsePacket)
handshake_resp.get()
@@ -191,36 +196,37 @@ def get_fast_auth_password():
username = handshake_resp.username.value.decode()
- if orig_username == username and HARDCODED_PASSWORD == '':
- log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'empty password')
- password = ''
+ if client_auth_plugin != DEFAULT_AUTH_METHOD:
+ if client_auth_plugin == 'mysql_native_password' and \
+ orig_password == '' and len(handshake_resp.enc_password.value) == 0:
+ switch_auth('mysql_native_password')
+ password = ''
+ else:
+ new_method = 'caching_sha2_password' if client_auth_plugin == 'caching_sha2_password' else 'mysql_native_password'
+
+ if new_method == 'caching_sha2_password' and self.session.is_ssl is False:
+ log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
+ 'error: cant switch to caching_sha2_password without SSL')
+ self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'caching_sha2_password without SSL not supported').send()
+ return False
- elif (DEFAULT_AUTH_METHOD not in client_auth_plugin) or \
- self.session.is_ssl is False and 'caching_sha2_password' in client_auth_plugin:
- new_method = 'caching_sha2_password' if 'caching_sha2_password' in client_auth_plugin else 'mysql_native_password'
-
- if new_method == 'caching_sha2_password' and self.session.is_ssl is False:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'error: cant switch to caching_sha2_password without SSL')
- self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'caching_sha2_password without SSL not supported').send()
- return False
+ f'switch auth method to {new_method}')
+ password = switch_auth(new_method)
+ if new_method == 'caching_sha2_password':
+ password = get_fast_auth_password()
+ else:
+ orig_password = orig_password_hash
+ elif orig_username == username and HARDCODED_PASSWORD == '':
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- f'switch auth method to {new_method}')
- password = switch_auth(new_method)
-
- if new_method == 'caching_sha2_password':
- password = get_fast_auth_password()
- else:
- orig_password = orig_password_hash
-
+ 'empty password')
+ password = ''
elif 'caching_sha2_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
'check auth using caching_sha2_password')
password = get_fast_auth_password()
orig_password = HARDCODED_PASSWORD
-
elif 'mysql_native_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
'check auth using mysql_native_password')
@@ -290,11 +296,16 @@ def answerTableQuery(self, query):
self.sendPackageGroup(packages)
def insert_predictor_answer(self, sql):
+ global mdb, default_store
insert = SQLQuery.parse_insert(sql)
- datasources = default_store.get_datasources()
- if insert['name'] in [x['name'] for x in datasources]:
- self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"datasource with name '{insert['name']}'' already exists").send()
+ models = mdb.get_models()
+ if insert['name'] in [x['name'] for x in models]:
+ self.packet(
+ ErrPacket,
+ err_code=ERR.ER_WRONG_ARGUMENTS,
+ msg=f"predictor with name '{insert['name']}'' already exists"
+ ).send()
return
kwargs = {}
@@ -318,6 +329,7 @@ def insert_predictor_answer(self, sql):
db = db[:db.find(' ')].strip(' `')
ds_type = db
ds = default_store.save_datasource(insert['name'], ds_type, insert['select_data_query'])
+ insert['predict_cols'] = [x.strip() for x in insert['predict_cols'].split(',')]
mdb.learn(insert['name'], ds, insert['predict_cols'], kwargs)
self.packet(OkPacket).send()
@@ -438,7 +450,7 @@ def queryAnswer(self, sql):
return
elif keyword == 'delete' and \
('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
- self.delete_predictor_answer(sql, db)
+ self.delete_predictor_answer(sql)
return
elif keyword == 'insert' and \
('mindsdb.commands' in sql_lower or self.session.database == 'mindsdb' and 'commands' in sql_lower):
@@ -847,7 +859,7 @@ def packet(self, packetClass=Packet, **kwargs):
:return:
"""
p = packetClass(socket=self.socket, seq=self.count, session=self.session, proxy=self, **kwargs)
- self.count += 1
+ self.count = (self.count + 1) % 256
return p
@staticmethod
@@ -865,7 +877,7 @@ def startProxy(config):
HARDCODED_USER = config['api']['mysql']['user']
HARDCODED_PASSWORD = config['api']['mysql']['password']
- CERT_PATH = config['api']['mysql']['certificate_path']
+ CERT_PATH = config['api']['mysql'].get('certificate_path')
default_store = DataStore(config)
mdb = MindsdbNative(config)
datahub = init_datahub(config)
@@ -886,7 +898,3 @@ def startProxy(config):
# interrupt the program with Ctrl-C
log.info('Waiting for incoming connections...')
server.serve_forever()
-
-
-if __name__ == "__main__":
- MysqlProxy.startProxy()
diff --git a/mindsdb/interfaces/clickhouse/__init__.py b/mindsdb/integrations/clickhouse/__init__.py
similarity index 100%
rename from mindsdb/interfaces/clickhouse/__init__.py
rename to mindsdb/integrations/clickhouse/__init__.py
diff --git a/mindsdb/interfaces/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
similarity index 55%
rename from mindsdb/interfaces/clickhouse/clickhouse.py
rename to mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/interfaces/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -2,14 +2,9 @@
from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
class Clickhouse():
- def __init__(self, config):
+ def __init__(self, config, name):
self.config = config
- self.host = config['integrations']['default_clickhouse']['host']
- self.port = config['integrations']['default_clickhouse']['port']
- self.user = config['integrations']['default_clickhouse']['user']
- self.password = config['integrations']['default_clickhouse']['password']
- self.setup_clickhouse()
-
+ self.name = name
def _to_clickhouse_table(self, stats):
subtype_map = {
@@ -23,7 +18,8 @@ def _to_clickhouse_table(self, stats):
DATA_SUBTYPES.IMAGE: 'Nullable(String)',
DATA_SUBTYPES.VIDEO: 'Nullable(String)',
DATA_SUBTYPES.AUDIO: 'Nullable(String)',
- DATA_SUBTYPES.TEXT: 'Nullable(String)',
+ DATA_SUBTYPES.SHORT: 'Nullable(String)',
+ DATA_SUBTYPES.RICH: 'Nullable(String)',
DATA_SUBTYPES.ARRAY: 'Array(Float64)'
}
@@ -42,23 +38,28 @@ def _to_clickhouse_table(self, stats):
def _query(self, query):
params = {'user': 'default'}
try:
- params['user'] = self.config['integrations']['default_clickhouse']['user']
+ params['user'] = self.config['integrations'][self.name]['user']
except:
pass
try:
- params['password'] = self.config['integrations']['default_clickhouse']['password']
+ params['password'] = self.config['integrations'][self.name]['password']
except:
pass
- host = self.config['integrations']['default_clickhouse']['host']
- port = self.config['integrations']['default_clickhouse']['port']
+ host = self.config['integrations'][self.name]['host']
+ port = self.config['integrations'][self.name]['port']
response = requests.post(f'http://{host}:{port}', data=query, params=params)
+ if response.status_code != 200:
+ raise Exception(f'Error: {response.content}\nQuery:{query}')
+
return response
- def setup_clickhouse(self):
+ def setup(self):
+ self._query('DROP DATABASE IF EXISTS mindsdb')
+
self._query('CREATE DATABASE IF NOT EXISTS mindsdb')
msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
@@ -75,7 +76,6 @@ def setup_clickhouse(self):
training_options String
) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors_clickhouse', '{msqyl_user}', '{msqyl_pass}')
"""
- print(f'Executing table creation query to create predictors list:\n{q}\n')
self._query(q)
q = f"""
@@ -83,28 +83,46 @@ def setup_clickhouse(self):
command String
) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'commands_clickhouse', '{msqyl_user}', '{msqyl_pass}')
"""
- print(f'Executing table creation query to create command table:\n{q}\n')
self._query(q)
- def register_predictor(self, name, stats):
- columns_sql = ','.join(self._to_clickhouse_table(stats))
- columns_sql += ',`$select_data_query` Nullable(String)'
- msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
- msqyl_user = self.config['api']['mysql']['user']
- msqyl_pass = self.config['api']['mysql']['password']
-
- q = f"""
- CREATE TABLE mindsdb.{name}
- ({columns_sql}
- ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', '{name}_clickhouse', '{msqyl_user}', '{msqyl_pass}')
- """
- print(f'Executing table creation query to sync predictor:\n{q}\n')
- self._query(q)
+ def register_predictors(self, model_data_arr):
+ for model_meta in model_data_arr:
+ name = model_meta['name']
+ stats = model_meta['data_analysis']
+ if 'columns_to_ignore' in stats:
+ del stats['columns_to_ignore']
+ columns_sql = ','.join(self._to_clickhouse_table(stats))
+ columns_sql += ',`select_data_query` Nullable(String)'
+ for col in model_meta['predict_cols']:
+ columns_sql += f',`{col}_confidence` Nullable(Float64)'
+ if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
+ columns_sql += f',`{col}_min` Nullable(Float64)'
+ columns_sql += f',`{col}_max` Nullable(Float64)'
+ columns_sql += f',`{col}_explain` Nullable(String)'
+
+ msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
+ msqyl_user = self.config['api']['mysql']['user']
+ msqyl_pass = self.config['api']['mysql']['password']
+
+ q = f"""
+ CREATE TABLE mindsdb.{name}
+ ({columns_sql}
+ ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', '{name}_clickhouse', '{msqyl_user}', '{msqyl_pass}')
+ """
+ self._query(q)
def unregister_predictor(self, name):
q = f"""
drop table if exists mindsdb.{name};
"""
- print(f'Executing table creation query to sync predictor:\n{q}\n')
self._query(q)
+
+
+ def check_connection(self):
+ try:
+ res = self._query('select 1;')
+ connected = res.status_code == 200
+ except Exception:
+ connected = False
+ return connected
diff --git a/mindsdb/interfaces/mariadb/__init__.py b/mindsdb/integrations/mariadb/__init__.py
similarity index 100%
rename from mindsdb/interfaces/mariadb/__init__.py
rename to mindsdb/integrations/mariadb/__init__.py
diff --git a/mindsdb/interfaces/mariadb/mariadb.py b/mindsdb/integrations/mariadb/mariadb.py
similarity index 59%
rename from mindsdb/interfaces/mariadb/mariadb.py
rename to mindsdb/integrations/mariadb/mariadb.py
--- a/mindsdb/interfaces/mariadb/mariadb.py
+++ b/mindsdb/integrations/mariadb/mariadb.py
@@ -6,15 +6,9 @@
class Mariadb():
- def __init__(self, config):
+ def __init__(self, config, name):
self.config = config
- self.host = config['integrations']['default_mariadb']['host']
- self.port = config['integrations']['default_mariadb']['port']
- self.user = config['integrations']['default_mariadb']['user']
- self.password = config['integrations']['default_mariadb']['password']
-
- self.setup_mariadb()
-
+ self.name = name
def _to_mariadb_table(self, stats):
subtype_map = {
@@ -28,7 +22,8 @@ def _to_mariadb_table(self, stats):
DATA_SUBTYPES.IMAGE: 'VARCHAR(500)',
DATA_SUBTYPES.VIDEO: 'VARCHAR(500)',
DATA_SUBTYPES.AUDIO: 'VARCHAR(500)',
- DATA_SUBTYPES.TEXT: 'VARCHAR(500)',
+ DATA_SUBTYPES.SHORT: 'VARCHAR(500)',
+ DATA_SUBTYPES.RICH: 'VARCHAR(500)',
DATA_SUBTYPES.ARRAY: 'VARCHAR(500)'
}
@@ -44,7 +39,7 @@ def _to_mariadb_table(self, stats):
return column_declaration
def _query(self, query):
- con = mysql.connector.connect(host=self.host, port=self.port, user=self.user, password=self.password)
+ con = mysql.connector.connect(host=self.config['integrations'][self.name]['host'], port=self.config['integrations'][self.name]['port'], user=self.config['integrations'][self.name]['user'], password=self.config['integrations'][self.name]['password'])
cur = con.cursor(dictionary=True,buffered=True)
cur.execute(query)
@@ -71,7 +66,9 @@ def _get_connect_string(self, table):
return connect
- def setup_mariadb(self):
+ def setup(self):
+ self._query('DROP DATABASE IF EXISTS mindsdb')
+
self._query('CREATE DATABASE IF NOT EXISTS mindsdb')
connect = self._get_connect_string('predictors_mariadb')
@@ -86,34 +83,50 @@ def setup_mariadb(self):
training_options VARCHAR(500)
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
- print(f'Executing table creation query to create predictors list:\n{q}\n')
self._query(q)
+ connect = self._get_connect_string('commands_mariadb')
+
q = f"""
CREATE TABLE IF NOT EXISTS mindsdb.commands (
command VARCHAR(500)
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
- print(f'Executing table creation query to create command table:\n{q}\n')
self._query(q)
- def register_predictor(self, name, stats):
- columns_sql = ','.join(self._to_mariadb_table(stats))
- columns_sql += ',`$select_data_query` varchar(500)'
-
- connect = self._get_connect_string(f'{name}_mariadb')
-
- q = f"""
- CREATE TABLE mindsdb.{name}
- ({columns_sql}
- ) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
- """
- print(f'Executing table creation query to sync predictor:\n{q}\n')
- self._query(q)
+ def register_predictors(self, model_data_arr):
+ for model_meta in model_data_arr:
+ name = model_meta['name']
+ stats = model_meta['data_analysis']
+ columns_sql = ','.join(self._to_mariadb_table(stats))
+ columns_sql += ',`select_data_query` varchar(500)'
+ for col in model_meta['predict_cols']:
+ columns_sql += f',`{col}_confidence` double'
+ if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
+ columns_sql += f',`{col}_min` double'
+ columns_sql += f',`{col}_max` double'
+ columns_sql += f',`{col}_explain` varchar(500)'
+
+ connect = self._get_connect_string(f'{name}_mariadb')
+
+ q = f"""
+ CREATE TABLE mindsdb.{name}
+ ({columns_sql}
+ ) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
+ """
+ self._query(q)
def unregister_predictor(self, name):
q = f"""
drop table if exists mindsdb.{name};
"""
- print(f'Executing table creation query to sync predictor:\n{q}\n')
self._query(q)
+
+ def check_connection(self):
+ try:
+ con = mysql.connector.connect(host=self.config['integrations'][self.name]['host'], port=self.config['integrations'][self.name]['port'], user=self.config['integrations'][self.name]['user'], password=self.config['integrations'][self.name]['password'])
+ connected = con.is_connected()
+ con.close()
+ except Exception:
+ connected = False
+ return connected
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/interfaces/database/database.py
@@ -0,0 +1,51 @@
+from mindsdb.integrations.clickhouse.clickhouse import Clickhouse
+from mindsdb.integrations.mariadb.mariadb import Mariadb
+
+
+class DatabaseWrapper():
+
+ def __init__(self, config):
+ self.config = config
+ self._get_integrations()
+
+ def _setup_integrations(self, integration_arr):
+ # Doesn't really matter if we call this multiple times, but it will waste time so ideally don't
+ working_integration_arr = []
+ for integration in integration_arr:
+ try:
+ integration.setup()
+ working_integration_arr.append(integration)
+ except Exception as e:
+ print('Failed to integrate with database ' + integration.name + f', error: {e}')
+
+ return working_integration_arr
+
+ def _get_integrations(self):
+ # @TODO Once we have a persistent state sorted out, this should be simplified so as not to refresh the existing integrations every single time
+ integration_arr = []
+ for db_alias in self.config['integrations']:
+ if self.config['integrations'][db_alias]['enabled']:
+ if self.config['integrations'][db_alias]['type'] == 'clickhouse':
+ integration_arr.append(Clickhouse(self.config,db_alias))
+ elif self.config['integrations'][db_alias]['type'] == 'mariadb':
+ integration_arr.append(Mariadb(self.config,db_alias))
+ else:
+ print('Unknown integration type: ' + self.config['integrations'][db_alias]['type'] + f' for database called: {db_alias}')
+
+ return integration_arr
+
+ def register_predictors(self, model_data_arr, setup=True):
+ it = self._get_integrations()
+ if setup:
+ it = self._setup_integrations(it)
+ for integration in it: integration.register_predictors(model_data_arr)
+
+ def unregister_predictor(self, name):
+ for integration in self._get_integrations(): integration.unregister_predictor(name)
+
+ def check_connections(self):
+ connections = {}
+ for integration in self._get_integrations():
+ connections[integration.name] = integration.check_connection()
+
+ return connections
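A usage sketch (not part of the patch) for the wrapper above. The config path and predictor name are hypothetical; the shape of the entries passed to register_predictors mirrors the dict built in predictor_process.py further down, and F.get_model_data comes from mindsdb_native exactly as used in interfaces/native/mindsdb.py.

    from mindsdb_native import F
    from mindsdb.utilities.config import Config
    from mindsdb.interfaces.database.database import DatabaseWrapper

    config = Config('/path/to/config.json')    # hypothetical path to a generated config
    dbw = DatabaseWrapper(config)

    print(dbw.check_connections())             # e.g. {'default_mariadb': True, 'default_clickhouse': False}

    model_data = F.get_model_data('home_rentals_model')   # hypothetical, already-trained predictor
    dbw.register_predictors([{
        'name': 'home_rentals_model',
        'predict_cols': ['rental_price'],
        'data_analysis': model_data['data_analysis_v2']
    }])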
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -24,10 +24,11 @@ def __init__(self, config, storage_dir=None):
self.mindsdb_native = MindsdbNative(config)
def get_analysis(self, ds):
- try:
- return self.mindsdb_native.analyse_dataset(ds)
- except:
+ if isinstance(ds,str):
return self.mindsdb_native.analyse_dataset(self.get_datasource_obj(ds))
+ else:
+ return self.mindsdb_native.analyse_dataset(ds)
+
def get_datasources(self):
datasource_arr = []
@@ -60,7 +61,6 @@ def delete_datasource(self, name):
shutil.rmtree(os.path.join(self.dir, data_sources['name']))
def save_datasource(self, name, source_type, source, file_path=None):
- print(name, source_type, source)
if source_type == 'file' and (file_path is None):
raise Exception('`file_path` argument required when source_type == "file"')
@@ -129,7 +129,6 @@ def save_datasource(self, name, source_type, source, file_path=None):
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
create_sqlite_db(os.path.join(ds_dir, 'sqlite.db'), df_with_types)
- print(picklable)
with open(os.path.join(ds_dir,'ds.pickle'), 'wb') as fp:
pickle.dump(picklable, fp)
@@ -144,9 +143,9 @@ def save_datasource(self, name, source_type, source, file_path=None):
'columns': [dict(name=x) for x in list(df.keys())]
}, fp)
- return self.get_datasource_obj(name, avoid_crash=True)
+ return self.get_datasource_obj(name, raw=True)
- def get_datasource_obj(self, name, avoid_crash=False):
+ def get_datasource_obj(self, name, raw=False):
ds_meta_dir = os.path.join(self.dir, name)
ds_dir = os.path.join(ds_meta_dir, 'datasource')
ds = None
@@ -155,7 +154,7 @@ def get_datasource_obj(self, name, avoid_crash=False):
#sys.setrecursionlimit(0x100000)
with open(os.path.join(ds_dir,'ds.pickle'), 'rb') as fp:
picklable = pickle.load(fp)
- if avoid_crash:
+ if raw:
return picklable
try:
ds = eval(picklable['class'])(*picklable['args'],**picklable['kwargs'])
diff --git a/mindsdb/interfaces/datastore/sqlite_helpers.py b/mindsdb/interfaces/datastore/sqlite_helpers.py
--- a/mindsdb/interfaces/datastore/sqlite_helpers.py
+++ b/mindsdb/interfaces/datastore/sqlite_helpers.py
@@ -28,8 +28,11 @@ def cast_df_columns_types(df, stats):
DATA_SUBTYPES.AUDIO: 'object'
},
DATA_TYPES.SEQUENTIAL: {
- DATA_SUBTYPES.TEXT: 'object',
DATA_SUBTYPES.ARRAY: 'object'
+ },
+ DATA_TYPES.TEXT: {
+ DATA_SUBTYPES.SHORT: 'object',
+ DATA_SUBTYPES.RICH: 'object'
}
}
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -1,39 +1,29 @@
# Mindsdb native interface
import mindsdb_native
+from mindsdb_native import F
from dateutil.parser import parse as parse_datetime
from mindsdb.interfaces.native.predictor_process import PredictorProcess
-
+from mindsdb.interfaces.database.database import DatabaseWrapper
class MindsdbNative():
def __init__(self, config):
self.config = config
- self.metapredictor = mindsdb_native.Predictor('metapredictor')
- self.unregister_from = []
-
- try:
- assert(config['integrations']['default_clickhouse']['enabled'] == True)
- from mindsdb.interfaces.clickhouse.clickhouse import Clickhouse
- clickhouse = Clickhouse(self.config)
- self.unregister_from.append(clickhouse)
- except Exception as e:
- print(e)
- pass
-
- try:
- assert(config['integrations']['default_mariadb']['enabled'] == True)
- from mindsdb.interfaces.mariadb.mariadb import Mariadb
- mariadb = Mariadb(self.config)
- self.unregister_from.append(mariadb)
- except Exception as e:
- print(e)
- pass
+ self.dbw = DatabaseWrapper(self.config)
def learn(self, name, from_data, to_predict, kwargs={}):
+ join_learn_process = kwargs.get('join_learn_process', False)
+ if 'join_learn_process' in kwargs:
+ del kwargs['join_learn_process']
+
p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')
p.start()
+ if join_learn_process is True:
+ p.join()
+ if p.exitcode != 0:
+ raise Exception('Learning process failed !')
- def predict(self, name, when=None, when_data=None, kwargs={}):
+ def predict(self, name, when_data=None, kwargs={}):
# @TODO Separate into two paths, one for "normal" predictions and one for "real time" predictions. Use the multiprocessing code commented out below for normal predictions (once we figure out how to return the prediction object); else use the inline code, but with the "real time" predict functionality of mindsdb_native that will be implemented later
'''
from_data = when if when is not None else when_data
@@ -41,35 +31,24 @@ def predict(self, name, when=None, when_data=None, kwargs={}):
p.start()
predictions = p.join()
'''
-
mdb = mindsdb_native.Predictor(name=name)
- kwargs['use_gpu'] = self.config.get('use_gpu', None)
-
- if when is not None:
- predictions = mdb.predict(
- when=when,
- run_confidence_variation_analysis=True,
- **kwargs
- )
- else:
- predictions = mdb.predict(
- when_data=when_data,
- run_confidence_variation_analysis=False,
- **kwargs
- )
+ predictions = mdb.predict(
+ when_data=when_data,
+ run_confidence_variation_analysis=not isinstance(when_data, list) or len(when_data) == 1,
+ **kwargs
+ )
return predictions
def analyse_dataset(self, ds):
- return self.metapredictor.analyse_dataset(ds, sample_margin_of_error=0.025)
+ return F.analyse_dataset(ds)
def get_model_data(self, name):
- return self.metapredictor.get_model_data(name)
+ return F.get_model_data(name)
def get_models(self, status='any'):
- models = self.metapredictor.get_models()
- models = [x for x in models if x['name'] != 'metapredictor']
+ models = F.get_models()
if status != 'any':
models = [x for x in models if x['status'] == status]
@@ -83,16 +62,18 @@ def get_models(self, status='any'):
return models
def delete_model(self, name):
- self.metapredictor.delete_model(name)
- for entity in self.unregister_from:
- unregister_func = getattr(entity, 'unregister_predictor')
- unregister_func(name)
+ F.delete_model(name)
+ self.dbw.unregister_predictor(name)
def rename_model(self, name, new_name):
- self.metapredictor.rename_model(name, new_name)
+ self.dbw.unregister_predictor(name)
+ F.rename_model(name, new_name)
+ self.dbw.register_predictors(new_name)
def load_model(self, fpath):
- self.metapredictor.load_model(model_archive_path=fpath)
+ F.load_model(model_archive_path=fpath)
+ # @TODO How do we figure out the name here ?
+ #dbw.register_predictor(...)
def export_model(self,name):
- self.metapredictor.export_model(model_name=name)
+ F.export_model(model_name=name)
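A sketch of driving the slimmed-down interface above (not part of the patch). The datasource descriptor dict is the picklable form unpacked in predictor_process.py below via getattr(mindsdb_native, class)(*args, **kwargs); FileDS (mindsdb_native's file datasource), the config path, the predictor name and the file name are illustrative assumptions, and the columns come from the home_rentals dataset used elsewhere in this diff.

    from mindsdb.utilities.config import Config
    from mindsdb.interfaces.native.mindsdb import MindsdbNative

    config = Config('/path/to/config.json')    # hypothetical config path
    native = MindsdbNative(config)

    native.learn(
        'home_rentals_model',                                              # hypothetical predictor name
        {'class': 'FileDS', 'args': ['home_rentals.csv'], 'kwargs': {}},   # unpacked by PredictorProcess
        'rental_price',
        {'join_learn_process': True}                                       # block until the spawned learn finishes
    )

    predictions = native.predict(
        'home_rentals_model',
        when_data={'sqft': 900, 'number_of_rooms': 2, 'initial_price': 2200}
    )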
diff --git a/mindsdb/interfaces/native/predictor_process.py b/mindsdb/interfaces/native/predictor_process.py
--- a/mindsdb/interfaces/native/predictor_process.py
+++ b/mindsdb/interfaces/native/predictor_process.py
@@ -1,4 +1,5 @@
import torch.multiprocessing as mp
+from mindsdb.interfaces.database.database import DatabaseWrapper
ctx = mp.get_context('spawn')
@@ -18,56 +19,38 @@ def run(self):
import sys
import mindsdb_native
- from mindsdb.utilities.config import Config
-
name, from_data, to_predict, kwargs, config, trx_type = self._args
- config = Config(config)
mdb = mindsdb_native.Predictor(name=name)
if trx_type == 'learn':
+ to_predict = to_predict if isinstance(to_predict, list) else [to_predict]
data_source = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])
-
- kwargs['use_gpu'] = config.get('use_gpu', None)
mdb.learn(
from_data=data_source,
to_predict=to_predict,
**kwargs
)
- stats = mdb.get_model_data()['data_analysis_v2']
+ stats = mindsdb_native.F.get_model_data(name)['data_analysis_v2']
- try:
- assert(config['integrations']['default_clickhouse']['enabled'] == True)
- from mindsdb.interfaces.clickhouse.clickhouse import Clickhouse
- clickhouse = Clickhouse(config)
- clickhouse.register_predictor(name, stats)
- except:
- pass
-
- try:
- assert(config['integrations']['default_mariadb']['enabled'] == True)
- from mindsdb.interfaces.mariadb.mariadb import Mariadb
- mariadb = Mariadb(config)
- mariadb.register_predictor(name, stats)
- except:
- pass
+ DatabaseWrapper(config).register_predictors([{
+ 'name': name,
+ 'predict_cols': to_predict,
+ 'data_analysis': stats
+ }], setup = False)
if trx_type == 'predict':
- if isinstance(from_data,dict):
- when = from_data
- when_data = None
+ if isinstance(from_data, dict):
+ when_data = from_data
else:
when_data = getattr(mindsdb_native, from_data['class'])(*from_data['args'], **from_data['kwargs'])
- when = None
-
- kwargs['use_gpu'] = config.get('use_gpu', None)
predictions = mdb.predict(
- when=when,
when_data=when_data,
run_confidence_variation_analysis=True,
**kwargs
)
+ # @TODO Figure out a way to recover this since we are using `spawn` here... a simple Queue, or instantiating a multiprocessing Manager and registering a value in a dict using that. Or using map from a multiprocessing pool with 1x process (though using a custom process there might be its own bucket of annoyances). A sketch of the Queue option follows this file's diff.
return predictions
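One possible shape for the Queue option floated in the @TODO above; this is a sketch under assumptions, not part of this patch. It keeps the spawn context already used by PredictorProcess; the worker must live at module level so the spawned interpreter can import it, and the predictor name and when_data values are hypothetical.

    import torch.multiprocessing as mp

    ctx = mp.get_context('spawn')

    def predict_worker(queue, name, when_data):
        import mindsdb_native
        mdb = mindsdb_native.Predictor(name=name)
        # Assuming the prediction result pickles cleanly; if it does not, convert the
        # rows to plain dicts before putting them on the queue.
        queue.put(mdb.predict(when_data=when_data))

    if __name__ == '__main__':
        queue = ctx.Queue()
        p = ctx.Process(target=predict_worker, args=(queue, 'home_rentals_model', {'sqft': 900}))
        p.start()
        predictions = queue.get()   # fetch before join() so a full pipe cannot deadlock the child
        p.join()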
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -1,39 +1,57 @@
import os
import json
-
+import hashlib
class Config(object):
_config = {}
- def __init__(self, config='mindsdb/default_config.json'):
- if isinstance(config, dict):
- self._update_recursive(self._config, config)
- elif isinstance(config, str):
- self.merge(config)
+ def __init__(self, config_path):
+ self._config_path = None
+ self._config_hash = None
+ self._config = None
+ if isinstance(config_path, str):
+ self.config_path = config_path
+ self._read()
+ self._config_hash = self._gen_hash()
+ else:
+ raise TypeError('Argument must be a string representing a file path <Later on to be switched to a file path and/or database connection info>')
+
+ def _read(self):
+ if isinstance(self.config_path, str) and os.path.isfile(self.config_path):
+ with open(self.config_path, 'r') as fp:
+ self._config = json.load(fp)
else:
- raise TypeError('argument must be string or dict')
-
- def merge(self, file_path):
- if os.path.isfile(file_path):
- with open(file_path, 'r') as fp:
- config = json.load(fp)
- self._update_recursive(self._config, config)
-
- def _update_recursive(self, a, b):
- for key in [x for x in a.keys() if x in b]:
- if isinstance(a[key], dict) and isinstance(b[key], dict):
- self._update_recursive(a[key], b[key])
- else:
- a[key] = b[key]
- for key in b.keys():
- if key not in a:
- a[key] = b[key]
+ raise TypeError('`self.config_path` must be a string representing a local file path to a json config')
+
+ def _gen_hash(self):
+ with open(self.config_path, 'rb') as fp:
+ return hashlib.md5(fp.read()).hexdigest()
+
+ def _set_updated(self, key):
+ # Only check this for dynamically generated keys; it won't be needed once we switch to using a database here
+ if key in ['integrations']:
+ file_hash = self._gen_hash()
+ if file_hash != self._config_hash:
+ self._read()
+ self._config_hash = self._gen_hash()
def __getitem__(self, key):
+ self._set_updated(key)
return self._config[key]
def get(self, key, default=None):
+ self._set_updated(key)
return self._config.get(key, default)
def get_all(self):
return self._config
+
+ def set(self, key_chain, value):
+ pass
+
+ # Higher level interface
+ def add_db_integration(self, name, dict):
+ pass
+
+ def modify_db_integration(self, name, dict):
+ pass
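A self-contained sketch (not part of the patch) of the reload behaviour above: accessing 'integrations' re-hashes the config file and re-reads it when the contents changed, while other keys are served from the cached dict.

    import os
    import json
    import tempfile
    from mindsdb.utilities.config import Config

    path = os.path.join(tempfile.mkdtemp(), 'config.json')
    with open(path, 'w') as fp:
        json.dump({'integrations': {}}, fp)

    config = Config(path)
    print(config['integrations'])    # {}

    # Edit the file out-of-band; the next 'integrations' access notices the new md5 and re-reads it.
    with open(path, 'w') as fp:
        json.dump({'integrations': {'default_mariadb': {'enabled': False}}}, fp)

    print(config['integrations'])    # {'default_mariadb': {'enabled': False}}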
diff --git a/mindsdb/utilities/fs.py b/mindsdb/utilities/fs.py
--- a/mindsdb/utilities/fs.py
+++ b/mindsdb/utilities/fs.py
@@ -53,7 +53,12 @@ def get_or_create_dir_struct():
for dir in tup:
assert(os.path.exists(dir))
assert(os.access(dir, os.W_OK) == True)
- return tup[0], tup[1], tup[2]
+
+ config_dir = tup[0]
+ if 'DEV_CONFIG_PATH' in os.environ:
+ config_dir = os.environ['DEV_CONFIG_PATH']
+
+ return config_dir, tup[1], tup[2]
except Exception as e:
pass
@@ -62,7 +67,13 @@ def get_or_create_dir_struct():
for dir in tup:
create_directory(dir)
assert(os.access(dir, os.W_OK) == True)
- return tup[0], tup[1], tup[2]
+
+ config_dir = tup[0]
+ if 'DEV_CONFIG_PATH' in os.environ:
+ config_dir = os.environ['DEV_CONFIG_PATH']
+
+ return config_dir, tup[1], tup[2]
+
except Exception as e:
pass
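A small sketch (not in the patch) of the DEV_CONFIG_PATH override added above; the directory value is hypothetical and only the first returned entry (the config directory) is affected.

    import os
    from mindsdb.utilities.fs import get_or_create_dir_struct

    os.environ['DEV_CONFIG_PATH'] = '/tmp/mindsdb_dev_config'   # hypothetical development config dir
    dirs = get_or_create_dir_struct()
    config_dir = dirs[0]    # now '/tmp/mindsdb_dev_config'; the two storage dirs are unchanged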
diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -1,6 +1,6 @@
import os
import json
-
+from datetime import datetime, timedelta
def _in(ask, default, use_default):
if use_default:
@@ -13,6 +13,9 @@ def _in(ask, default, use_default):
if type(default) == int:
user_input = int(user_input)
+ if type(default) == bool:
+ user_input = int(user_input)
+
return user_input
def auto_config(python_path,pip_path,predictor_dir,datasource_dir):
@@ -51,6 +54,53 @@ def auto_config(python_path,pip_path,predictor_dir,datasource_dir):
return config
+def make_ssl_cert(file_path):
+ from cryptography import x509
+ from cryptography.x509.oid import NameOID
+ from cryptography.hazmat.backends import default_backend
+ from cryptography.hazmat.primitives import hashes
+ from cryptography.hazmat.primitives import serialization
+ from cryptography.hazmat.primitives.asymmetric import rsa
+
+ key = rsa.generate_private_key(
+ public_exponent=65537,
+ key_size=2048,
+ backend=default_backend(),
+ )
+
+ name = x509.Name([
+ x509.NameAttribute(NameOID.COMMON_NAME, 'mdb_autogen'),
+ x509.NameAttribute(NameOID.COUNTRY_NAME, 'US'),
+ x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, 'California'),
+ x509.NameAttribute(NameOID.LOCALITY_NAME, 'Berkeley'),
+ x509.NameAttribute(NameOID.ORGANIZATION_NAME, 'MindsDB')
+ ])
+
+ now = datetime.utcnow()
+ cert = (
+ x509.CertificateBuilder()
+ .subject_name(name)
+ .issuer_name(name)
+ .public_key(key.public_key())
+ .serial_number(1)
+ .not_valid_before(now)
+ .not_valid_after(now + timedelta(days=10*365))
+ .add_extension(
+ x509.BasicConstraints(ca=True, path_length=0),
+ False
+ )
+ .sign(key, hashes.SHA256(), default_backend())
+ )
+ cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
+ key_pem = key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption(),
+ )
+
+ with open(file_path, 'wb') as f:
+ f.write(key_pem + cert_pem)
+
def cli_config(python_path,pip_path,predictor_dir,datasource_dir,config_dir,use_default=False):
config = auto_config(python_path,pip_path,predictor_dir,datasource_dir)
@@ -60,64 +110,14 @@ def cli_config(python_path,pip_path,predictor_dir,datasource_dir,config_dir,use_
config['api']['http']['host'] = _in('HTTP interface host: ','0.0.0.0',use_default)
config['api']['http']['port'] = _in('HTTP interface port: ','47334',use_default)
- crt_path = os.path.join(config_dir, 'cert.pem')
- with open(crt_path, 'w') as fp:
- fp.write("""-----BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDeItDNfMko2uLe
-gVQUzPTLXx2hod9LKIN+S01TpwJvLi5185JZ8YhquwZnI19VvLQLyOIUMGeVxVvg
-GPgkRA58liO4oyCjZGYJygwAN0gkMvJXeMXuNeezlfm3xiVvCk0+l6Vb8FqyJod/
-P9sr6CyfQPolXSngZ8C7M/Fz3YWQ4O26L9in79eynCeJodXmzHsdrLHwKKOhv3F9
-Chp5Rx82KRvoFrdhmUEGM+l4Aq1J1iUVEsAQHHiMWhJhfsiC1GjGErOjSfWKw9e0
-ckbgqY/2ypXHdjsg0mI3eLO3E29SnG8OCWDLRcJVU1oHeX8hqk04c0urXhVQThN3
-X6ecXD1dAgMBAAECggEAEVxjOUwhZKIGzSEKcz25fBOI+1LtYrBd5ob4GiuZUxsm
-4m0Q6RqpcL4BOBpZnxfKcolWsgE+d0QfdBo/eoYfI7mQPSPyrxJvryAtY+7uInYg
-3pk/zuhDnZOBGs3PqygA9X1gnRjh3b6JJHbXKE1S/3dSlYz8ct9o/riGjGmpwLLO
-WuLbiRZoXRPCGWb1bIRpjVPn01YhlEvHyJsXktikm4pMUv+2QUZC7PU/eaAyY3eX
-Y0qdgaxza8q7toFXENG2nI/4dL5T9d4Bg2642zIk+Ki43NbQox4BDeWaSBWQK+bB
-DNDEjNnuGG0pTrdMD65TIOt7AoWeNCAqJZtSLDcoeQKBgQDwTHs1QX65Eo15cVh6
-sClRYTP1d01t39cSdV7sX1Vtp/z7C3FeUlWpb8zgyo3wYJlo+7hyOAcY4KyBpTeS
-aJGy1qIfD0qSt2ZNIvw1wfjiKa00ThMVW8urOcSMt+8+Q9SA/1nE/1iLNSc6M9Af
-ixx34zxlg25vbEaYcFKqiGYNgwKBgQDspoXV4hiufqoPf7F5bYYrv5j3SXoaJZnM
-RJngvBHohlSE9TwGvhHmy6xJAj1CRpoOOQpLoWgvxvWpdsCvcny3a8MY5AbyqOI4
-banVDCW5jnRe5ak/ECoxP4uPK+5/9CUlW3cP+GKfRGU3H7OhadopvNfwjUPg+wB4
-PXTCUvhznwKBgQCvKkFB//09Mb35QduKi7GCxgWXMKE7r8jahr5sNc5TQfqSkbPR
-WtlgysOhNWYkTHZn5d59PERIKTb2xpXs3tcec4D4fTASJSiooBETqtMfIdxFXYhh
-sGmV5mVVYps+Wzmj0wAAL1a/Gz7+GVjkNYbKCdYz9YviIx6O7ooED6u8uwKBgHuM
-aJ0EYExhVpmm2doCQyT973dTBgs2jDfnrMp2hYb28pNDkOYYPzJWLQkkwSSjxXQd
-dXGMv98JqWGi3O/7/n6oJQAOtE3lu80n+519rQhWBg0xK43/+3cgrNS/Y9GrfeUl
-/l/5Fkv+IjWIOHjR0ZMuwzIUHlcL0+/ybc2yEYITAoGAc5shIP9wvjEGexemj1u1
-mp2XwZ7zc0yyZA2icgsYAED6CVoNyrvU6KUm3m1fwEsHPdjk9vLhB5thgz1cjabr
-eoOAdPicwUjndabSor9ylCTDpYpTc8SwuM9KoZyk39DNMUcW3DtwWVZ8YBcm5j0X
-91+jp56NrKca0z1vcyRvy4Q=
------END PRIVATE KEY-----
------BEGIN CERTIFICATE-----
-MIIDazCCAlOgAwIBAgIUfo40Rk2dYhY8SO+yXL5vrvli+20wDQYJKoZIhvcNAQEL
-BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
-GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA1MjExMzA4MzRaFw0yMTA1
-MjExMzA4MzRaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
-HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
-AQUAA4IBDwAwggEKAoIBAQDeItDNfMko2uLegVQUzPTLXx2hod9LKIN+S01TpwJv
-Li5185JZ8YhquwZnI19VvLQLyOIUMGeVxVvgGPgkRA58liO4oyCjZGYJygwAN0gk
-MvJXeMXuNeezlfm3xiVvCk0+l6Vb8FqyJod/P9sr6CyfQPolXSngZ8C7M/Fz3YWQ
-4O26L9in79eynCeJodXmzHsdrLHwKKOhv3F9Chp5Rx82KRvoFrdhmUEGM+l4Aq1J
-1iUVEsAQHHiMWhJhfsiC1GjGErOjSfWKw9e0ckbgqY/2ypXHdjsg0mI3eLO3E29S
-nG8OCWDLRcJVU1oHeX8hqk04c0urXhVQThN3X6ecXD1dAgMBAAGjUzBRMB0GA1Ud
-DgQWBBRXK63AaxqKc92abM3L9tM/sF1fmTAfBgNVHSMEGDAWgBRXK63AaxqKc92a
-bM3L9tM/sF1fmTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC8
-1/JSufP8yWnKWDXYrfWCM1ji+COiW3qrjeYxOyl6uvkJDDNFUt8MQUO2c4HFr4BE
-I7BGYbCfGT3dc1K3/JKtlGeoKqbKMgBWe+Lu12kkB5nrQdyqTVSgQnL1HHN7u7ED
-apSV9TzYcz6wbX4Yv27UMGpwbUypIG2EUVbBCkElZYoMn4TNlKF7uTH5dOmR+LNr
-zGvTvYkjMFLRtJ13SkRyfiMJkfJcM89czOVu4X/dljiHhGePfdbCUuGs1Gw759a8
-3l7b506sujWQEmuSe6UdOUws+gR82H7kb8n7qxcOa5HXiIE2MRdfHXx8AS0LGPsa
-n0PAUDF7eqI/kYskiWUX
------END CERTIFICATE-----
- """)
-
mysql = _in('Enable MYSQL API ? [Y/N]','Y',use_default)
if mysql in ['Y','y']:
+ crt_path = os.path.join(config_dir, 'cert.pem')
+ if os.path.isfile(crt_path) is False:
+ make_ssl_cert(crt_path)
config['api']['mysql'] = {
- "certificate_path": crt_path
- ,"log": {
+ "certificate_path": crt_path,
+ "log": {
"format": "%(asctime)s - %(levelname)s - %(message)s",
"folder": "logs/",
"file": "mysql.log",
@@ -133,16 +133,16 @@ def cli_config(python_path,pip_path,predictor_dir,datasource_dir,config_dir,use_
clickhouse = _in('Connect to clickhouse ? [Y/N]','Y',use_default)
if clickhouse in ['Y','y']:
- config['integrations']['default_clickhouse']['enabled'] = True
+ config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration ?: ',False,use_default)
config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ','localhost',use_default)
config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ',8123,use_default)
config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ','default',use_default)
config['integrations']['default_clickhouse']['password'] = _in('Clickhouse password: ','',use_default)
- config['integrations']['default_mariadb']['type'] = 'clickhouse'
+ config['integrations']['default_clickhouse']['type'] = 'clickhouse'
mariadb = _in('Connect to Mariadb ? [Y/N]','Y',use_default)
if mariadb in ['Y','y']:
- config['integrations']['default_mariadb']['enabled'] = True
+ config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration ?: ',False,use_default)
config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ','localhost',use_default)
config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ',3306,use_default)
config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ','root',use_default)
| diff --git a/tests/ci_tests/__init__.py b/mindsdb/interfaces/database/__init__.py
similarity index 100%
rename from tests/ci_tests/__init__.py
rename to mindsdb/interfaces/database/__init__.py
diff --git a/requirements_test.txt b/requirements_test.txt
new file mode 100644
--- /dev/null
+++ b/requirements_test.txt
@@ -0,0 +1,3 @@
+pytest>=3.3.2
+pytest-randomly>=3.3.1
+pytest-ordering>=0.6
diff --git a/tests/ci_tests/test_endpoints.py b/tests/ci_tests/test_endpoints.py
deleted file mode 100644
--- a/tests/ci_tests/test_endpoints.py
+++ /dev/null
@@ -1,179 +0,0 @@
-import subprocess
-import unittest
-import requests
-import time
-
-class PredictorTest(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_put_ds_put_pred(self):
- PRED_NAME = 'test_predictor_12'
- DS_NAME = 'test_ds_12'
-
- DS_URL = 'https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/home_rentals/dataset/train.csv'
-
- # PUT datasource
- params = {
- 'name': DS_NAME,
- 'source_type': 'url',
- 'source': DS_URL
- }
- url = f'http://localhost:47334/datasources/{DS_NAME}'
- res = requests.put(url, json=params)
- print(res)
- #assert res.status_code == 200
-
- # PUT predictor
- params = {
- 'data_source_name': DS_NAME,
- 'to_predict': 'rental_price'
- }
- url = f'http://localhost:47334/predictors/{PRED_NAME}'
- res = requests.put(url, json=params)
- assert res.status_code == 200
- time.sleep(50)
-
- # HTTP clickhouse interface: try to make a prediction
- where = {
- '`initial_price`': 2200,
- }
-
- query = "SELECT rental_price FROM {} WHERE {} FORMAT JSON".format(
- f'mindsdb.{PRED_NAME}',
- ' AND '.join('{} = {}'.format(k, v) for k, v in where.items())
- )
-
- print(query)
-
- res = requests.post('http://{}:{}'.format(
- 'localhost',
- 8123
- ), data=query)
- assert res.status_code == 200
-
- data = res.json()
- print(data)
- print(data['data'][0])
- assert 'rental_price' in data['data'][0] and data['data'][0]['rental_price'] is not None
-
-'''
-@TODO: Fix these
- def test_predictors(self):
- """
- Call list predictors endpoint
- THEN check the response is success
- """
- response = self.app.get('/predictors/')
- assert response.status_code == 200
-
- def test_columns_predictor_not_found(self):
- """
- Call unexisting predictor to analyse_dataset
- then check the response is NOT FOUND
- """
- response = self.app.get('/predictors/dummy_predictor/columns')
- assert response.status_code == 404
-
- def test_predictor_not_found(self):
- """
- Call unexisting predictor
- then check the response is NOT FOUND
- """
- response = self.app.get('/predictors/dummy_predictor')
- assert response.status_code == 404
-
- def test_analyse_invalid_datasource(self):
- """
- Don't provide datasource
- then check the response is No valid datasource given
- """
- response = self.app.get('/predictors/dummy_predictor/analyse_dataset')
- assert response.status_code == 400
-
- def test_analyse_valid_datasource(self):
- """
- Add valid datasource as parameter
- then check the response is 200
- """
- from_data = 'https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/heart_disease/processed_data/train.csv'
- response = self.app.get('/predictors/dummy_predictor/analyse_dataset?from_data='+ from_data)
- assert response.status_code == 200
-
-class DatasourceTest(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_datasources(self):
- """
- Call list datasources endpoint
- THEN check the response is success
- """
- response = self.app.get('/datasources/')
- assert response.status_code == 200
-
- def test_datasource_not_found(self):
- """
- Call unexisting datasource
- then check the response is NOT FOUND
- """
- response = self.app.get('/datasource/dummy_source')
- assert response.status_code == 404
-
-
-class UtilTest(unittest.TestCase):
-
- def setUp(self):
- pass
-
- def test_ping(self):
- """
- Call utilities ping endpoint
- THEN check the response is success
- """
- response = self.app.get('/util/ping')
- assert response.status_code == 200
-
- def test_shotdown_throws_error(self):
- """
- Call shutdown endpoint
- localhost is not started raise error
- """
- response = self.app.get('/util/shutdown')
- assert response.status_code == 500
-'''
-
-if __name__ == "__main__":
- HOST = 'localhost'
- PORT = 47334
- sp = subprocess.Popen(['python3', '-m', 'mindsdb', '--api', 'mysql,http', '--config', 'mindsdb/default_config.json'])
-
- # less fancy
- try:
- time.sleep(12)
- unittest.main()
-
- '''
- t_0 = time.time()
- while True:
- try:
- res = requests.get('http://{}:{}/util/ping'.format(HOST, PORT), timeout=0.1)
- res.raise_for_status()
- unittest.main()
- break
- except requests.exceptions.ConnectionError:
- if (time.time() - t_0) > 15:
- print('Failed to connect to server')
- break
- time.sleep(1)
- '''
- print('Tests passed !')
- except:
- print('Tests Failed !')
- pass
- finally:
- print('Shutting Down Server !')
- time.sleep(2)
- sp.terminate()
diff --git a/tests/docker/cli.sh b/tests/docker/cli.sh
new file mode 100755
--- /dev/null
+++ b/tests/docker/cli.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+if [ $1 = "mariadb" ]; then
+ echo "prepare"
+ mkdir -p mariadb/jars/jdbc/
+ if [ ! -f "mariadb/jars/jdbc/dremio-jdbc-driver-4.2.2-202004211133290458-b550b6fa.jar" ]; then
+ wget -P mariadb/jars/jdbc/ http://download.dremio.com/jdbc-driver/4.2.2-202004211133290458-b550b6fa/dremio-jdbc-driver-4.2.2-202004211133290458-b550b6fa.jar
+ fi
+ mkdir -p mariadb/jars/wrapper/
+ if [ ! -f "mariadb/jars/wrapper/JavaWrappers.jar" ]; then
+ # wget -P mariadb/jars/wrapper/ https://jira.mariadb.org/secure/attachment/44179/JavaWrappers.jar
+ wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1MWPTvX_QDR9-7u_8qhwx9vF5RpeJc0za' -O mariadb/jars/wrapper/JavaWrappers.jar
+ fi
+ if [ ! -f "mariadb/jars/wrapper/JdbcInterface.jar" ]; then
+ wget --no-check-certificate 'https://docs.google.com/uc?export=download&id=1uH2yKnvLBCpDpQnhOmm_Us988-AcKkLw' -O mariadb/jars/wrapper/JdbcInterface.jar
+ fi
+ mkdir -p mariadb/connectData/
+ docker-compose up mariadb
+elif [ $1 = "mariadb-stop" ]; then
+ docker-compose stop mariadb
+elif [ $1 = "clickhouse" ]; then
+ mkdir -p clickhouse/database/
+ docker-compose up clickhouse
+elif [ $1 = "clickhouse-stop" ]; then
+ docker-compose stop clickhouse
+fi
diff --git a/docker-compose.yml b/tests/docker/docker-compose.yml
old mode 100644
new mode 100755
similarity index 58%
rename from docker-compose.yml
rename to tests/docker/docker-compose.yml
--- a/docker-compose.yml
+++ b/tests/docker/docker-compose.yml
@@ -4,22 +4,25 @@
version: "3"
services:
mariadb:
- build: .
+ build: mariadb/.
network_mode: host
- image: mariadb-connect:1
- container_name: connect
- command: --default-authentication-plugin=mysql_native_password
+ image: mariadb-test:1.0
+ container_name: mariadb-test
+ command: --default-authentication-plugin=caching_sha2_password
ports:
- 3306:3306
environment:
MYSQL_ALLOW_EMPTY_PASSWORD: "yes"
MYSQL_ROOT_PASSWORD: "root"
volumes:
- - ./connectData:/var/lib/mysql
- - ./connect.cnf:/etc/mysql/mariadb.conf.d/connect.cnf
+ - ./mariadb/connectData:/var/lib/mysql
+ - ./mariadb/connect.cnf:/etc/mysql/mariadb.conf.d/connect.cnf
clickhouse:
network_mode: host
image: yandex/clickhouse-server:20.1
+ container_name: clickhouse-test
+ volumes:
+ - ./clickhouse/database:/var/lib/clickhouse
ports:
- 8123:8123
- 9000:9000
diff --git a/tests/docker/mariadb/Dockerfile b/tests/docker/mariadb/Dockerfile
new file mode 100755
--- /dev/null
+++ b/tests/docker/mariadb/Dockerfile
@@ -0,0 +1,14 @@
+FROM mariadb:10.3
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+ curl \
+ git \
+ mariadb-plugin-connect
+
+RUN chmod 0444 /etc/mysql/mariadb.conf.d/connect.cnf
+
+VOLUME /var/lib/mysql
+
+EXPOSE 3306
+
+CMD ["mysqld"]
diff --git a/connect.cnf b/tests/docker/mariadb/connect.cnf
old mode 100644
new mode 100755
similarity index 100%
rename from connect.cnf
rename to tests/docker/mariadb/connect.cnf
diff --git a/tests/home_rentals.csv b/tests/home_rentals.csv
deleted file mode 100644
--- a/tests/home_rentals.csv
+++ /dev/null
@@ -1,5038 +0,0 @@
-number_of_rooms,number_of_bathrooms,sqft,location,days_on_market,initial_price,neighborhood,rental_price
-0,1,"484,8",great,10,2271,south_side,2271
-1,1,674,good,1,2167,downtown,2167
-1,1,554,poor,19,1883,westbrae,1883
-0,1,529,great,3,2431,south_side,2431
-3,2,1219,great,3,5510,south_side,5510
-1,1,398,great,11,2272,south_side,2272
-3,2,1190,poor,58,4463,westbrae,4123.812
-1,1,730,good,0,2224,downtown,2224
-0,1,298,great,9,2104,south_side,2104
-2,1,878,great,8,3861,south_side,3861
-1,1,677,good,14,2041,downtown,2041
-0,1,509,poor,18,1725,westbrae,1725
-0,1,481,poor,49,1388,westbrae,1307.496
-3,2,808,good,1,4677,downtown,4677
-1,1,522,poor,30,1713,westbrae,1678.74
-1,1,533,good,10,1903,downtown,1903
-3,2,937,good,2,4736,downtown,4736
-0,1,258,good,10,1544,downtown,1544
-1,1,630,great,11,2543,south_side,2543
-0,1,397,great,11,2168,south_side,2168
-2,1,932,good,10,3413,downtown,3413
-3,2,1069,good,9,4810,downtown,4810
-0,1,267,poor,32,1302,westbrae,1270.752
-0,1,332,good,6,1697,downtown,1697
-1,1,712,poor,56,1617,westbrae,1500.576
-3,2,1231,poor,25,4873,westbrae,4824.27
-2,1,818,good,3,3359,downtown,3359
-2,1,805,good,8,3358,downtown,3358
-3,2,1158,poor,44,4601,westbrae,4380.152
-3,2,952,great,7,5207,south_side,5207
-2,1,771,good,8,3305,downtown,3305
-1,1,333,great,6,2284,south_side,2284
-1,1,500,poor,54,1448,westbrae,1349.536
-2,1,690,poor,16,3095,westbrae,3095
-0,1,524,great,13,2317,south_side,2317
-2,1,762,good,1,3323,downtown,3323
-2,1,872,good,14,3375,downtown,3375
-1,1,673,great,7,2604,south_side,2604
-2,1,792,good,5,3390,downtown,3390
-2,1,640,good,8,3153,downtown,3153
-0,1,454,poor,47,1353,westbrae,1279.938
-2,1,932,good,10,3447,downtown,3447
-0,1,340,good,5,1722,downtown,1722
-1,1,595,good,1,2064,downtown,2064
-2,1,558,good,3,3118,downtown,3118
-3,2,823,good,10,4545,downtown,4545
-3,2,1104,poor,16,4750,westbrae,4750
-1,1,543,poor,18,1871,westbrae,1871
-0,1,455,great,13,2205,south_side,2205
-0,1,113,good,13,1378,downtown,1378
-2,1,553,good,8,3073,downtown,3073
-3,2,1030,great,14,5260,south_side,5260
-3,2,1175,great,6,5446,south_side,5446
-1,1,644,great,12,2572,south_side,2572
-1,1,421,great,8,2333,south_side,2333
-2,1,786,poor,63,2716,westbrae,2482.424
-0,1,518,poor,21,1674,westbrae,1670.652
-1,1,532,great,4,2491,south_side,2491
-1,1,533,poor,26,1748,westbrae,1727.024
-1,1,733,poor,56,1671,westbrae,1550.688
-0,1,417,good,9,1687,downtown,1687
-2,1,891,good,3,3504,downtown,3504
-2,1,938,poor,50,3008,westbrae,2827.52
-2,1,688,good,12,3154,downtown,3154
-1,1,504,poor,22,1791,westbrae,1783.836
-1,1,741,good,3,2226,downtown,2226
-2,1,564,good,13,3001,downtown,3001
-2,1,503,good,10,3026,downtown,3026
-2,1,755,good,7,3254,downtown,3254
-0,1,415,poor,39,1472,westbrae,1416.064
-2,1,911,good,6,3492,downtown,3492
-3,2,853,good,4,4710,downtown,4710
-0,1,337,good,1,1775,downtown,1775
-1,1,477,poor,64,1371,westbrae,1250.352
-3,2,1081,great,7,5351,south_side,5351
-0,1,464,poor,21,1645,westbrae,1641.71
-3,2,1219,good,1,5030,downtown,5030
-3,2,1106,good,10,4833,downtown,4833
-2,1,891,good,9,3371,downtown,3371
-1,1,518,good,6,2005,downtown,2005
-0,1,245,great,4,2094,south_side,2094
-3,2,1216,great,5,5495,south_side,5495
-0,1,381,poor,28,1483,westbrae,1459.272
-2,1,819,great,7,3806,south_side,3806
-2,1,787,good,9,3332,downtown,3332
-3,2,936,good,2,4738,downtown,4738
-2,1,740,good,6,3294,downtown,3294
-3,2,1215,great,13,5467,south_side,5467
-2,1,853,poor,40,3045,westbrae,2923.2
-2,1,942,poor,59,2977,westbrae,2744.794
-3,2,1204,good,7,5016,downtown,5016
-3,2,1098,great,10,5386,south_side,5386
-1,1,741,good,12,2170,downtown,2170
-1,1,603,great,10,2508,south_side,2508
-3,2,1074,good,12,4796,downtown,4796
-1,1,588,good,14,1961,downtown,1961
-0,1,334,poor,48,1243,westbrae,1173.392
-2,1,736,great,2,3854,south_side,3854
-3,2,1056,poor,54,4408,westbrae,4108.256
-1,1,625,great,2,2578,south_side,2578
-2,1,530,poor,47,2613,westbrae,2471.898
-2,1,626,great,4,3693,south_side,3693
-1,1,736,poor,30,1891,westbrae,1853.18
-3,2,882,good,10,4659,downtown,4659
-2,1,646,good,1,3280,downtown,3280
-0,1,231,good,2,1650,downtown,1650
-0,1,157,poor,36,1172,westbrae,1134.496
-1,1,489,good,9,1916,downtown,1916
-0,1,464,good,11,1793,downtown,1793
-1,1,320,great,8,2268,south_side,2268
-0,1,390,great,4,2230,south_side,2230
-2,1,682,great,5,3705,south_side,3705
-2,1,729,great,2,3856,south_side,3856
-3,2,837,poor,23,4439,westbrae,4412.366
-3,2,933,poor,22,4557,westbrae,4538.772
-1,1,606,great,14,2454,south_side,2454
-2,1,663,great,3,3691,south_side,3691
-1,1,407,good,3,1894,downtown,1894
-1,1,692,good,5,2115,downtown,2115
-3,2,975,great,6,5271,south_side,5271
-2,1,726,good,11,3242,downtown,3242
-1,1,329,good,10,1755,downtown,1755
-1,1,418,good,3,1907,downtown,1907
-1,1,627,good,2,2060,downtown,2060
-0,1,435,poor,26,1553,westbrae,1534.364
-2,1,533,great,2,3645,south_side,3645
-3,2,820,poor,23,4430,westbrae,4403.42
-2,1,935,great,13,3880,south_side,3880
-0,1,144,great,4,1979,south_side,1979
-2,1,872,poor,61,2884,westbrae,2647.512
-2,1,646,poor,53,2750,westbrae,2568.5
-0,1,141,poor,59,940,westbrae,866.68
-3,2,816,poor,25,4427,westbrae,4382.73
-0,1,320,poor,36,1322,westbrae,1279.696
-3,2,1143,poor,18,4846,westbrae,4846
-3,2,1087,good,2,4898,downtown,4898
-0,1,369,poor,43,1349,westbrae,1286.946
-1,1,358,good,3,1866,downtown,1866
-0,1,539,good,3,1899,downtown,1899
-2,1,885,poor,50,2974,westbrae,2795.56
-2,1,938,good,7,3497,downtown,3497
-0,1,488,poor,49,1418,westbrae,1335.756
-3,2,1157,good,10,4862,downtown,4862
-3,2,902,poor,29,4456,westbrae,4375.792
-2,1,623,poor,26,2961,westbrae,2925.468
-2,1,803,good,8,3279,downtown,3279
-3,2,1006,poor,44,4416,westbrae,4204.032
-3,2,916,poor,36,4378,westbrae,4237.904
-3,2,1180,good,5,5021,downtown,5021
-0,1,461,poor,31,1547,westbrae,1512.966
-0,1,390,poor,26,1531,westbrae,1512.628
-0,1,271,good,14,1557,downtown,1557
-2,1,745,poor,37,2951,westbrae,2850.666
-3,2,1080,good,14,4771,downtown,4771
-3,2,963,good,3,4734,downtown,4734
-2,1,901,poor,24,3226,westbrae,3200.192
-0,1,307,great,7,2151,south_side,2151
-3,2,871,good,11,4626,downtown,4626
-1,1,649,poor,46,1678,westbrae,1590.744
-0,1,459,poor,37,1464,westbrae,1414.224
-1,1,624,poor,31,1847,westbrae,1806.366
-1,1,350,good,11,1759,downtown,1759
-1,1,600,good,11,2009,downtown,2009
-0,1,541,good,8,1894,downtown,1894
-3,2,939,poor,29,4484,westbrae,4403.288
-2,1,606,great,4,3675,south_side,3675
-3,2,1139,good,11,4861,downtown,4861
-3,2,1234,good,3,5052,downtown,5052
-3,2,1150,good,2,4982,downtown,4982
-3,2,996,good,13,4758,downtown,4758
-2,1,559,great,1,3657,south_side,3657
-1,1,447,good,12,1784,downtown,1784
-1,1,698,poor,23,1998,westbrae,1986.012
-0,1,371,good,13,1639,downtown,1639
-0,1,425,poor,34,1449,westbrae,1408.428
-3,2,948,great,8,5224,south_side,5224
-0,1,441,poor,19,1613,westbrae,1613
-0,1,112,good,7,1487,downtown,1487
-2,1,876,poor,15,3347,westbrae,3347
-3,2,860,good,4,4631,downtown,4631
-0,1,312,poor,26,1479,westbrae,1461.252
-0,1,233,great,10,2079,south_side,2079
-1,1,743,great,3,2732,south_side,2732
-2,1,564,good,6,3137,downtown,3137
-0,1,335,good,12,1609,downtown,1609
-2,1,879,great,10,3923,south_side,3923
-3,2,1001,poor,57,4245,westbrae,3930.87
-1,1,721,great,3,2701,south_side,2701
-0,1,547,good,2,1936,downtown,1936
-2,1,871,good,3,3434,downtown,3434
-2,1,904,great,8,3938,south_side,3938
-0,1,150,poor,60,957,westbrae,880.44
-2,1,892,poor,18,3303,westbrae,3303
-2,1,886,good,10,3396,downtown,3396
-2,1,619,great,0,3671,south_side,3671
-2,1,598,poor,16,3049,westbrae,3049
-2,1,883,good,8,3411,downtown,3411
-1,1,443,good,10,1820,downtown,1820
-3,2,969,poor,56,4236,westbrae,3931.008
-0,1,457,good,4,1796,downtown,1796
-3,2,1175,good,0,4999,downtown,4999
-0,1,511,great,1,2431,south_side,2431
-1,1,514,poor,51,1536,westbrae,1440.768
-3,2,916,good,3,4716,downtown,4716
-1,1,625,poor,42,1699,westbrae,1624.244
-0,1,442,good,5,1779,downtown,1779
-0,1,310,poor,20,1465,westbrae,1465
-3,2,865,good,0,4742,downtown,4742
-3,2,819,great,12,5049,south_side,5049
-1,1,598,great,7,2499,south_side,2499
-0,1,383,good,11,1707,downtown,1707
-0,1,145,good,6,1442,downtown,1442
-2,1,878,good,8,3379,downtown,3379
-2,1,557,poor,29,2904,westbrae,2851.728
-0,1,505,good,2,1863,downtown,1863
-2,1,646,great,0,3727,south_side,3727
-0,1,123,great,9,1976,south_side,1976
-0,1,375,poor,17,1620,westbrae,1620
-3,2,1124,great,9,5420,south_side,5420
-1,1,704,good,14,2081,downtown,2081
-0,1,294,great,1,2221,south_side,2221
-2,1,850,good,2,3437,downtown,3437
-0,1,376,good,1,1766,downtown,1766
-0,1,150,poor,43,1083,westbrae,1033.182
-3,2,1054,poor,46,4411,westbrae,4181.628
-0,1,191,good,12,1492,downtown,1492
-0,1,300,good,8,1667,downtown,1667
-0,1,283,good,7,1582,downtown,1582
-0,1,454,good,13,1753,downtown,1753
-2,1,589,good,7,3077,downtown,3077
-2,1,906,good,9,3421,downtown,3421
-0,1,222,poor,58,1036,westbrae,957.264
-3,2,1088,poor,52,4393,westbrae,4111.848
-0,1,410,good,3,1740,downtown,1740
-1,1,597,good,9,1978,downtown,1978
-3,2,1006,poor,27,4580,westbrae,4515.88
-1,1,499,poor,38,1623,westbrae,1564.572
-0,1,149,poor,47,1046,westbrae,989.516
-0,1,426,good,3,1783,downtown,1783
-0,1,361,great,2,2210,south_side,2210
-3,2,880,good,2,4726,downtown,4726
-2,1,935,good,4,3477,downtown,3477
-1,1,363,good,12,1765,downtown,1765
-3,2,1037,poor,35,4578,westbrae,4440.66
-3,2,1145,good,11,4929,downtown,4929
-1,1,435,good,8,1855,downtown,1855
-3,2,1114,great,9,5363,south_side,5363
-3,2,1055,good,0,4933,downtown,4933
-1,1,503,good,9,1866,downtown,1866
-3,2,816,good,8,4594,downtown,4594
-1,1,435,good,6,1884,downtown,1884
-1,1,609,poor,15,1929,westbrae,1929
-3,2,984,good,0,4855,downtown,4855
-1,1,602,good,2,2043,downtown,2043
-2,1,892,good,3,3435,downtown,3435
-0,1,397,good,11,1698,downtown,1698
-3,2,871,poor,31,4397,westbrae,4300.266
-2,1,653,good,9,3147,downtown,3147
-1,1,748,poor,62,1631,westbrae,1493.996
-0,1,235,great,14,2000,south_side,2000
-3,2,1133,good,9,4874,downtown,4874
-0,1,272,poor,57,1117,westbrae,1034.342
-0,1,532,poor,63,1316,westbrae,1202.824
-2,1,855,good,9,3362,downtown,3362
-3,2,1165,good,9,4902,downtown,4902
-3,2,1085,great,13,5279,south_side,5279
-0,1,195,great,6,2039,south_side,2039
-1,1,409,great,4,2380,south_side,2380
-3,2,1186,poor,62,4437,westbrae,4064.292
-1,1,476,great,8,2370,south_side,2370
-1,1,438,great,6,2332,south_side,2332
-3,2,1182,good,6,4994,downtown,4994
-1,1,305,good,14,1675,downtown,1675
-3,2,1179,good,7,4977,downtown,4977
-3,2,1109,good,9,4906,downtown,4906
-0,1,322,good,13,1573,downtown,1573
-2,1,822,good,8,3374,downtown,3374
-0,1,525,good,0,1875,downtown,1875
-3,2,1022,poor,60,4300,westbrae,3956
-0,1,303,poor,48,1179,westbrae,1112.976
-1,1,656,poor,31,1872,westbrae,1830.816
-3,2,1212,poor,16,4906,westbrae,4906
-1,1,743,poor,44,1756,westbrae,1671.712
-1,1,354,good,0,1859,downtown,1859
-1,1,579,poor,51,1550,westbrae,1453.9
-1,1,327,poor,63,1201,westbrae,1097.714
-2,1,550,good,1,3155,downtown,3155
-3,2,914,good,0,4805,downtown,4805
-3,2,1225,poor,56,4504,westbrae,4179.712
-1,1,714,good,8,2135,downtown,2135
-2,1,679,good,1,3251,downtown,3251
-1,1,719,poor,64,1596,alcatraz_ave,1455.552
-0,1,461,great,6,2269,berkeley_hills,2269
-3,2,1097,good,7,4832,thowsand_oaks,4832
-2,1,525,good,3,3058,thowsand_oaks,3058
-3,2,850,good,1,4718,thowsand_oaks,4718
-3,2,1183,poor,23,4801,alcatraz_ave,4772.194
-3,2,915,great,1,5240,berkeley_hills,5240
-3,2,972,good,5,4794,thowsand_oaks,4794
-3,2,903,poor,27,4528,alcatraz_ave,4464.608
-2,1,738,good,11,3277,thowsand_oaks,3277
-1,1,608,poor,59,1477,alcatraz_ave,1361.794
-2,1,908,poor,49,3013,alcatraz_ave,2838.246
-3,2,961,good,9,4764,thowsand_oaks,4764
-0,1,225,good,3,1626,thowsand_oaks,1626
-1,1,635,good,1,2111,thowsand_oaks,2111
-1,1,519,good,8,1956,thowsand_oaks,1956
-2,1,917,great,13,3901,berkeley_hills,3901
-3,2,1094,great,12,5323,berkeley_hills,5323
-0,1,296,great,5,2151,berkeley_hills,2151
-2,1,536,poor,43,2751,alcatraz_ave,2624.454
-3,2,863,great,4,5151,berkeley_hills,5151
-3,2,953,good,10,4682,thowsand_oaks,4682
-0,1,451,poor,60,1234,alcatraz_ave,1135.28
-3,2,1236,good,11,4993,thowsand_oaks,4993
-2,1,624,poor,47,2738,alcatraz_ave,2590.148
-3,2,902,poor,42,4379,alcatraz_ave,4186.324
-0,1,310,good,12,1638,thowsand_oaks,1638
-2,1,882,good,13,3322,thowsand_oaks,3322
-2,1,939,good,12,3437,thowsand_oaks,3437
-2,1,659,good,14,3166,thowsand_oaks,3166
-2,1,740,good,1,3329,thowsand_oaks,3329
-2,1,697,good,3,3240,thowsand_oaks,3240
-1,1,580,poor,45,1593,alcatraz_ave,1513.35
-0,1,102,great,8,1895,berkeley_hills,1895
-2,1,921,poor,53,3026,alcatraz_ave,2826.284
-1,1,380,poor,30,1577,alcatraz_ave,1545.46
-3,2,869,great,2,5152,berkeley_hills,5152
-1,1,727,good,11,2078,thowsand_oaks,2078
-0,1,240,poor,45,1152,alcatraz_ave,1094.4
-0,1,535,good,14,1830,thowsand_oaks,1830
-2,1,677,poor,49,2778,alcatraz_ave,2616.876
-0,1,187,good,0,1566,thowsand_oaks,1566
-2,1,919,good,12,3419,thowsand_oaks,3419
-3,2,1171,poor,25,4775,alcatraz_ave,4727.25
-3,2,1121,good,10,4877,thowsand_oaks,4877
-0,1,124,good,6,1441,thowsand_oaks,1441
-1,1,326,good,9,1782,thowsand_oaks,1782
-0,1,469,good,14,1713,thowsand_oaks,1713
-3,2,1102,poor,64,4297,alcatraz_ave,3918.864
-0,1,379,poor,28,1534,alcatraz_ave,1509.456
-3,2,983,great,14,5145,berkeley_hills,5145
-1,1,686,good,4,2175,thowsand_oaks,2175
-0,1,194,great,10,2042,berkeley_hills,2042
-2,1,890,good,6,3476,thowsand_oaks,3476
-0,1,497,good,4,1877,thowsand_oaks,1877
-3,2,831,good,4,4621,thowsand_oaks,4621
-1,1,472,poor,17,1771,alcatraz_ave,1771
-3,2,1032,poor,51,4394,alcatraz_ave,4121.572
-0,1,398,great,12,2151,berkeley_hills,2151
-3,2,913,great,4,5269,berkeley_hills,5269
-0,1,438,great,8,2273,berkeley_hills,2273
-0,1,220,good,3,1548,thowsand_oaks,1548
-3,2,1009,great,8,5326,berkeley_hills,5326
-1,1,649,great,5,2563,berkeley_hills,2563
-0,1,499,good,5,1839,thowsand_oaks,1839
-2,1,677,good,0,3232,thowsand_oaks,3232
-3,2,926,good,13,4679,thowsand_oaks,4679
-3,2,1005,poor,59,4255,alcatraz_ave,3923.11
-0,1,137,good,9,1479,thowsand_oaks,1479
-3,2,1066,good,13,4774,thowsand_oaks,4774
-0,1,436,good,14,1658,thowsand_oaks,1658
-2,1,773,good,12,3207,thowsand_oaks,3207
-3,2,1038,great,8,5318,berkeley_hills,5318
-0,1,103,good,12,1413,thowsand_oaks,1413
-0,1,498,good,10,1801,thowsand_oaks,1801
-0,1,432,poor,55,1303,alcatraz_ave,1211.79
-0,1,127,poor,61,923,alcatraz_ave,847.314
-1,1,490,great,6,2452,berkeley_hills,2452
-3,2,987,great,6,5298,berkeley_hills,5298
-1,1,435,poor,49,1457,alcatraz_ave,1372.494
-2,1,606,good,11,3068,thowsand_oaks,3068
-1,1,638,poor,41,1774,alcatraz_ave,1699.492
-3,2,944,great,2,5321,berkeley_hills,5321
-1,1,601,poor,47,1637,alcatraz_ave,1548.602
-3,2,1024,poor,17,4745,alcatraz_ave,4745
-1,1,729,poor,63,1554,alcatraz_ave,1420.356
-3,2,1226,great,11,5436,berkeley_hills,5436
-0,1,519,good,12,1760,thowsand_oaks,1760
-0,1,399,good,9,1672,thowsand_oaks,1672
-2,1,665,good,12,3106,thowsand_oaks,3106
-2,1,566,great,9,3569,berkeley_hills,3569
-2,1,549,poor,19,2997,alcatraz_ave,2997
-2,1,934,good,2,3529,thowsand_oaks,3529
-0,1,500,poor,42,1482,alcatraz_ave,1416.792
-0,1,111,great,9,1878,berkeley_hills,1878
-3,2,848,good,6,4599,thowsand_oaks,4599
-0,1,305,good,3,1687,thowsand_oaks,1687
-0,1,361,poor,56,1245,alcatraz_ave,1155.36
-1,1,565,good,6,2038,thowsand_oaks,2038
-0,1,283,poor,54,1115,alcatraz_ave,1039.18
-2,1,505,great,11,3478,berkeley_hills,3478
-0,1,228,great,10,2067,berkeley_hills,2067
-3,2,1119,poor,54,4405,alcatraz_ave,4105.46
-0,1,267,good,2,1635,thowsand_oaks,1635
-3,2,916,good,0,4727,thowsand_oaks,4727
-2,1,903,poor,58,2923,alcatraz_ave,2700.852
-3,2,1046,good,13,4722,thowsand_oaks,4722
-1,1,513,good,11,1924,thowsand_oaks,1924
-3,2,1211,good,1,5043,thowsand_oaks,5043
-0,1,105,poor,28,1207,alcatraz_ave,1187.688
-2,1,553,good,1,3177,thowsand_oaks,3177
-3,2,1044,good,9,4847,thowsand_oaks,4847
-0,1,492,poor,64,1265,alcatraz_ave,1153.68
-2,1,847,good,7,3419,thowsand_oaks,3419
-0,1,232,poor,62,987,alcatraz_ave,904.092
-2,1,774,great,3,3875,berkeley_hills,3875
-2,1,582,poor,60,2591,alcatraz_ave,2383.72
-2,1,652,good,0,3212,thowsand_oaks,3212
-2,1,538,good,10,2988,thowsand_oaks,2988
-0,1,391,poor,53,1245,alcatraz_ave,1162.83
-1,1,723,great,3,2649,berkeley_hills,2649
-3,2,1043,great,2,5356,berkeley_hills,5356
-3,2,893,poor,38,4391,alcatraz_ave,4232.924
-2,1,608,good,9,3109,thowsand_oaks,3109
-0,1,511,great,12,2242,berkeley_hills,2242
-2,1,791,great,1,3870,berkeley_hills,3870
-2,1,792,poor,33,3014,alcatraz_ave,2935.636
-0,1,110,poor,20,1315,alcatraz_ave,1315
-1,1,616,poor,17,1943,alcatraz_ave,1943
-2,1,505,good,2,3080,thowsand_oaks,3080
-1,1,411,good,8,1810,thowsand_oaks,1810
-2,1,846,good,12,3282,thowsand_oaks,3282
-3,2,965,good,4,4811,thowsand_oaks,4811
-1,1,440,good,6,1847,thowsand_oaks,1847
-1,1,386,poor,56,1345,alcatraz_ave,1248.16
-3,2,1247,good,11,4974,thowsand_oaks,4974
-3,2,1058,great,11,5336,berkeley_hills,5336
-2,1,587,poor,63,2572,alcatraz_ave,2350.808
-0,1,145,great,1,2049,berkeley_hills,2049
-3,2,928,poor,47,4324,alcatraz_ave,4090.504
-2,1,735,good,4,3288,thowsand_oaks,3288
-1,1,433,poor,51,1430,alcatraz_ave,1341.34
-3,2,1009,good,10,4760,thowsand_oaks,4760
-0,1,369,poor,42,1308,alcatraz_ave,1250.448
-1,1,654,poor,36,1841,alcatraz_ave,1782.088
-0,1,144,good,5,1531,thowsand_oaks,1531
-0,1,425,great,13,2162,berkeley_hills,2162
-1,1,509,poor,20,1765,alcatraz_ave,1765
-2,1,615,good,9,3085,thowsand_oaks,3085
-1,1,519,great,6,2453,berkeley_hills,2453
-3,2,1235,great,1,5602,berkeley_hills,5602
-0,1,434,good,0,1800,thowsand_oaks,1800
-3,2,1028,poor,15,4693,alcatraz_ave,4693
-2,1,852,good,12,3369,thowsand_oaks,3369
-3,2,1039,good,1,4887,thowsand_oaks,4887
-3,2,1020,great,5,5319,berkeley_hills,5319
-0,1,354,great,13,2162,berkeley_hills,2162
-3,2,1034,great,12,5259,berkeley_hills,5259
-3,2,800,good,2,4618,thowsand_oaks,4618
-2,1,815,good,0,3436,thowsand_oaks,3436
-0,1,350,poor,61,1096,alcatraz_ave,1006.128
-0,1,489,great,6,2307,berkeley_hills,2307
-1,1,744,poor,37,1919,alcatraz_ave,1853.754
-3,2,874,good,1,4671,thowsand_oaks,4671
-0,1,392,great,7,2205,berkeley_hills,2205
-0,1,485,great,12,2285,berkeley_hills,2285
-0,1,500,poor,27,1632,alcatraz_ave,1609.152
-3,2,1239,poor,24,4876,alcatraz_ave,4836.992
-1,1,702,good,13,2068,thowsand_oaks,2068
-0,1,467,good,1,1869,thowsand_oaks,1869
-2,1,564,good,7,3119,thowsand_oaks,3119
-1,1,560,great,2,2497,berkeley_hills,2497
-0,1,219,good,8,1529,thowsand_oaks,1529
-3,2,1154,great,11,5396,berkeley_hills,5396
-1,1,309,good,2,1783,thowsand_oaks,1783
-2,1,655,poor,32,2921,alcatraz_ave,2850.896
-1,1,338,good,3,1835,thowsand_oaks,1835
-3,2,908,good,9,4620,thowsand_oaks,4620
-1,1,518,poor,60,1385,alcatraz_ave,1274.2
-0,1,279,good,11,1601,thowsand_oaks,1601
-0,1,267,good,7,1580,thowsand_oaks,1580
-2,1,587,good,14,3062,thowsand_oaks,3062
-1,1,648,poor,62,1557,alcatraz_ave,1426.212
-2,1,845,poor,37,3100,alcatraz_ave,2994.6
-2,1,549,great,6,3575,berkeley_hills,3575
-1,1,660,good,8,2092,thowsand_oaks,2092
-2,1,585,good,10,3060,thowsand_oaks,3060
-0,1,279,good,3,1681,thowsand_oaks,1681
-0,1,538,poor,43,1490,alcatraz_ave,1421.46
-2,1,932,poor,53,3006,alcatraz_ave,2807.604
-2,1,814,great,5,3868,berkeley_hills,3868
-2,1,709,poor,27,3018,alcatraz_ave,2975.748
-2,1,896,good,5,3483,thowsand_oaks,3483
-3,2,1140,good,10,4853,thowsand_oaks,4853
-3,2,1065,poor,32,4633,alcatraz_ave,4521.808
-2,1,735,poor,33,2980,alcatraz_ave,2902.52
-2,1,613,poor,39,2847,alcatraz_ave,2738.814
-1,1,666,poor,38,1775,alcatraz_ave,1711.1
-1,1,300,good,5,1739,thowsand_oaks,1739
-3,2,959,great,3,5286,berkeley_hills,5286
-0,1,135,great,1,2026,berkeley_hills,2026
-1,1,568,poor,32,1722,alcatraz_ave,1680.672
-2,1,824,poor,47,2962,alcatraz_ave,2802.052
-1,1,493,poor,24,1798,alcatraz_ave,1783.616
-2,1,666,great,10,3671,berkeley_hills,3671
-1,1,432,good,14,1782,thowsand_oaks,1782
-2,1,768,great,7,3795,berkeley_hills,3795
-0,1,223,poor,57,1068,alcatraz_ave,988.968
-1,1,568,poor,15,1885,alcatraz_ave,1885
-0,1,503,good,9,1839,thowsand_oaks,1839
-3,2,872,poor,53,4211,alcatraz_ave,3933.074
-1,1,407,great,3,2338,berkeley_hills,2338
-0,1,150,great,3,2041,berkeley_hills,2041
-1,1,701,good,10,2074,thowsand_oaks,2074
-3,2,811,poor,47,4193,alcatraz_ave,3966.578
-3,2,1086,poor,25,4639,alcatraz_ave,4592.61
-3,2,1002,good,2,4824,thowsand_oaks,4824
-1,1,618,poor,16,1954,alcatraz_ave,1954
-2,1,884,great,9,3855,berkeley_hills,3855
-3,2,1125,good,8,4895,thowsand_oaks,4895
-1,1,320,good,1,1789,thowsand_oaks,1789
-2,1,853,good,12,3336,thowsand_oaks,3336
-1,1,399,great,12,2236,berkeley_hills,2236
-3,2,980,great,8,5239,berkeley_hills,5239
-0,1,400,poor,24,1527,alcatraz_ave,1514.784
-2,1,836,great,5,3927,berkeley_hills,3927
-3,2,1175,poor,57,4465,alcatraz_ave,4134.59
-3,2,1094,good,6,4883,thowsand_oaks,4883
-2,1,925,good,13,3355,thowsand_oaks,3355
-2,1,736,poor,60,2708,alcatraz_ave,2491.36
-2,1,729,good,9,3280,thowsand_oaks,3280
-1,1,540,poor,60,1463,alcatraz_ave,1345.96
-3,2,858,poor,31,4366,alcatraz_ave,4269.948
-1,1,570,great,13,2421,berkeley_hills,2421
-2,1,664,great,0,3749,berkeley_hills,3749
-0,1,378,good,13,1653,thowsand_oaks,1653
-1,1,670,good,3,2149,thowsand_oaks,2149
-0,1,474,great,4,2382,berkeley_hills,2382
-3,2,1157,great,10,5453,berkeley_hills,5453
-1,1,417,great,11,2276,berkeley_hills,2276
-0,1,197,good,7,1576,thowsand_oaks,1576
-1,1,668,good,0,2191,thowsand_oaks,2191
-0,1,123,good,1,1485,thowsand_oaks,1485
-3,2,875,poor,58,4096,alcatraz_ave,3784.704
-3,2,1131,good,6,4957,thowsand_oaks,4957
-1,1,339,poor,22,1660,alcatraz_ave,1653.36
-2,1,921,good,5,3473,thowsand_oaks,3473
-3,2,894,poor,57,4200,alcatraz_ave,3889.2
-1,1,594,poor,57,1487,alcatraz_ave,1376.962
-2,1,935,great,14,3884,berkeley_hills,3884
-0,1,523,poor,38,1561,alcatraz_ave,1504.804
-0,1,535,poor,45,1451,alcatraz_ave,1378.45
-1,1,585,poor,56,1509,alcatraz_ave,1400.352
-1,1,385,poor,28,1639,alcatraz_ave,1612.776
-2,1,578,poor,16,3065,alcatraz_ave,3065
-1,1,678,good,14,2081,thowsand_oaks,2081
-0,1,493,poor,24,1636,alcatraz_ave,1622.912
-2,1,874,good,8,3359,thowsand_oaks,3359
-1,1,328,great,0,2285,berkeley_hills,2285
-0,1,549,poor,59,1397,alcatraz_ave,1288.034
-1,1,519,good,1,2014,thowsand_oaks,2014
-2,1,554,good,13,2996,thowsand_oaks,2996
-0,1,283,great,10,2072,berkeley_hills,2072
-1,1,557,good,14,1960,thowsand_oaks,1960
-0,1,148,great,6,2028,berkeley_hills,2028
-1,1,560,good,9,1992,thowsand_oaks,1992
-3,2,824,poor,49,4217,alcatraz_ave,3972.414
-0,1,177,good,7,1471,thowsand_oaks,1471
-2,1,776,great,2,3881,berkeley_hills,3881
-1,1,332,poor,15,1703,alcatraz_ave,1703
-1,1,722,great,13,2633,berkeley_hills,2633
-2,1,641,great,3,3751,berkeley_hills,3751
-3,2,829,poor,16,4508,alcatraz_ave,4508
-2,1,647,poor,19,3028,alcatraz_ave,3028
-2,1,619,good,5,3208,thowsand_oaks,3208
-0,1,188,good,7,1477,thowsand_oaks,1477
-3,2,1146,good,14,4836,thowsand_oaks,4836
-1,1,338,good,10,1783,thowsand_oaks,1783
-3,2,1232,good,0,5124,thowsand_oaks,5124
-3,2,1054,great,8,5335,berkeley_hills,5335
-2,1,916,great,3,3974,berkeley_hills,3974
-0,1,169,poor,24,1361,alcatraz_ave,1350.112
-1,1,431,good,13,1775,thowsand_oaks,1775
-3,2,840,great,9,5130,berkeley_hills,5130
-3,2,926,good,0,4820,thowsand_oaks,4820
-3,2,1245,great,8,5506,berkeley_hills,5506
-2,1,694,poor,41,2874,alcatraz_ave,2753.292
-3,2,881,great,0,5199,berkeley_hills,5199
-3,2,1199,poor,62,4418,alcatraz_ave,4046.888
-2,1,913,great,12,3901,berkeley_hills,3901
-0,1,409,poor,60,1190,alcatraz_ave,1094.8
-1,1,627,great,13,2505,berkeley_hills,2505
-1,1,510,great,10,2438,berkeley_hills,2438
-3,2,1218,poor,20,4906,alcatraz_ave,4906
-2,1,778,poor,34,3057,alcatraz_ave,2971.404
-3,2,891,good,12,4644,thowsand_oaks,4644
-2,1,587,poor,27,2928,alcatraz_ave,2887.008
-3,2,1020,good,4,4825,thowsand_oaks,4825
-3,2,1226,good,11,4936,thowsand_oaks,4936
-2,1,696,good,10,3182,thowsand_oaks,3182
-1,1,486,great,4,2412,berkeley_hills,2412
-1,1,426,good,12,1810,thowsand_oaks,1810
-2,1,845,poor,32,3121,alcatraz_ave,3046.096
-1,1,689,good,5,2089,thowsand_oaks,2089
-2,1,918,great,10,3929,berkeley_hills,3929
-3,2,1104,poor,43,4573,alcatraz_ave,4362.642
-3,2,1182,good,11,4944,thowsand_oaks,4944
-1,1,461,good,2,1965,thowsand_oaks,1965
-0,1,461,good,1,1819,thowsand_oaks,1819
-3,2,1206,good,2,5015,thowsand_oaks,5015
-0,1,237,poor,21,1409,alcatraz_ave,1406.182
-1,1,451,poor,41,1520,alcatraz_ave,1456.16
-2,1,903,poor,26,3193,alcatraz_ave,3154.684
-2,1,560,good,5,3089,thowsand_oaks,3089
-1,1,406,poor,55,1397,alcatraz_ave,1299.21
-0,1,547,poor,29,1641,alcatraz_ave,1611.462
-1,1,533,good,1,2000,thowsand_oaks,2000
-1,1,588,poor,50,1623,alcatraz_ave,1525.62
-1,1,471,poor,53,1450,northwest,1354.3
-3,2,1066,great,8,5301,east_elmwood,5301
-1,1,503,poor,17,1860,northwest,1860
-3,2,1024,good,14,4704,west_welmwood,4704
-0,1,323,poor,59,1127,northwest,1039.094
-1,1,687,poor,61,1616,northwest,1483.488
-3,2,995,good,2,4861,west_welmwood,4861
-0,1,357,good,3,1701,west_welmwood,1701
-2,1,723,poor,15,3170,northwest,3170
-3,2,841,good,11,4609,west_welmwood,4609
-0,1,276,great,4,2104,east_elmwood,2104
-3,2,1077,poor,62,4344,northwest,3979.104
-0,1,304,good,0,1683,west_welmwood,1683
-3,2,990,good,6,4820,west_welmwood,4820
-3,2,863,good,12,4600,west_welmwood,4600
-0,1,119,poor,25,1264,northwest,1251.36
-0,1,234,good,12,1492,west_welmwood,1492
-0,1,157,good,1,1536,west_welmwood,1536
-0,1,524,great,10,2358,east_elmwood,2358
-0,1,502,poor,47,1436,northwest,1358.456
-1,1,449,poor,51,1472,northwest,1380.736
-2,1,592,good,12,3074,west_welmwood,3074
-1,1,387,good,11,1813,west_welmwood,1813
-2,1,873,good,3,3435,west_welmwood,3435
-3,2,827,good,0,4632,west_welmwood,4632
-1,1,593,poor,63,1505,northwest,1375.57
-1,1,479,good,2,1995,west_welmwood,1995
-2,1,788,good,0,3396,west_welmwood,3396
-1,1,737,good,9,2146,west_welmwood,2146
-1,1,467,good,2,1979,west_welmwood,1979
-0,1,424,good,1,1813,west_welmwood,1813
-1,1,471,great,5,2402,east_elmwood,2402
-1,1,547,good,5,2040,west_welmwood,2040
-1,1,328,poor,35,1512,northwest,1466.64
-0,1,154,good,9,1432,west_welmwood,1432
-2,1,701,poor,31,3003,northwest,2936.934
-3,2,1169,poor,31,4758,northwest,4653.324
-3,2,1024,good,1,4856,west_welmwood,4856
-1,1,633,great,12,2550,east_elmwood,2550
-0,1,501,good,10,1825,west_welmwood,1825
-2,1,771,good,0,3340,west_welmwood,3340
-1,1,730,poor,59,1641,northwest,1513.002
-0,1,545,good,0,1980,west_welmwood,1980
-2,1,667,great,9,3647,east_elmwood,3647
-0,1,513,good,10,1779,west_welmwood,1779
-3,2,1152,good,12,4870,west_welmwood,4870
-0,1,333,good,2,1748,west_welmwood,1748
-2,1,919,good,2,3450,west_welmwood,3450
-1,1,372,poor,64,1261,northwest,1150.032
-1,1,671,poor,35,1823,northwest,1768.31
-0,1,319,good,0,1680,west_welmwood,1680
-3,2,867,poor,63,4060,northwest,3710.84
-0,1,320,good,12,1594,west_welmwood,1594
-1,1,594,great,8,2484,east_elmwood,2484
-2,1,882,good,14,3389,west_welmwood,3389
-3,2,1062,poor,56,4374,northwest,4059.072
-3,2,823,poor,59,4046,northwest,3730.412
-2,1,874,good,6,3450,west_welmwood,3450
-2,1,687,poor,30,2947,northwest,2888.06
-2,1,621,good,3,3150,west_welmwood,3150
-2,1,705,great,2,3812,east_elmwood,3812
-1,1,658,poor,41,1741,northwest,1667.878
-1,1,314,poor,58,1211,northwest,1118.964
-2,1,831,good,2,3439,west_welmwood,3439
-0,1,290,poor,59,1068,northwest,984.696
-0,1,398,poor,31,1499,northwest,1466.022
-2,1,729,good,12,3195,west_welmwood,3195
-0,1,238,poor,17,1471,northwest,1471
-0,1,538,good,10,1863,west_welmwood,1863
-2,1,631,poor,23,3019,northwest,3000.886
-0,1,542,poor,45,1474,northwest,1400.3
-3,2,883,poor,34,4365,northwest,4242.78
-2,1,703,poor,46,2811,northwest,2664.828
-1,1,441,poor,55,1396,northwest,1298.28
-3,2,1231,poor,62,4425,northwest,4053.3
-1,1,692,great,8,2590,east_elmwood,2590
-0,1,540,good,7,1834,west_welmwood,1834
-1,1,326,good,3,1773,west_welmwood,1773
-2,1,839,great,9,3816,east_elmwood,3816
-3,2,1173,great,4,5445,east_elmwood,5445
-1,1,569,poor,17,1937,northwest,1937
-2,1,622,great,10,3635,east_elmwood,3635
-2,1,716,poor,17,3183,northwest,3183
-1,1,548,good,12,1973,west_welmwood,1973
-2,1,591,poor,28,2954,northwest,2906.736
-3,2,868,poor,22,4453,northwest,4435.188
-2,1,853,poor,27,3213,northwest,3168.018
-1,1,733,great,2,2760,east_elmwood,2760
-2,1,710,good,14,3196,west_welmwood,3196
-0,1,151,good,0,1535,west_welmwood,1535
-3,2,1013,poor,25,4624,northwest,4577.76
-0,1,289,poor,54,1157,northwest,1078.324
-0,1,440,good,12,1746,west_welmwood,1746
-2,1,714,good,11,3202,west_welmwood,3202
-3,2,931,good,1,4762,west_welmwood,4762
-3,2,938,poor,19,4634,northwest,4634
-3,2,1191,great,0,5558,east_elmwood,5558
-1,1,495,good,6,1945,west_welmwood,1945
-3,2,1077,poor,20,4722,northwest,4722
-2,1,782,great,11,3744,east_elmwood,3744
-3,2,1099,good,7,4893,west_welmwood,4893
-0,1,398,great,13,2128,east_elmwood,2128
-1,1,385,good,13,1775,west_welmwood,1775
-0,1,213,good,13,1526,west_welmwood,1526
-0,1,180,poor,37,1199,northwest,1158.234
-3,2,979,poor,34,4456,northwest,4331.232
-2,1,757,good,5,3312,west_welmwood,3312
-1,1,673,great,9,2619,east_elmwood,2619
-1,1,438,good,9,1798,west_welmwood,1798
-0,1,110,great,2,2037,east_elmwood,2037
-1,1,577,good,5,2017,west_welmwood,2017
-2,1,905,good,4,3459,west_welmwood,3459
-3,2,856,good,4,4712,west_welmwood,4712
-3,2,1082,great,12,5273,east_elmwood,5273
-0,1,174,good,14,1429,west_welmwood,1429
-1,1,435,poor,60,1303,northwest,1198.76
-2,1,765,poor,38,2952,northwest,2845.728
-1,1,628,good,10,2040,west_welmwood,2040
-1,1,735,great,10,2621,east_elmwood,2621
-1,1,568,good,14,1947,west_welmwood,1947
-1,1,625,good,10,2008,west_welmwood,2008
-2,1,846,good,2,3470,west_welmwood,3470
-2,1,518,good,3,3094,west_welmwood,3094
-3,2,1185,poor,53,4487,northwest,4190.858
-0,1,531,good,7,1906,west_welmwood,1906
-3,2,1245,good,3,5057,west_welmwood,5057
-0,1,384,poor,36,1416,northwest,1370.688
-3,2,940,good,9,4741,west_welmwood,4741
-1,1,740,good,1,2265,west_welmwood,2265
-1,1,307,great,0,2348,east_elmwood,2348
-2,1,548,good,10,3086,west_welmwood,3086
-0,1,461,great,1,2390,east_elmwood,2390
-3,2,1145,good,4,4947,west_welmwood,4947
-0,1,522,good,7,1827,west_welmwood,1827
-0,1,324,good,14,1570,west_welmwood,1570
-2,1,528,poor,34,2823,northwest,2743.956
-0,1,498,poor,63,1259,northwest,1150.726
-1,1,725,good,8,2100,west_welmwood,2100
-2,1,612,good,7,3183,west_welmwood,3183
-1,1,496,poor,59,1430,northwest,1318.46
-2,1,769,poor,49,2845,northwest,2679.99
-3,2,1034,good,6,4811,west_welmwood,4811
-2,1,646,poor,57,2718,northwest,2516.868
-3,2,901,good,12,4635,west_welmwood,4635
-3,2,1167,good,11,4895,west_welmwood,4895
-0,1,405,poor,45,1319,northwest,1253.05
-3,2,1242,good,14,4936,west_welmwood,4936
-1,1,599,good,3,2084,west_welmwood,2084
-1,1,587,good,3,2101,west_welmwood,2101
-2,1,810,good,3,3398,west_welmwood,3398
-1,1,709,good,9,2161,west_welmwood,2161
-0,1,497,good,0,1906,west_welmwood,1906
-1,1,305,good,0,1794,west_welmwood,1794
-2,1,667,good,6,3220,west_welmwood,3220
-3,2,895,good,11,4663,west_welmwood,4663
-3,2,1161,good,9,4906,west_welmwood,4906
-0,1,387,poor,29,1501,northwest,1473.982
-1,1,503,good,0,2014,west_welmwood,2014
-2,1,856,great,4,3881,east_elmwood,3881
-1,1,444,great,11,2327,east_elmwood,2327
-0,1,207,good,3,1563,west_welmwood,1563
-1,1,676,good,1,2204,west_welmwood,2204
-3,2,1059,poor,25,4689,northwest,4642.11
-1,1,573,great,9,2436,east_elmwood,2436
-0,1,285,good,13,1595,west_welmwood,1595
-0,1,490,good,1,1864,west_welmwood,1864
-3,2,824,good,3,4631,west_welmwood,4631
-0,1,234,good,7,1562,west_welmwood,1562
-2,1,920,poor,15,3354,northwest,3354
-3,2,966,good,14,4689,west_welmwood,4689
-0,1,423,good,12,1725,west_welmwood,1725
-2,1,932,great,2,3980,east_elmwood,3980
-0,1,419,poor,28,1506,northwest,1481.904
-2,1,752,good,5,3342,west_welmwood,3342
-2,1,671,poor,51,2775,northwest,2602.95
-2,1,628,poor,51,2671,northwest,2505.398
-3,2,1058,good,12,4808,west_welmwood,4808
-2,1,668,poor,62,2605,northwest,2386.18
-0,1,131,poor,37,1125,northwest,1086.75
-0,1,269,good,9,1565,west_welmwood,1565
-1,1,663,good,9,2083,west_welmwood,2083
-3,2,1219,poor,34,4769,northwest,4635.468
-1,1,536,poor,41,1627,northwest,1558.666
-0,1,215,good,11,1532,west_welmwood,1532
-1,1,558,poor,51,1500,northwest,1407
-3,2,1242,great,1,5577,east_elmwood,5577
-1,1,672,poor,30,1845,northwest,1808.1
-1,1,497,good,4,1940,west_welmwood,1940
-1,1,598,great,2,2549,east_elmwood,2549
-3,2,1134,good,0,4959,west_welmwood,4959
-3,2,1174,great,11,5380,east_elmwood,5380
-1,1,476,poor,30,1634,northwest,1601.32
-0,1,228,great,1,2129,east_elmwood,2129
-0,1,389,great,12,2136,east_elmwood,2136
-0,1,489,great,7,2285,east_elmwood,2285
-1,1,352,poor,48,1357,northwest,1281.008
-0,1,544,good,7,1923,west_welmwood,1923
-1,1,604,poor,29,1776,northwest,1744.032
-1,1,528,good,5,2023,west_welmwood,2023
-3,2,1036,great,12,5305,east_elmwood,5305
-3,2,1060,poor,42,4462,northwest,4265.672
-2,1,872,good,2,3433,west_welmwood,3433
-1,1,477,good,4,1894,west_welmwood,1894
-2,1,933,poor,16,3398,northwest,3398
-1,1,511,good,10,1941,west_welmwood,1941
-0,1,228,poor,19,1443,northwest,1443
-2,1,819,poor,55,2869,northwest,2668.17
-1,1,556,great,10,2447,east_elmwood,2447
-0,1,318,poor,33,1376,northwest,1340.224
-3,2,814,good,3,4680,west_welmwood,4680
-3,2,1215,poor,48,4631,northwest,4371.664
-0,1,190,poor,44,1195,northwest,1137.64
-2,1,594,good,1,3217,west_welmwood,3217
-0,1,310,poor,39,1358,northwest,1306.396
-2,1,629,great,7,3690,east_elmwood,3690
-3,2,957,good,7,4687,west_welmwood,4687
-3,2,1056,great,10,5328,east_elmwood,5328
-2,1,741,good,0,3370,west_welmwood,3370
-1,1,447,good,10,1889,west_welmwood,1889
-2,1,772,great,0,3839,east_elmwood,3839
-2,1,669,poor,32,2932,northwest,2861.632
-1,1,485,poor,39,1613,northwest,1551.706
-1,1,462,great,5,2387,east_elmwood,2387
-1,1,430,great,6,2332,east_elmwood,2332
-3,2,1183,good,7,4920,west_welmwood,4920
-3,2,875,poor,27,4471,northwest,4408.406
-1,1,496,good,14,1839,west_welmwood,1839
-3,2,993,great,6,5290,east_elmwood,5290
-0,1,446,poor,38,1448,northwest,1395.872
-0,1,431,good,1,1855,west_welmwood,1855
-0,1,300,great,1,2228,east_elmwood,2228
-0,1,304,good,5,1610,west_welmwood,1610
-2,1,748,poor,38,3003,northwest,2894.892
-0,1,182,poor,49,1116,northwest,1051.272
-0,1,198,good,1,1577,west_welmwood,1577
-3,2,1129,poor,52,4475,northwest,4188.6
-3,2,1005,good,2,4807,west_welmwood,4807
-2,1,509,great,14,3502,east_elmwood,3502
-2,1,731,poor,29,3058,northwest,3002.956
-2,1,809,poor,45,2996,northwest,2846.2
-0,1,232,poor,44,1239,northwest,1179.528
-0,1,247,poor,27,1422,northwest,1402.092
-1,1,682,great,4,2689,east_elmwood,2689
-1,1,493,poor,34,1617,northwest,1571.724
-1,1,536,good,2,2043,west_welmwood,2043
-0,1,513,poor,59,1343,northwest,1238.246
-3,2,1218,good,1,5076,west_welmwood,5076
-3,2,1072,good,4,4875,west_welmwood,4875
-2,1,778,good,0,3362,west_welmwood,3362
-2,1,773,good,12,3290,west_welmwood,3290
-0,1,372,great,1,2268,east_elmwood,2268
-3,2,1146,poor,31,4694,northwest,4590.732
-3,2,904,good,7,4670,west_welmwood,4670
-0,1,263,good,6,1621,west_welmwood,1621
-2,1,784,good,5,3295,west_welmwood,3295
-1,1,669,great,13,2555,east_elmwood,2555
-0,1,488,good,4,1891,west_welmwood,1891
-0,1,231,good,11,1510,west_welmwood,1510
-2,1,849,good,2,3399,west_welmwood,3399
-2,1,621,good,3,3227,west_welmwood,3227
-1,1,483,good,8,1884,west_welmwood,1884
-0,1,243,good,5,1561,west_welmwood,1561
-2,1,840,good,10,3349,west_welmwood,3349
-3,2,1141,good,11,4852,west_welmwood,4852
-0,1,311,poor,40,1295,northwest,1243.2
-3,2,832,great,3,5146,east_elmwood,5146
-2,1,546,good,9,3092,west_welmwood,3092
-0,1,483,good,10,1801,west_welmwood,1801
-1,1,390,poor,63,1301,northwest,1189.114
-1,1,473,poor,37,1636,northwest,1580.376
-3,2,1080,good,3,4884,west_welmwood,4884
-2,1,815,poor,37,3029,northwest,2926.014
-0,1,289,poor,22,1430,northwest,1424.28
-1,1,372,good,11,1797,west_welmwood,1797
-2,1,949,great,10,3939,east_elmwood,3939
-2,1,754,poor,31,3015,northwest,2948.67
-0,1,105,poor,53,926,northwest,864.884
-2,1,848,good,11,3306,west_welmwood,3306
-1,1,737,poor,35,1901,northwest,1843.97
-2,1,856,poor,26,3175,northwest,3136.9
-3,2,953,poor,46,4294,northwest,4070.712
-0,1,449,poor,45,1435,northwest,1363.25
-3,2,1134,good,7,4922,west_welmwood,4922
-2,1,601,good,0,3153,west_welmwood,3153
-2,1,743,good,10,3230,west_welmwood,3230
-1,1,436,good,9,1850,west_welmwood,1850
-0,1,280,good,1,1701,west_welmwood,1701
-2,1,923,good,3,3522,west_welmwood,3522
-0,1,364,great,5,2237,east_elmwood,2237
-0,1,246,good,8,1608,west_welmwood,1608
-0,1,202,poor,23,1399,northwest,1390.606
-0,1,489,good,12,1738,west_welmwood,1738
-1,1,517,good,8,1963,west_welmwood,1963
-0,1,300,good,6,1661,west_welmwood,1661
-2,1,587,good,2,3132,west_welmwood,3132
-1,1,350,good,1,1852,west_welmwood,1852
-2,1,776,good,1,3354,west_welmwood,3354
-0,1,142,good,12,1418,west_welmwood,1418
-2,1,802,great,1,3885,east_elmwood,3885
-0,1,454,poor,17,1662,northwest,1662
-1,1,524,poor,64,1380,northwest,1258.56
-2,1,553,great,14,3485,east_elmwood,3485
-3,2,1139,great,13,5397,east_elmwood,5397
-2,1,820,good,8,3324,west_welmwood,3324
-0,1,505,good,14,1738,west_welmwood,1738
-0,1,340,poor,57,1190,northwest,1101.94
-3,2,1189,good,8,4956,west_welmwood,4956
-3,2,1165,great,0,5525,east_elmwood,5525
-0,1,400,poor,38,1407,northwest,1356.348
-3,2,1214,great,14,5405,east_elmwood,5405
-1,1,493,good,13,1846,west_welmwood,1846
-0,1,344,poor,64,1078,northwest,983.136
-0,1,367,poor,57,1230,northwest,1138.98
-2,1,864,great,5,3935,east_elmwood,3935
-1,1,384,poor,36,1499,northwest,1451.032
-2,1,948,good,3,3549,west_welmwood,3549
-0,1,390,poor,32,1479,northwest,1443.504
-0,1,435,good,12,1683,west_welmwood,1683
-1,1,725,great,10,2672,east_elmwood,2672
-0,1,456,poor,39,1418,northwest,1364.116
-2,1,788,good,5,3295,west_welmwood,3295
-2,1,810,good,10,3331,west_welmwood,3331
-0,1,456,good,13,1721,west_welmwood,1721
-1,1,349,great,0,2333,east_elmwood,2333
-3,2,1065,good,3,4848,west_welmwood,4848
-2,1,623,poor,36,2911,northwest,2817.848
-1,1,543,good,8,1952,west_welmwood,1952
-3,2,1047,good,8,4816,west_welmwood,4816
-0,1,189,poor,63,967,northwest,883.838
-1,1,323,great,4,2239,east_elmwood,2239
-0,1,221,poor,20,1454,northwest,1454
-3,2,1022,poor,20,4672,northwest,4672
-1,1,539,good,10,1987,west_welmwood,1987
-1,1,619,poor,29,1848,northwest,1814.736
-2,1,742,poor,42,2971,northwest,2840.276
-0,1,481,poor,45,1471,northwest,1397.45
-3,2,1246,great,10,5540,east_elmwood,5540
-1,1,530,great,1,2546,east_elmwood,2546
-2,1,536,poor,37,2790,northwest,2695.14
-3,2,1193,great,13,5424,east_elmwood,5424
-1,1,347,good,3,1806,west_welmwood,1806
-3,2,970,good,14,4640,west_welmwood,4640
-3,2,1081,poor,26,4643,northwest,4587.284
-3,2,885,good,4,4701,west_welmwood,4701
-3,2,803,great,1,5129,east_elmwood,5129
-2,1,785,poor,25,3177,northwest,3145.23
-3,2,1042,poor,36,4498,northwest,4354.064
-2,1,576,good,1,3173,west_welmwood,3173
-0,1,412,poor,41,1365,northwest,1307.67
-3,2,1189,good,8,4996,west_welmwood,4996
-2,1,771,great,2,3826,east_elmwood,3826
-2,1,847,poor,59,2876,northwest,2651.672
-3,2,1114,great,9,5355,east_elmwood,5355
-2,1,618,great,2,3746,east_elmwood,3746
-0,1,424,good,7,1732,west_welmwood,1732
-0,1,494,good,12,1786,west_welmwood,1786
-3,2,1113,great,3,5447,east_elmwood,5447
-3,2,1155,great,10,5451,east_elmwood,5451
-0,1,123,good,0,1527,west_welmwood,1527
-0,1,264,poor,62,1037,northwest,949.892
-2,1,864,good,3,3456,west_welmwood,3456
-1,1,440,good,3,1894,west_welmwood,1894
-3,2,1023,poor,54,4351,northwest,4055.132
-1,1,600,good,5,2020,west_welmwood,2020
-1,1,671,good,5,2156,west_welmwood,2156
-1,1,400,good,11,1789,west_welmwood,1789
-3,2,813,great,4,5096,east_elmwood,5096
-2,1,563,good,8,3096,west_welmwood,3096
-1,1,304,good,9,1759,west_welmwood,1759
-2,1,783,poor,26,3143,northwest,3105.284
-1,1,698,poor,39,1845,northwest,1774.89
-2,1,935,great,13,3903,east_elmwood,3903
-1,1,711,good,13,2033,west_welmwood,2033
-3,2,926,good,1,4723,west_welmwood,4723
-3,2,901,poor,31,4471,northwest,4372.638
-3,2,1002,poor,39,4485,northwest,4314.57
-1,1,442,good,0,1929,west_welmwood,1929
-0,1,483,poor,55,1337,northwest,1243.41
-0,1,191,poor,51,1077,northwest,1010.226
-3,2,882,poor,29,4466,northwest,4385.612
-1,1,531,poor,28,1739,northwest,1711.176
-1,1,495,good,4,1938,west_welmwood,1938
-3,2,857,poor,53,4161,northwest,3886.374
-1,1,561,poor,47,1543,northwest,1459.678
-0,1,256,poor,21,1399,northwest,1396.202
-2,1,643,good,14,3123,west_welmwood,3123
-3,2,1112,good,14,4782,west_welmwood,4782
-3,2,1153,good,13,4882,west_welmwood,4882
-2,1,532,great,10,3509,east_elmwood,3509
-2,1,519,poor,53,2588,northwest,2417.192
-1,1,661,poor,61,1591,northwest,1460.538
-0,1,226,poor,56,1085,northwest,1006.88
-0,1,286,great,6,2129,east_elmwood,2129
-0,1,205,good,9,1508,west_welmwood,1508
-0,1,231,good,1,1575,west_welmwood,1575
-3,2,1103,good,12,4872,west_welmwood,4872
-1,1,586,poor,48,1577,northwest,1488.688
-2,1,552,good,9,3046,west_welmwood,3046
-2,1,927,poor,26,3240,northwest,3201.12
-1,1,395,poor,60,1325,northwest,1219
-0,1,220,great,10,2010,east_elmwood,2010
-3,2,1054,good,11,4794,west_welmwood,4794
-1,1,611,good,0,2151,west_welmwood,2151
-0,1,128,good,14,1343,west_welmwood,1343
-3,2,813,great,13,5004,east_elmwood,5004
-3,2,1041,poor,48,4394,northwest,4147.936
-0,1,316,great,9,2138,east_elmwood,2138
-3,2,853,good,14,4546,west_welmwood,4546
-2,1,662,poor,61,2610,northwest,2395.98
-3,2,831,great,9,5102,east_elmwood,5102
-2,1,897,good,3,3432,west_welmwood,3432
-3,2,1066,good,13,4777,west_welmwood,4777
-1,1,663,poor,53,1650,northwest,1541.1
-0,1,463,poor,33,1492,northwest,1453.208
-0,1,442,great,14,2174,east_elmwood,2174
-0,1,226,good,5,1583,west_welmwood,1583
-1,1,519,poor,59,1391,northwest,1282.502
-0,1,438,good,11,1739,west_welmwood,1739
-2,1,663,good,1,3234,west_welmwood,3234
-1,1,323,great,9,2229,east_elmwood,2229
-3,2,898,great,10,5150,east_elmwood,5150
-3,2,1097,poor,56,4358,northwest,4044.224
-3,2,1094,good,7,4862,west_welmwood,4862
-3,2,1245,great,13,5482,east_elmwood,5482
-3,2,967,good,12,4706,west_welmwood,4706
-2,1,866,poor,46,3038,northwest,2880.024
-3,2,972,good,2,4766,west_welmwood,4766
-0,1,531,poor,15,1819,northwest,1819
-1,1,429,good,11,1785,west_welmwood,1785
-0,1,530,good,10,1853,west_welmwood,1853
-1,1,710,good,9,2089,west_welmwood,2089
-3,2,1166,poor,37,4682,northwest,4522.812
-3,2,1184,good,9,4962,west_welmwood,4962
-3,2,872,poor,52,4167,northwest,3900.312
-2,1,606,good,12,3071,west_welmwood,3071
-2,1,724,poor,55,2790,northwest,2594.7
-0,1,176,great,1,2035,east_elmwood,2035
-1,1,578,good,14,1903,west_welmwood,1903
-1,1,665,great,4,2633,east_elmwood,2633
-3,2,813,poor,58,4065,northwest,3756.06
-1,1,499,poor,46,1498,northwest,1420.104
-0,1,225,good,7,1578,west_welmwood,1578
-3,2,1157,great,14,5317,east_elmwood,5317
-0,1,454,poor,60,1301,northwest,1196.92
-1,1,407,good,14,1750,west_welmwood,1750
-0,1,197,poor,58,992,northwest,916.608
-2,1,774,great,3,3821,east_elmwood,3821
-3,2,1027,good,6,4843,west_welmwood,4843
-0,1,293,poor,35,1379,northwest,1337.63
-0,1,446,good,11,1780,west_welmwood,1780
-3,2,1134,poor,59,4438,northwest,4091.836
-0,1,493,good,9,1798,west_welmwood,1798
-3,2,1098,poor,63,4331,northwest,3958.534
-3,2,1096,poor,27,4639,northwest,4574.054
-2,1,689,good,5,3261,west_welmwood,3261
-2,1,699,great,14,3682,east_elmwood,3682
-1,1,637,great,11,2546,east_elmwood,2546
-1,1,344,good,2,1810,west_welmwood,1810
-0,1,154,good,9,1454,west_welmwood,1454
-1,1,726,good,11,2079,west_welmwood,2079
-1,1,698,good,5,2159,west_welmwood,2159
-3,2,1177,good,4,4958,west_welmwood,4958
-3,2,1058,great,6,5359,east_elmwood,5359
-0,1,489,great,4,2324,east_elmwood,2324
-1,1,347,poor,59,1304,northwest,1202.288
-2,1,901,poor,21,3307,northwest,3300.386
-3,2,1238,poor,53,4517,northwest,4218.878
-3,2,883,poor,64,4064,northwest,3706.368
-0,1,366,good,11,1635,west_welmwood,1635
-2,1,922,great,2,4049,east_elmwood,4049
-3,2,1000,good,13,4694,west_welmwood,4694
-1,1,601,great,13,2520,east_elmwood,2520
-2,1,911,poor,40,3153,northwest,3026.88
-1,1,518,good,13,1922,west_welmwood,1922
-2,1,504,good,12,2990,west_welmwood,2990
-1,1,350,good,6,1800,west_welmwood,1800
-0,1,224,poor,55,1094,northwest,1017.42
-3,2,830,poor,15,4536,northwest,4536
-2,1,659,poor,53,2693,northwest,2515.262
-1,1,525,poor,21,1789,northwest,1785.422
-3,2,1169,poor,61,4381,northwest,4021.758
-1,1,421,poor,27,1634,northwest,1611.124
-0,1,235,poor,21,1436,northwest,1433.128
-1,1,391,poor,19,1710,northwest,1710
-0,1,322,good,0,1713,west_welmwood,1713
-3,2,921,great,8,5141,east_elmwood,5141
-0,1,428,poor,33,1484,northwest,1445.416
-3,2,929,poor,31,4510,northwest,4410.78
-1,1,749,good,7,2148,west_welmwood,2148
-2,1,632,great,2,3692,east_elmwood,3692
-2,1,780,great,1,3856,east_elmwood,3856
-2,1,809,good,13,3238,west_welmwood,3238
-0,1,456,good,1,1850,west_welmwood,1850
-2,1,783,good,1,3379,west_welmwood,3379
-0,1,546,poor,44,1517,northwest,1444.184
-0,1,254,good,1,1662,west_welmwood,1662
-0,1,117,great,14,1879,east_elmwood,1879
-1,1,541,poor,51,1482,northwest,1390.116
-1,1,702,good,6,2182,west_welmwood,2182
-1,1,404,good,6,1880,west_welmwood,1880
-1,1,437,great,6,2392,east_elmwood,2392
-3,2,1059,poor,62,4335,zaytuna_college,3970.86
-2,1,547,good,7,3028,north_berkeley,3028
-2,1,814,poor,52,2891,zaytuna_college,2705.976
-2,1,647,good,14,3059,north_berkeley,3059
-2,1,846,good,0,3434,north_berkeley,3434
-3,2,1009,good,7,4826,north_berkeley,4826
-3,2,1137,good,3,4949,north_berkeley,4949
-3,2,1054,good,4,4820,north_berkeley,4820
-3,2,818,good,0,4651,north_berkeley,4651
-3,2,1155,good,4,4992,north_berkeley,4992
-1,1,355,good,11,1703,north_berkeley,1703
-0,1,309,great,7,2105,east_elmwood,2105
-2,1,565,great,3,3644,east_elmwood,3644
-2,1,745,good,12,3178,north_berkeley,3178
-2,1,644,poor,36,2924,zaytuna_college,2830.432
-0,1,506,poor,19,1740,zaytuna_college,1740
-1,1,300,great,6,2273,east_elmwood,2273
-1,1,530,poor,20,1853,zaytuna_college,1853
-3,2,911,great,5,5199,east_elmwood,5199
-1,1,616,poor,44,1667,zaytuna_college,1586.984
-0,1,234,poor,49,1125,zaytuna_college,1059.75
-1,1,636,poor,53,1623,zaytuna_college,1515.882
-0,1,397,good,5,1716,north_berkeley,1716
-3,2,1235,good,14,4914,north_berkeley,4914
-3,2,1061,poor,63,4281,zaytuna_college,3912.834
-0,1,270,good,1,1661,north_berkeley,1661
-1,1,620,good,12,1950,north_berkeley,1950
-3,2,875,poor,30,4448,zaytuna_college,4359.04
-1,1,689,good,10,2117,north_berkeley,2117
-1,1,716,good,12,2102,north_berkeley,2102
-3,2,1149,poor,49,4464,zaytuna_college,4205.088
-1,1,403,poor,16,1760,zaytuna_college,1760
-1,1,588,good,10,2023,north_berkeley,2023
-2,1,668,good,4,3183,north_berkeley,3183
-3,2,1211,good,7,4966,north_berkeley,4966
-1,1,467,good,10,1839,north_berkeley,1839
-3,2,987,good,12,4689,north_berkeley,4689
-1,1,617,good,0,2067,north_berkeley,2067
-0,1,171,poor,28,1260,zaytuna_college,1239.84
-0,1,440,good,3,1854,north_berkeley,1854
-1,1,326,good,3,1809,north_berkeley,1809
-3,2,1157,good,10,4922,north_berkeley,4922
-0,1,249,good,5,1561,north_berkeley,1561
-0,1,288,good,0,1696,north_berkeley,1696
-1,1,455,poor,20,1799,zaytuna_college,1799
-2,1,682,poor,46,2781,zaytuna_college,2636.388
-3,2,1143,poor,63,4406,zaytuna_college,4027.084
-2,1,684,great,6,3740,east_elmwood,3740
-1,1,433,good,8,1840,north_berkeley,1840
-3,2,874,good,7,4698,north_berkeley,4698
-1,1,488,great,10,2379,east_elmwood,2379
-0,1,371,great,7,2243,east_elmwood,2243
-1,1,572,good,0,2057,north_berkeley,2057
-0,1,196,great,1,2045,east_elmwood,2045
-3,2,1229,poor,19,4921,zaytuna_college,4921
-3,2,868,great,9,5112,east_elmwood,5112
-0,1,548,good,1,1923,north_berkeley,1923
-1,1,739,poor,48,1739,zaytuna_college,1641.616
-1,1,429,good,1,1896,north_berkeley,1896
-1,1,559,great,5,2480,east_elmwood,2480
-1,1,571,good,0,2038,north_berkeley,2038
-1,1,684,good,2,2213,north_berkeley,2213
-0,1,469,great,5,2350,east_elmwood,2350
-0,1,424,good,1,1783,north_berkeley,1783
-2,1,550,good,1,3157,north_berkeley,3157
-1,1,384,poor,49,1354,zaytuna_college,1275.468
-3,2,1165,poor,60,4463,zaytuna_college,4105.96
-3,2,1078,poor,24,4709,zaytuna_college,4671.328
-2,1,788,good,8,3319,north_berkeley,3319
-2,1,577,poor,58,2595,zaytuna_college,2397.78
-0,1,185,poor,23,1377,zaytuna_college,1368.738
-2,1,586,good,5,3154,north_berkeley,3154
-3,2,1105,good,8,4847,north_berkeley,4847
-3,2,933,good,2,4792,north_berkeley,4792
-1,1,315,poor,17,1672,zaytuna_college,1672
-1,1,466,good,9,1887,north_berkeley,1887
-1,1,637,good,8,2095,north_berkeley,2095
-2,1,536,good,14,3036,north_berkeley,3036
-0,1,235,great,4,2047,east_elmwood,2047
-3,2,813,good,10,4533,north_berkeley,4533
-1,1,748,poor,39,1809,zaytuna_college,1740.258
-2,1,926,poor,61,2885,zaytuna_college,2648.43
-0,1,348,good,8,1638,north_berkeley,1638
-2,1,777,good,8,3342,north_berkeley,3342
-0,1,245,poor,48,1124,zaytuna_college,1061.056
-1,1,319,great,6,2295,east_elmwood,2295
-3,2,1150,great,11,5416,east_elmwood,5416
-3,2,1198,good,6,4981,north_berkeley,4981
-0,1,155,good,4,1510,north_berkeley,1510
-1,1,562,good,5,2015,north_berkeley,2015
-1,1,608,poor,63,1457,zaytuna_college,1331.698
-3,2,915,great,3,5275,east_elmwood,5275
-2,1,719,good,11,3218,north_berkeley,3218
-0,1,357,poor,30,1420,zaytuna_college,1391.6
-2,1,759,great,2,3791,east_elmwood,3791
-1,1,659,poor,37,1764,zaytuna_college,1704.024
-2,1,760,good,2,3381,north_berkeley,3381
-2,1,875,great,3,3949,east_elmwood,3949
-2,1,637,great,8,3698,east_elmwood,3698
-3,2,1245,poor,51,4628,zaytuna_college,4341.064
-0,1,542,good,3,1951,north_berkeley,1951
-3,2,1165,great,10,5370,east_elmwood,5370
-2,1,583,great,0,3709,east_elmwood,3709
-1,1,659,great,8,2552,east_elmwood,2552
-1,1,564,great,2,2586,east_elmwood,2586
-1,1,300,poor,27,1557,zaytuna_college,1535.202
-2,1,688,great,14,3697,east_elmwood,3697
-2,1,935,good,12,3444,north_berkeley,3444
-1,1,721,great,10,2589,east_elmwood,2589
-3,2,1164,good,5,4924,north_berkeley,4924
-0,1,455,good,9,1812,north_berkeley,1812
-3,2,935,poor,61,4224,zaytuna_college,3877.632
-1,1,584,poor,41,1644,zaytuna_college,1574.952
-0,1,501,great,3,2366,east_elmwood,2366
-1,1,679,poor,29,1930,zaytuna_college,1895.26
-0,1,439,poor,54,1290,zaytuna_college,1202.28
-1,1,705,poor,56,1641,zaytuna_college,1522.848
-1,1,597,good,8,2061,north_berkeley,2061
-3,2,1127,great,12,5402,east_elmwood,5402
-0,1,493,good,6,1830,north_berkeley,1830
-2,1,619,good,3,3160,north_berkeley,3160
-3,2,1102,poor,50,4418,zaytuna_college,4152.92
-1,1,554,poor,37,1680,zaytuna_college,1622.88
-0,1,359,poor,15,1627,zaytuna_college,1627
-3,2,982,great,9,5243,east_elmwood,5243
-3,2,1084,poor,18,4793,zaytuna_college,4793
-0,1,257,good,13,1518,north_berkeley,1518
-3,2,1111,poor,64,4317,zaytuna_college,3937.104
-1,1,432,great,8,2320,east_elmwood,2320
-3,2,1063,great,10,5292,east_elmwood,5292
-0,1,546,great,12,2286,east_elmwood,2286
-2,1,752,poor,26,3082,zaytuna_college,3045.016
-0,1,478,good,6,1814,north_berkeley,1814
-2,1,739,great,3,3775,east_elmwood,3775
-1,1,506,good,6,1920,north_berkeley,1920
-3,2,1237,good,12,4938,north_berkeley,4938
-0,1,427,poor,55,1293,zaytuna_college,1202.49
-2,1,773,poor,32,3088,zaytuna_college,3013.888
-1,1,386,good,5,1861,north_berkeley,1861
-3,2,1232,good,11,5015,north_berkeley,5015
-0,1,468,poor,38,1449,zaytuna_college,1396.836
-0,1,235,good,3,1597,north_berkeley,1597
-0,1,135,good,7,1417,north_berkeley,1417
-1,1,537,poor,43,1595,zaytuna_college,1521.63
-2,1,663,good,8,3146,north_berkeley,3146
-1,1,662,great,9,2538,east_elmwood,2538
-3,2,1175,poor,33,4677,zaytuna_college,4555.398
-1,1,452,good,0,1962,north_berkeley,1962
-3,2,1224,good,6,5040,north_berkeley,5040
-2,1,651,great,4,3721,east_elmwood,3721
-1,1,304,great,2,2288,east_elmwood,2288
-3,2,1211,great,14,5418,east_elmwood,5418
-3,2,1123,poor,20,4727,zaytuna_college,4727
-1,1,654,poor,49,1630,zaytuna_college,1535.46
-1,1,630,good,6,2079,north_berkeley,2079
-1,1,631,poor,28,1894,zaytuna_college,1863.696
-0,1,435,good,14,1681,north_berkeley,1681
-0,1,400,good,1,1747,north_berkeley,1747
-3,2,1162,good,14,4824,north_berkeley,4824
-2,1,734,poor,37,2958,zaytuna_college,2857.428
-0,1,444,good,12,1722,north_berkeley,1722
-1,1,470,good,3,1912,north_berkeley,1912
-0,1,248,good,3,1594,north_berkeley,1594
-2,1,724,great,7,3770,east_elmwood,3770
-3,2,1136,good,3,4930,north_berkeley,4930
-3,2,891,poor,39,4333,zaytuna_college,4168.346
-2,1,629,great,12,3623,east_elmwood,3623
-0,1,484,great,6,2346,east_elmwood,2346
-0,1,203,good,8,1554,north_berkeley,1554
-3,2,1031,great,0,5336,east_elmwood,5336
-1,1,306,good,11,1727,north_berkeley,1727
-0,1,170,good,11,1455,north_berkeley,1455
-2,1,749,great,8,3799,east_elmwood,3799
-2,1,781,good,14,3197,north_berkeley,3197
-1,1,550,poor,17,1913,zaytuna_college,1913
-1,1,435,good,8,1826,north_berkeley,1826
-3,2,957,good,4,4751,north_berkeley,4751
-1,1,604,good,0,2082,north_berkeley,2082
-1,1,548,good,1,2014,north_berkeley,2014
-0,1,153,poor,17,1424,zaytuna_college,1424
-2,1,635,great,1,3696,east_elmwood,3696
-0,1,113,good,4,1461,north_berkeley,1461
-0,1,296,great,13,2095,east_elmwood,2095
-2,1,743,great,9,3758,east_elmwood,3758
-0,1,453,good,11,1735,north_berkeley,1735
-0,1,113,poor,31,1182,zaytuna_college,1155.996
-1,1,663,poor,37,1771,zaytuna_college,1710.786
-1,1,633,good,0,2179,north_berkeley,2179
-2,1,835,poor,33,3092,zaytuna_college,3011.608
-0,1,349,good,4,1687,north_berkeley,1687
-2,1,698,poor,41,2897,zaytuna_college,2775.326
-2,1,504,good,2,3098,north_berkeley,3098
-0,1,121,poor,54,999,frontage_rd,931.068
-1,1,684,good,1,2192,north_berkeley,2192
-2,1,901,good,7,3473,north_berkeley,3473
-0,1,478,great,1,2414,east_elmwood,2414
-2,1,898,poor,41,3082,frontage_rd,2952.556
-1,1,482,good,14,1804,north_berkeley,1804
-0,1,239,poor,49,1168,frontage_rd,1100.256
-1,1,572,great,13,2403,east_elmwood,2403
-2,1,815,good,0,3378,north_berkeley,3378
-1,1,565,poor,61,1470,frontage_rd,1349.46
-3,2,1004,good,11,4706,north_berkeley,4706
-0,1,393,poor,49,1289,frontage_rd,1214.238
-0,1,117,great,3,1939,east_elmwood,1939
-0,1,448,poor,47,1337,frontage_rd,1264.802
-0,1,104,poor,20,1322,frontage_rd,1322
-1,1,391,good,10,1761,north_berkeley,1761
-0,1,542,good,5,1889,north_berkeley,1889
-0,1,365,good,9,1716,north_berkeley,1716
-0,1,510,poor,19,1730,frontage_rd,1730
-1,1,354,good,7,1811,north_berkeley,1811
-1,1,634,good,11,1991,north_berkeley,1991
-1,1,526,good,5,1933,north_berkeley,1933
-0,1,167,good,13,1447,north_berkeley,1447
-3,2,1018,poor,37,4453,frontage_rd,4301.598
-2,1,783,poor,54,2804,frontage_rd,2613.328
-1,1,416,good,3,1862,north_berkeley,1862
-3,2,916,poor,56,4246,frontage_rd,3940.288
-3,2,1223,good,1,5092,north_berkeley,5092
-1,1,359,poor,29,1577,frontage_rd,1548.614
-2,1,917,poor,29,3270,frontage_rd,3211.14
-1,1,338,great,9,2266,east_elmwood,2266
-3,2,926,poor,59,4218,frontage_rd,3888.996
-3,2,1116,great,14,5361,east_elmwood,5361
-3,2,1160,great,5,5470,east_elmwood,5470
-3,2,813,good,11,4568,north_berkeley,4568
-3,2,854,great,14,5103,east_elmwood,5103
-3,2,1068,great,11,5347,east_elmwood,5347
-2,1,758,great,4,3824,east_elmwood,3824
-2,1,741,good,11,3184,north_berkeley,3184
-2,1,761,poor,31,3055,frontage_rd,2987.79
-3,2,811,good,12,4580,north_berkeley,4580
-3,2,1005,great,9,5305,east_elmwood,5305
-0,1,190,good,6,1550,north_berkeley,1550
-3,2,953,poor,48,4293,frontage_rd,4052.592
-3,2,1206,good,1,5011,north_berkeley,5011
-2,1,637,great,7,3692,east_elmwood,3692
-3,2,980,good,4,4791,north_berkeley,4791
-1,1,318,good,11,1671,north_berkeley,1671
-3,2,1227,poor,32,4784,frontage_rd,4669.184
-0,1,398,poor,60,1166,frontage_rd,1072.72
-1,1,543,great,8,2431,east_elmwood,2431
-3,2,1207,poor,28,4746,frontage_rd,4670.064
-0,1,509,great,6,2333,east_elmwood,2333
-2,1,685,poor,47,2844,frontage_rd,2690.424
-3,2,823,good,6,4589,north_berkeley,4589
-2,1,846,poor,48,2929,frontage_rd,2764.976
-2,1,846,good,3,3437,north_berkeley,3437
-0,1,397,poor,34,1462,frontage_rd,1421.064
-0,1,261,poor,55,1126,frontage_rd,1047.18
-1,1,733,good,13,2056,north_berkeley,2056
-0,1,265,poor,35,1308,frontage_rd,1268.76
-1,1,326,great,6,2314,east_elmwood,2314
-3,2,1020,great,8,5302,east_elmwood,5302
-3,2,1027,poor,44,4451,frontage_rd,4237.352
-2,1,551,poor,63,2502,frontage_rd,2286.828
-3,2,807,poor,55,4133,frontage_rd,3843.69
-3,2,1139,poor,48,4468,frontage_rd,4217.792
-1,1,635,good,9,2017,north_berkeley,2017
-1,1,520,good,13,1851,north_berkeley,1851
-2,1,765,great,14,3693,east_elmwood,3693
-0,1,505,good,6,1804,north_berkeley,1804
-3,2,837,poor,27,4428,frontage_rd,4366.008
-3,2,921,good,5,4744,north_berkeley,4744
-2,1,924,great,12,3902,east_elmwood,3902
-3,2,976,poor,38,4406,frontage_rd,4247.384
-2,1,918,good,0,3524,north_berkeley,3524
-0,1,332,good,0,1714,north_berkeley,1714
-1,1,463,good,7,1885,north_berkeley,1885
-3,2,822,poor,44,4201,frontage_rd,3999.352
-1,1,626,poor,55,1588,frontage_rd,1476.84
-0,1,424,poor,60,1235,frontage_rd,1136.2
-1,1,383,poor,49,1436,frontage_rd,1352.712
-1,1,496,good,4,1917,north_berkeley,1917
-3,2,1166,poor,52,4524,frontage_rd,4234.464
-3,2,1211,good,14,4970,north_berkeley,4970
-3,2,896,poor,25,4498,frontage_rd,4453.02
-0,1,377,good,8,1698,north_berkeley,1698
-1,1,381,good,10,1784,north_berkeley,1784
-1,1,429,poor,53,1361,frontage_rd,1271.174
-2,1,750,good,9,3217,north_berkeley,3217
-1,1,718,poor,45,1806,frontage_rd,1715.7
-1,1,527,poor,47,1575,frontage_rd,1489.95
-2,1,652,good,4,3196,north_berkeley,3196
-3,2,1067,poor,24,4688,frontage_rd,4650.496
-3,2,1075,good,13,4841,north_berkeley,4841
-3,2,1154,great,1,5459,east_elmwood,5459
-1,1,649,great,1,2592,east_elmwood,2592
-0,1,195,good,9,1500,north_berkeley,1500
-2,1,774,poor,59,2778,frontage_rd,2561.316
-2,1,803,great,6,3822,east_elmwood,3822
-2,1,529,great,3,3633,east_elmwood,3633
-2,1,584,good,12,3042,north_berkeley,3042
-0,1,102,good,6,1472,north_berkeley,1472
-3,2,899,good,2,4696,north_berkeley,4696
-3,2,1237,good,14,4972,north_berkeley,4972
-0,1,228,poor,62,986,frontage_rd,903.176
-2,1,604,poor,40,2835,frontage_rd,2721.6
-0,1,518,poor,35,1607,frontage_rd,1558.79
-2,1,571,good,4,3100,north_berkeley,3100
-0,1,351,good,11,1657,north_berkeley,1657
-0,1,111,poor,29,1203,frontage_rd,1181.346
-1,1,345,poor,59,1273,frontage_rd,1173.706
-1,1,632,good,3,2082,north_berkeley,2082
-2,1,711,good,12,3151,north_berkeley,3151
-1,1,466,good,12,1848,north_berkeley,1848
-1,1,727,good,0,2190,north_berkeley,2190
-1,1,404,poor,63,1235,frontage_rd,1128.79
-1,1,374,poor,34,1498,frontage_rd,1456.056
-3,2,1108,great,13,5284,east_elmwood,5284
-3,2,1236,good,1,5119,north_berkeley,5119
-2,1,870,good,4,3433,north_berkeley,3433
-0,1,144,good,6,1523,north_berkeley,1523
-3,2,1221,good,11,4990,north_berkeley,4990
-3,2,985,good,10,4763,north_berkeley,4763
-1,1,318,great,7,2288,east_elmwood,2288
-2,1,538,good,12,3067,north_berkeley,3067
-1,1,487,great,13,2365,east_elmwood,2365
-2,1,836,good,13,3303,north_berkeley,3303
-3,2,1156,good,3,5025,north_berkeley,5025
-1,1,516,great,12,2439,east_elmwood,2439
-1,1,580,poor,53,1585,frontage_rd,1480.39
-1,1,713,good,9,2124,north_berkeley,2124
-2,1,724,good,2,3305,north_berkeley,3305
-3,2,1226,good,3,5005,north_berkeley,5005
-1,1,674,good,8,2128,north_berkeley,2128
-0,1,532,good,1,1932,north_berkeley,1932
-3,2,1099,good,12,4789,north_berkeley,4789
-3,2,807,good,7,4608,north_berkeley,4608
-3,2,959,great,6,5244,east_elmwood,5244
-0,1,260,good,5,1591,north_berkeley,1591
-0,1,203,poor,38,1250,frontage_rd,1205
-3,2,1112,poor,29,4693,frontage_rd,4608.526
-2,1,655,good,11,3144,north_berkeley,3144
-0,1,355,poor,61,1130,frontage_rd,1037.34
-3,2,1146,good,5,4995,north_berkeley,4995
-3,2,1098,great,8,5396,east_elmwood,5396
-0,1,117,good,5,1465,north_berkeley,1465
-0,1,474,great,9,2297,east_elmwood,2297
-0,1,463,great,10,2272,east_elmwood,2272
-2,1,770,good,9,3277,north_berkeley,3277
-1,1,642,great,11,2502,east_elmwood,2502
-3,2,1044,poor,26,4599,frontage_rd,4543.812
-0,1,425,poor,45,1335,frontage_rd,1268.25
-2,1,681,poor,38,2928,frontage_rd,2822.592
-2,1,509,good,6,3054,north_berkeley,3054
-1,1,568,great,3,2575,east_elmwood,2575
-1,1,749,poor,21,1990,frontage_rd,1986.02
-2,1,651,great,2,3701,east_elmwood,3701
-0,1,448,good,14,1696,north_berkeley,1696
-0,1,492,poor,20,1696,frontage_rd,1696
-0,1,516,poor,59,1288,frontage_rd,1187.536
-1,1,625,good,0,2162,north_berkeley,2162
-2,1,874,good,13,3320,north_berkeley,3320
-1,1,738,good,2,2253,north_berkeley,2253
-0,1,148,good,4,1537,north_berkeley,1537
-3,2,819,good,3,4646,north_berkeley,4646
-0,1,101,good,5,1483,north_berkeley,1483
-0,1,131,poor,22,1308,frontage_rd,1302.768
-0,1,158,poor,57,1019,frontage_rd,943.594
-1,1,725,poor,44,1744,frontage_rd,1660.288
-2,1,616,great,2,3678,east_elmwood,3678
-0,1,341,great,7,2195,east_elmwood,2195
-1,1,668,good,0,2190,north_berkeley,2190
-0,1,491,good,2,1850,north_berkeley,1850
-3,2,895,good,12,4638,north_berkeley,4638
-2,1,787,good,1,3407,north_berkeley,3407
-2,1,873,poor,15,3317,frontage_rd,3317
-1,1,525,poor,47,1569,frontage_rd,1484.274
-2,1,941,poor,56,2987,frontage_rd,2771.936
-1,1,537,good,9,1905,north_berkeley,1905
-3,2,1070,great,5,5367,east_elmwood,5367
-0,1,440,good,12,1751,north_berkeley,1751
-0,1,255,good,13,1513,north_berkeley,1513
-0,1,464,poor,45,1424,frontage_rd,1352.8
-3,2,974,good,6,4750,north_berkeley,4750
-1,1,322,poor,40,1452,frontage_rd,1393.92
-1,1,371,poor,47,1416,frontage_rd,1339.536
-1,1,487,poor,34,1685,frontage_rd,1637.82
-3,2,985,great,14,5221,east_elmwood,5221
-0,1,460,good,3,1853,north_berkeley,1853
-0,1,384,good,13,1667,north_berkeley,1667
-2,1,755,good,12,3205,north_berkeley,3205
-2,1,765,good,4,3362,north_berkeley,3362
-2,1,580,great,2,3692,east_elmwood,3692
-1,1,472,great,3,2479,east_elmwood,2479
-0,1,193,poor,33,1277,frontage_rd,1243.798
-1,1,312,poor,31,1509,frontage_rd,1475.802
-0,1,488,great,8,2304,east_elmwood,2304
-0,1,214,poor,33,1268,frontage_rd,1235.032
-3,2,939,good,7,4750,north_berkeley,4750
-0,1,502,poor,17,1743,frontage_rd,1743
-0,1,540,good,10,1835,north_berkeley,1835
-0,1,139,good,10,1487,north_berkeley,1487
-0,1,447,poor,29,1520,frontage_rd,1492.64
-2,1,776,great,4,3828,east_elmwood,3828
-1,1,695,great,6,2604,east_elmwood,2604
-0,1,449,good,7,1820,north_berkeley,1820
-3,2,984,great,6,5312,east_elmwood,5312
-0,1,255,good,9,1542,north_berkeley,1542
-3,2,811,poor,20,4428,frontage_rd,4428
-0,1,371,good,12,1643,north_berkeley,1643
-1,1,444,great,5,2399,east_elmwood,2399
-1,1,410,good,9,1800,north_berkeley,1800
-1,1,315,poor,39,1451,frontage_rd,1395.862
-0,1,368,poor,36,1412,frontage_rd,1366.816
-1,1,738,poor,51,1769,frontage_rd,1659.322
-1,1,372,good,1,1868,north_berkeley,1868
-3,2,899,poor,17,4620,frontage_rd,4620
-2,1,919,good,1,3538,north_berkeley,3538
-3,2,1082,good,7,4878,north_berkeley,4878
-3,2,1114,poor,44,4556,frontage_rd,4337.312
-0,1,163,poor,37,1155,frontage_rd,1115.73
-3,2,1090,poor,59,4367,frontage_rd,4026.374
-3,2,989,good,0,4856,north_berkeley,4856
-0,1,479,great,12,2275,east_elmwood,2275
-2,1,617,good,8,3124,north_berkeley,3124
-3,2,959,poor,61,4195,frontage_rd,3851.01
-1,1,735,good,12,2154,north_berkeley,2154
-3,2,1084,great,14,5272,east_elmwood,5272
-1,1,480,good,4,1956,north_berkeley,1956
-2,1,813,poor,58,2874,frontage_rd,2655.576
-0,1,103,great,1,2023,east_elmwood,2023
-2,1,695,great,9,3701,east_elmwood,3701
-0,1,186,poor,20,1427,frontage_rd,1427
-0,1,270,poor,44,1231,frontage_rd,1171.912
-2,1,800,poor,51,2845,frontage_rd,2668.61
-2,1,502,great,4,3570,east_elmwood,3570
-2,1,680,poor,60,2697,frontage_rd,2481.24
-3,2,1071,poor,58,4345,frontage_rd,4014.78
-1,1,662,good,10,2060,north_berkeley,2060
-0,1,499,good,13,1818,north_berkeley,1818
-1,1,308,poor,33,1446,frontage_rd,1408.404
-0,1,236,good,13,1507,north_berkeley,1507
-2,1,889,good,7,3458,north_berkeley,3458
-2,1,884,good,6,3381,north_berkeley,3381
-0,1,454,good,11,1699,north_berkeley,1699
-3,2,1036,good,4,4820,north_berkeley,4820
-2,1,637,good,9,3105,north_berkeley,3105
-1,1,339,poor,51,1294,frontage_rd,1213.772
-2,1,587,poor,25,2917,frontage_rd,2887.83
-0,1,489,poor,36,1529,frontage_rd,1480.072
-0,1,440,good,2,1843,north_berkeley,1843
-2,1,573,good,2,3143,north_berkeley,3143
-1,1,575,good,9,1949,north_berkeley,1949
-0,1,207,good,13,1481,north_berkeley,1481
-0,1,407,great,0,2320,east_elmwood,2320
-0,1,295,poor,43,1228,frontage_rd,1171.512
-0,1,434,poor,17,1633,frontage_rd,1633
-0,1,184,great,13,1938,east_elmwood,1938
-2,1,895,poor,58,2881,frontage_rd,2662.044
-0,1,323,great,7,2182,east_elmwood,2182
-2,1,747,great,4,3774,east_elmwood,3774
-3,2,1166,poor,39,4612,frontage_rd,4436.744
-1,1,689,poor,29,1917,frontage_rd,1882.494
-2,1,663,great,8,3636,east_elmwood,3636
-1,1,412,poor,50,1425,frontage_rd,1339.5
-3,2,1131,good,14,4875,north_berkeley,4875
-1,1,300,good,9,1736,north_berkeley,1736
-2,1,786,good,12,3262,north_berkeley,3262
-0,1,402,poor,48,1350,frontage_rd,1274.4
-2,1,535,great,2,3639,east_elmwood,3639
-1,1,360,good,9,1740,north_berkeley,1740
-0,1,276,great,9,2065,east_elmwood,2065
-2,1,647,poor,43,2797,frontage_rd,2668.338
-1,1,611,good,5,2075,north_berkeley,2075
-3,2,956,good,6,4777,north_berkeley,4777
-1,1,426,good,14,1788,north_berkeley,1788
-0,1,155,great,12,1945,east_elmwood,1945
-2,1,643,poor,40,2871,frontage_rd,2756.16
-1,1,593,good,10,1944,north_berkeley,1944
-3,2,1093,poor,39,4509,frontage_rd,4337.658
-1,1,487,good,12,1854,north_berkeley,1854
-1,1,349,good,10,1768,north_berkeley,1768
-0,1,171,poor,61,983,frontage_rd,902.394
-3,2,1024,good,1,4904,north_berkeley,4904
-0,1,116,good,10,1374,north_berkeley,1374
-2,1,561,poor,49,2708,frontage_rd,2550.936
-1,1,741,great,10,2637,east_elmwood,2637
-2,1,901,great,5,3952,east_elmwood,3952
-1,1,318,poor,57,1230,frontage_rd,1138.98
-1,1,688,great,8,2605,east_elmwood,2605
-3,2,828,good,7,4584,north_berkeley,4584
-2,1,938,great,4,3973,east_elmwood,3973
-2,1,669,poor,25,2970,frontage_rd,2940.3
-3,2,840,poor,64,4024,frontage_rd,3669.888
-2,1,638,poor,50,2712,frontage_rd,2549.28
-2,1,759,great,14,3712,east_elmwood,3712
-2,1,934,great,0,4058,east_elmwood,4058
-3,2,991,great,10,5287,east_elmwood,5287
-1,1,411,great,8,2316,east_elmwood,2316
-2,1,883,poor,35,3173,frontage_rd,3077.81
-3,2,1152,good,3,4986,north_berkeley,4986
-0,1,230,good,11,1490,north_berkeley,1490
-3,2,934,good,9,4733,north_berkeley,4733
-3,2,941,great,5,5274,east_elmwood,5274
-3,2,933,great,9,5156,east_elmwood,5156
-3,2,839,poor,55,4172,frontage_rd,3879.96
-0,1,216,great,1,2092,east_elmwood,2092
-3,2,1235,good,3,5088,north_berkeley,5088
-0,1,132,good,11,1445,north_berkeley,1445
-2,1,832,poor,34,3073,frontage_rd,2986.956
-2,1,900,great,13,3881,east_elmwood,3881
-3,2,897,good,8,4658,north_berkeley,4658
-1,1,698,great,11,2563,east_elmwood,2563
-1,1,368,good,1,1865,north_berkeley,1865
-2,1,664,good,8,3159,north_berkeley,3159
-3,2,1162,good,14,4883,north_berkeley,4883
-2,1,743,good,8,3300,north_berkeley,3300
-1,1,427,poor,64,1282,frontage_rd,1169.184
-1,1,632,good,1,2106,north_berkeley,2106
-2,1,698,poor,16,3173,frontage_rd,3173
-0,1,275,poor,18,1456,frontage_rd,1456
-3,2,1128,good,7,4889,north_berkeley,4889
-1,1,352,great,0,2346,east_elmwood,2346
-3,2,1174,good,3,4956,north_berkeley,4956
-1,1,440,great,5,2420,east_elmwood,2420
-1,1,460,good,4,1949,north_berkeley,1949
-2,1,870,good,7,3435,north_berkeley,3435
-3,2,1240,great,14,5497,east_elmwood,5497
-0,1,326,poor,45,1259,frontage_rd,1196.05
-0,1,311,good,12,1542,north_berkeley,1542
-2,1,656,poor,24,2990,frontage_rd,2966.08
-1,1,560,good,14,1899,north_berkeley,1899
-0,1,352,poor,59,1211,frontage_rd,1116.542
-2,1,714,good,1,3282,north_berkeley,3282
-3,2,834,poor,32,4396,frontage_rd,4290.496
-0,1,348,good,0,1730,north_berkeley,1730
-0,1,388,good,3,1746,north_berkeley,1746
-0,1,328,great,13,2126,east_elmwood,2126
-2,1,718,good,6,3230,north_berkeley,3230
-0,1,435,poor,64,1177,frontage_rd,1073.424
-3,2,1182,great,6,5483,east_elmwood,5483
-1,1,632,great,11,2503,east_elmwood,2503
-3,2,902,great,2,5186,east_elmwood,5186
-1,1,612,good,6,2034,north_berkeley,2034
-0,1,217,good,11,1516,north_berkeley,1516
-0,1,126,good,11,1428,north_berkeley,1428
-1,1,586,good,7,2018,north_berkeley,2018
-3,2,831,good,1,4659,north_berkeley,4659
-1,1,380,good,12,1758,north_berkeley,1758
-3,2,1224,good,6,4976,north_berkeley,4976
-3,2,1009,poor,60,4283,frontage_rd,3940.36
-2,1,872,great,12,3868,east_elmwood,3868
-0,1,116,great,9,1964,east_elmwood,1964
-3,2,1060,good,8,4862,north_berkeley,4862
-1,1,442,poor,62,1292,frontage_rd,1183.472
-0,1,442,good,14,1737,north_berkeley,1737
-0,1,519,poor,43,1531,frontage_rd,1460.574
-0,1,118,good,1,1489,north_berkeley,1489
-2,1,674,good,13,3110,north_berkeley,3110
-3,2,1182,great,14,5364,east_elmwood,5364
-3,2,1056,poor,20,4663,frontage_rd,4663
-1,1,357,poor,55,1301,frontage_rd,1209.93
-1,1,507,great,7,2401,east_elmwood,2401
-2,1,793,good,4,3303,north_berkeley,3303
-0,1,254,good,12,1578,north_berkeley,1578
-0,1,375,good,4,1767,north_berkeley,1767
-3,2,934,poor,15,4637,frontage_rd,4637
-2,1,677,poor,15,3123,frontage_rd,3123
-2,1,582,good,8,3057,north_berkeley,3057
-2,1,918,poor,33,3210,frontage_rd,3126.54
-3,2,994,good,7,4801,north_berkeley,4801
-0,1,144,poor,50,1045,frontage_rd,982.3
-3,2,997,good,11,4749,north_berkeley,4749
-0,1,430,great,8,2248,east_elmwood,2248
-0,1,443,poor,20,1602,frontage_rd,1602
-2,1,785,great,7,3832,east_elmwood,3832
-1,1,426,great,9,2344,east_elmwood,2344
-1,1,312,good,13,1645,north_berkeley,1645
-0,1,116,great,7,1929,east_elmwood,1929
-2,1,949,good,0,3513,north_berkeley,3513
-1,1,501,great,8,2390,east_elmwood,2390
-1,1,564,great,3,2538,east_elmwood,2538
-0,1,333,poor,26,1457,frontage_rd,1439.516
-3,2,879,great,9,5169,east_elmwood,5169
-2,1,817,good,4,3379,north_berkeley,3379
-3,2,1123,poor,49,4513,frontage_rd,4251.246
-1,1,"640,56",good,0,2184,north_berkeley,2184
-1,1,707,good,1,2168,north_berkeley,2168
-1,1,347,poor,63,1174,frontage_rd,1073.036
-0,1,245,poor,63,1060,frontage_rd,968.84
-3,2,1116,good,13,4808,north_berkeley,4808
-3,2,1083,great,9,5304,east_elmwood,5304
-3,2,911,great,0,5285,east_elmwood,5285
-1,1,724,good,12,2097,north_berkeley,2097
-1,1,627,good,8,2084,north_berkeley,2084
-0,1,259,great,10,2095,east_elmwood,2095
-0,1,216,poor,50,1079,frontage_rd,1014.26
-0,1,490,good,12,1732,north_berkeley,1732
-3,2,931,good,14,4617,north_berkeley,4617
-0,1,174,poor,43,1129,frontage_rd,1077.066
-3,2,956,great,10,5204,east_elmwood,5204
-2,1,937,poor,61,2962,frontage_rd,2719.116
-3,2,1220,great,11,5459,east_elmwood,5459
-0,1,240,good,12,1514,north_berkeley,1514
-3,2,964,great,2,5270,east_elmwood,5270
-1,1,589,poor,33,1770,frontage_rd,1723.98
-0,1,217,good,14,1447,north_berkeley,1447
-2,1,520,great,4,3540,east_elmwood,3540
-0,1,220,poor,49,1098,frontage_rd,1034.316
-3,2,1176,poor,19,4849,frontage_rd,4849
-3,2,1008,good,9,4794,north_berkeley,4794
-3,2,1202,good,11,4950,north_berkeley,4950
-3,2,1117,poor,57,4387,frontage_rd,4062.362
-3,2,1140,good,0,4996,north_berkeley,4996
-2,1,734,poor,35,2980,frontage_rd,2890.6
-0,1,419,good,6,1758,north_berkeley,1758
-3,2,1117,good,13,4796,north_berkeley,4796
-1,1,516,great,13,2352,east_elmwood,2352
-2,1,555,poor,51,2632,frontage_rd,2468.816
-1,1,407,good,12,1797,north_berkeley,1797
-0,1,356,good,5,1671,north_berkeley,1671
-2,1,860,good,1,3434,north_berkeley,3434
-1,1,637,great,5,2582,east_elmwood,2582
-3,2,995,poor,39,4491,frontage_rd,4320.342
-1,1,623,poor,38,1766,frontage_rd,1702.424
-3,2,968,good,9,4699,north_berkeley,4699
-3,2,1182,great,8,5404,east_elmwood,5404
-0,1,147,poor,47,1084,frontage_rd,1025.464
-0,1,534,good,0,1963,north_berkeley,1963
-3,2,871,good,11,4648,north_berkeley,4648
-1,1,348,great,3,2363,east_elmwood,2363
-1,1,427,poor,17,1712,frontage_rd,1712
-0,1,140,good,3,1488,north_berkeley,1488
-2,1,898,great,10,3922,east_elmwood,3922
-0,1,532,poor,36,1523,frontage_rd,1474.264
-0,1,432,poor,19,1628,frontage_rd,1628
-3,2,1160,great,10,5360,east_elmwood,5360
-2,1,775,poor,43,2926,frontage_rd,2791.404
-0,1,178,great,14,1898,east_elmwood,1898
-1,1,395,good,1,1858,north_berkeley,1858
-1,1,304,good,0,1777,north_berkeley,1777
-0,1,438,great,12,2258,east_elmwood,2258
-3,2,1037,good,5,4849,north_berkeley,4849
-0,1,246,good,7,1528,north_berkeley,1528
-3,2,948,good,4,4752,north_berkeley,4752
-1,1,505,good,10,1907,north_berkeley,1907
-1,1,562,good,10,1917,north_berkeley,1917
-0,1,248,poor,29,1365,frontage_rd,1340.43
-1,1,737,great,12,2573,east_elmwood,2573
-3,2,1100,good,2,4926,north_berkeley,4926
-3,2,804,good,6,4591,north_berkeley,4591
-0,1,104,great,11,1867,east_elmwood,1867
-1,1,444,good,14,1801,north_berkeley,1801
-0,1,186,poor,30,1324,frontage_rd,1297.52
-0,1,383,great,4,2202,east_elmwood,2202
-2,1,620,good,8,3107,north_berkeley,3107
-3,2,1098,good,14,4856,north_berkeley,4856
-1,1,718,poor,39,1778,frontage_rd,1710.436
-2,1,866,good,3,3418,north_berkeley,3418
-1,1,404,poor,53,1348,frontage_rd,1259.032
-3,2,1022,good,8,4829,north_berkeley,4829
-1,1,508,good,5,1971,north_berkeley,1971
-2,1,854,great,5,3862,east_elmwood,3862
-0,1,542,good,13,1791,north_berkeley,1791
-3,2,1240,great,10,5503,east_elmwood,5503
-0,1,128,good,6,1432,north_berkeley,1432
-0,1,365,good,2,1702,north_berkeley,1702
-0,1,156,poor,62,887,frontage_rd,812.492
-2,1,933,good,4,3449,north_berkeley,3449
-0,1,404,poor,61,1163,frontage_rd,1067.634
-2,1,822,good,8,3309,north_berkeley,3309
-2,1,744,great,13,3692,east_elmwood,3692
-1,1,507,good,12,1846,north_berkeley,1846
-1,1,505,great,11,2349,east_elmwood,2349
-2,1,946,poor,34,3251,frontage_rd,3159.972
-0,1,146,good,14,1387,north_berkeley,1387
-3,2,808,good,8,4612,north_berkeley,4612
-3,2,936,good,13,4692,north_berkeley,4692
-2,1,569,poor,19,2949,frontage_rd,2949
-2,1,508,great,2,3599,east_elmwood,3599
-3,2,1041,good,7,4801,north_berkeley,4801
-3,2,840,poor,50,4227,frontage_rd,3973.38
-3,2,1246,good,12,5011,north_berkeley,5011
-1,1,481,great,6,2418,east_elmwood,2418
-0,1,503,good,7,1849,north_berkeley,1849
-1,1,340,great,3,2307,east_elmwood,2307
-0,1,397,good,9,1703,north_berkeley,1703
-3,2,808,poor,45,4177,frontage_rd,3968.15
-2,1,794,poor,43,2992,frontage_rd,2854.368
-3,2,1209,good,10,4916,north_berkeley,4916
-1,1,358,poor,37,1471,frontage_rd,1420.986
-1,1,552,great,4,2485,east_elmwood,2485
-3,2,904,good,14,4576,north_berkeley,4576
-2,1,539,poor,27,2887,frontage_rd,2846.582
-2,1,761,good,3,3283,north_berkeley,3283
-1,1,639,great,8,2576,east_elmwood,2576
-0,1,191,poor,23,1375,frontage_rd,1366.75
-1,1,676,great,5,2620,east_elmwood,2620
-2,1,735,poor,31,2999,frontage_rd,2933.022
-3,2,941,good,9,4745,north_berkeley,4745
-3,2,1212,poor,21,4849,frontage_rd,4839.302
-2,1,556,poor,52,2627,frontage_rd,2458.872
-1,1,574,good,8,2008,north_berkeley,2008
-0,1,209,great,2,2042,east_elmwood,2042
-2,1,753,poor,16,3203,frontage_rd,3203
-0,1,276,poor,62,1069,frontage_rd,979.204
-3,2,1234,good,10,5025,north_berkeley,5025
-0,1,505,poor,24,1683,frontage_rd,1669.536
-1,1,661,great,7,2547,east_elmwood,2547
-3,2,885,great,8,5184,east_elmwood,5184
-2,1,865,great,9,3849,east_elmwood,3849
-0,1,150,great,14,1916,east_elmwood,1916
-3,2,838,poor,56,4139,frontage_rd,3840.992
-0,1,288,poor,22,1517,frontage_rd,1510.932
-2,1,868,good,7,3436,north_berkeley,3436
-3,2,1178,good,5,4932,north_berkeley,4932
-3,2,915,great,7,5205,east_elmwood,5205
-1,1,355,poor,64,1263,frontage_rd,1151.856
-0,1,478,good,10,1775,north_berkeley,1775
-0,1,352,great,11,2174,east_elmwood,2174
-0,1,281,good,10,1567,north_berkeley,1567
-0,1,178,poor,22,1322,frontage_rd,1316.712
-3,2,1124,poor,34,4603,frontage_rd,4474.116
-2,1,858,good,12,3307,north_berkeley,3307
-0,1,143,poor,21,1310,frontage_rd,1307.38
-3,2,1042,good,3,4836,north_berkeley,4836
-1,1,383,good,10,1740,north_berkeley,1740
-1,1,324,poor,50,1324,frontage_rd,1244.56
-2,1,508,great,2,3559,east_elmwood,3559
-1,1,398,good,6,1868,north_berkeley,1868
-1,1,408,good,6,1810,north_berkeley,1810
-2,1,813,poor,34,3057,frontage_rd,2971.404
-0,1,380,good,11,1627,north_berkeley,1627
-2,1,747,poor,16,3219,frontage_rd,3219
-2,1,791,poor,24,3175,frontage_rd,3149.6
-0,1,185,good,0,1578,north_berkeley,1578
-3,2,963,good,9,4736,north_berkeley,4736
-1,1,327,poor,49,1379,frontage_rd,1299.018
-0,1,125,great,10,1930,east_elmwood,1930
-3,2,1140,great,2,5507,east_elmwood,5507
-2,1,654,good,2,3270,north_berkeley,3270
-0,1,405,good,3,1754,north_berkeley,1754
-3,2,1206,good,7,4996,north_berkeley,4996
-0,1,261,good,7,1639,north_berkeley,1639
-2,1,772,poor,16,3224,frontage_rd,3224
-0,1,379,good,11,1649,north_berkeley,1649
-2,1,732,great,6,3730,east_elmwood,3730
-3,2,1187,good,9,4964,north_berkeley,4964
-2,1,712,great,14,3707,east_elmwood,3707
-1,1,662,poor,31,1849,frontage_rd,1808.322
-0,1,339,poor,30,1452,frontage_rd,1422.96
-2,1,880,poor,49,2957,frontage_rd,2785.494
-1,1,359,poor,44,1392,frontage_rd,1325.184
-0,1,163,poor,52,1037,frontage_rd,970.632
-0,1,365,poor,22,1525,frontage_rd,1518.9
-3,2,1091,good,13,4804,north_berkeley,4804
-1,1,697,poor,62,1599,frontage_rd,1464.684
-3,2,1092,poor,45,4470,frontage_rd,4246.5
-1,1,512,poor,54,1493,frontage_rd,1391.476
-2,1,710,poor,40,2891,frontage_rd,2775.36
-2,1,655,poor,62,2629,frontage_rd,2408.164
-3,2,807,good,12,4572,north_berkeley,4572
-0,1,147,good,4,1483,north_berkeley,1483
-2,1,797,good,9,3296,north_berkeley,3296
-0,1,442,good,10,1717,north_berkeley,1717
-0,1,325,great,11,2079,east_elmwood,2079
-2,1,704,great,6,3707,east_elmwood,3707
-0,1,495,great,13,2226,east_elmwood,2226
-3,2,957,good,11,4708,north_berkeley,4708
-2,1,"944,98",good,0,3581,north_berkeley,3581
-1,1,698,good,10,2132,north_berkeley,2132
-3,2,897,good,3,4677,north_berkeley,4677
-1,1,643,poor,23,1942,frontage_rd,1930.348
-2,1,757,good,9,3271,north_berkeley,3271
-3,2,1189,poor,26,4740,frontage_rd,4683.12
-3,2,932,good,9,4738,north_berkeley,4738
-3,2,948,good,9,4670,north_berkeley,4670
-1,1,488,poor,61,1377,frontage_rd,1264.086
-3,2,1185,poor,41,4578,frontage_rd,4385.724
-3,2,824,good,7,4646,north_berkeley,4646
-1,1,472,good,12,1896,north_berkeley,1896
-0,1,479,good,10,1770,north_berkeley,1770
-0,1,490,poor,17,1733,frontage_rd,1733
-1,1,457,great,6,2441,east_elmwood,2441
-1,1,520,good,7,1926,north_berkeley,1926
-1,1,322,great,5,2296,east_elmwood,2296
-0,1,482,poor,54,1295,frontage_rd,1206.94
-2,1,740,poor,40,2931,frontage_rd,2813.76
-2,1,760,good,0,3367,north_berkeley,3367
-2,1,758,poor,24,3120,frontage_rd,3095.04
-2,1,919,great,14,3917,east_elmwood,3917
-3,2,1011,good,4,4853,north_berkeley,4853
-1,1,665,great,6,2638,east_elmwood,2638
-3,2,1194,great,10,5429,east_elmwood,5429
-1,1,714,great,14,2526,east_elmwood,2526
-2,1,922,poor,29,3251,frontage_rd,3192.482
-1,1,649,great,9,2514,east_elmwood,2514
-1,1,380,poor,25,1651,frontage_rd,1634.49
-2,1,875,good,11,3322,north_berkeley,3322
-3,2,883,good,6,4685,north_berkeley,4685
-2,1,531,great,4,3634,east_elmwood,3634
-2,1,712,good,1,3282,north_berkeley,3282
-2,1,841,good,8,3369,north_berkeley,3369
-1,1,433,poor,60,1354,frontage_rd,1245.68
-2,1,929,poor,62,2956,frontage_rd,2707.696
-0,1,116,poor,50,998,frontage_rd,938.12
-1,1,325,good,13,1651,north_berkeley,1651
-3,2,1218,poor,60,4505,frontage_rd,4144.6
-1,1,335,poor,17,1645,frontage_rd,1645
-1,1,653,poor,28,1899,frontage_rd,1868.616
-3,2,1102,great,8,5390,east_elmwood,5390
-2,1,921,great,11,3888,east_elmwood,3888
-3,2,1207,poor,24,4809,frontage_rd,4770.528
-3,2,1054,good,9,4852,north_berkeley,4852
-3,2,1159,great,10,5383,east_elmwood,5383
-0,1,192,poor,41,1135,frontage_rd,1087.33
-0,1,469,good,7,1755,north_berkeley,1755
-2,1,687,good,12,3166,north_berkeley,3166
-3,2,829,good,13,4590,north_berkeley,4590
-0,1,445,good,12,1724,north_berkeley,1724
-0,1,226,poor,46,1202,frontage_rd,1139.496
-1,1,466,poor,30,1672,frontage_rd,1638.56
-3,2,1131,good,0,4950,north_berkeley,4950
-1,1,465,poor,15,1787,frontage_rd,1787
-0,1,454,great,13,2206,east_elmwood,2206
-2,1,545,good,3,3150,north_berkeley,3150
-1,1,696,good,11,2113,north_berkeley,2113
-3,2,1064,poor,27,4634,frontage_rd,4569.124
-0,1,193,poor,62,942,frontage_rd,862.872
-0,1,410,poor,46,1389,frontage_rd,1316.772
-3,2,1174,good,9,4919,north_berkeley,4919
-0,1,382,great,7,2204,east_elmwood,2204
-3,2,1002,good,4,4860,north_berkeley,4860
-0,1,542,great,3,2377,east_elmwood,2377
-1,1,621,great,0,2577,east_elmwood,2577
-3,2,1159,good,3,5002,north_berkeley,5002
-2,1,741,poor,16,3158,frontage_rd,3158
-3,2,1101,great,13,5282,east_elmwood,5282
-3,2,857,great,3,5208,east_elmwood,5208
-1,1,336,great,1,2351,east_elmwood,2351
-1,1,429,poor,38,1516,frontage_rd,1461.424
-3,2,1192,great,12,5421,east_elmwood,5421
-2,1,532,poor,20,2909,frontage_rd,2909
-0,1,399,good,5,1784,north_berkeley,1784
-1,1,652,good,12,2020,north_berkeley,2020
-2,1,879,great,2,3912,east_elmwood,3912
-3,2,833,poor,30,4378,frontage_rd,4290.44
-0,1,119,good,4,1485,north_berkeley,1485
-1,1,670,good,3,2179,north_berkeley,2179
-2,1,837,good,11,3301,north_berkeley,3301
-3,2,1094,poor,36,4542,frontage_rd,4396.656
-3,2,1132,good,6,4961,north_berkeley,4961
-3,2,1089,great,8,5328,east_elmwood,5328
-1,1,483,poor,31,1663,frontage_rd,1626.414
-0,1,456,poor,38,1525,frontage_rd,1470.1
-1,1,550,good,14,1952,north_berkeley,1952
-0,1,144,good,0,1580,north_berkeley,1580
-1,1,543,poor,63,1372,frontage_rd,1254.008
-2,1,733,good,6,3281,north_berkeley,3281
-2,1,773,good,8,3293,north_berkeley,3293
-0,1,508,good,1,1928,north_berkeley,1928
-1,1,685,poor,25,1965,frontage_rd,1945.35
-0,1,402,poor,46,1324,frontage_rd,1255.152
-2,1,571,good,12,3054,north_berkeley,3054
-1,1,336,poor,24,1580,frontage_rd,1567.36
-0,1,406,good,12,1668,north_berkeley,1668
-0,1,499,good,4,1816,north_berkeley,1816
-0,1,313,poor,40,1339,frontage_rd,1285.44
-1,1,681,good,4,2109,north_berkeley,2109
-2,1,721,poor,59,2752,frontage_rd,2537.344
-1,1,305,poor,48,1311,frontage_rd,1237.584
-3,2,923,poor,38,4418,frontage_rd,4258.952
-1,1,379,poor,34,1524,frontage_rd,1481.328
-0,1,421,great,3,2331,east_elmwood,2331
-2,1,594,poor,34,2841,frontage_rd,2761.452
-1,1,658,great,12,2491,east_elmwood,2491
-3,2,978,good,5,4820,north_berkeley,4820
-0,1,247,poor,36,1301,frontage_rd,1259.368
-2,1,812,great,12,3836,east_elmwood,3836
-1,1,326,good,9,1782,north_berkeley,1782
-1,1,661,poor,16,2004,frontage_rd,2004
-0,1,415,good,8,1753,north_berkeley,1753
-0,1,371,poor,63,1101,frontage_rd,1006.314
-3,2,1165,poor,33,4733,frontage_rd,4609.942
-3,2,915,good,1,4740,north_berkeley,4740
-0,1,270,good,8,1598,north_berkeley,1598
-0,1,239,good,7,1574,north_berkeley,1574
-3,2,1183,great,3,5474,east_elmwood,5474
-1,1,332,great,0,2302,east_elmwood,2302
-3,2,853,great,0,5187,east_elmwood,5187
-2,1,852,poor,54,2953,frontage_rd,2752.196
-2,1,764,poor,64,2751,frontage_rd,2508.912
-1,1,338,good,0,1826,north_berkeley,1826
-2,1,905,poor,46,3031,frontage_rd,2873.388
-2,1,711,good,13,3177,north_berkeley,3177
-1,1,580,poor,42,1630,frontage_rd,1558.28
-3,2,1111,poor,62,4325,frontage_rd,3961.7
-2,1,557,good,2,3181,north_berkeley,3181
-3,2,1192,poor,24,4759,frontage_rd,4720.928
-1,1,548,great,6,2463,east_elmwood,2463
-1,1,369,great,11,2278,east_elmwood,2278
-1,1,742,good,8,2120,north_berkeley,2120
-0,1,291,good,12,1546,north_berkeley,1546
-3,2,1096,great,1,5464,east_elmwood,5464
-2,1,936,poor,61,2950,frontage_rd,2708.1
-0,1,181,great,11,1964,east_elmwood,1964
-0,1,490,good,11,1760,north_berkeley,1760
-1,1,516,poor,44,1622,frontage_rd,1544.144
-1,1,344,poor,33,1508,frontage_rd,1468.792
-2,1,821,good,1,3408,north_berkeley,3408
-2,1,702,great,8,3695,east_elmwood,3695
-3,2,882,poor,49,4218,frontage_rd,3973.356
-2,1,945,poor,21,3367,frontage_rd,3360.266
-3,2,999,good,5,4830,north_berkeley,4830
-1,1,395,poor,30,1578,frontage_rd,1546.44
-0,1,527,great,8,2387,east_elmwood,2387
-3,2,1006,poor,48,4345,frontage_rd,4101.68
-1,1,696,good,10,2103,north_berkeley,2103
-0,1,309,good,0,1697,north_berkeley,1697
-3,2,923,good,1,4774,north_berkeley,4774
-3,2,879,good,7,4674,north_berkeley,4674
-0,1,227,good,7,1593,north_berkeley,1593
-0,1,150,poor,49,1053,frontage_rd,991.926
-2,1,912,good,6,3473,north_berkeley,3473
-1,1,353,poor,54,1293,frontage_rd,1205.076
-0,1,235,poor,52,1160,frontage_rd,1085.76
-2,1,584,poor,26,2922,frontage_rd,2886.936
-0,1,505,good,9,1827,north_berkeley,1827
-1,1,489,great,2,2514,east_elmwood,2514
-1,1,694,poor,28,1931,frontage_rd,1900.104
-2,1,638,good,1,3237,north_berkeley,3237
-0,1,285,good,0,1664,north_berkeley,1664
-3,2,1041,good,8,4829,north_berkeley,4829
-1,1,622,great,6,2604,east_elmwood,2604
-0,1,290,poor,20,1527,frontage_rd,1527
-2,1,823,good,3,3385,north_berkeley,3385
-0,1,330,poor,19,1522,frontage_rd,1522
-2,1,830,great,11,3857,east_elmwood,3857
-2,1,774,good,7,3322,north_berkeley,3322
-1,1,628,poor,52,1644,frontage_rd,1538.784
-3,2,1030,great,7,5351,east_elmwood,5351
-0,1,378,great,10,2200,east_elmwood,2200
-0,1,289,poor,44,1250,frontage_rd,1190
-1,1,519,good,4,1993,north_berkeley,1993
-0,1,144,good,11,1468,north_berkeley,1468
-2,1,585,poor,36,2806,frontage_rd,2716.208
-2,1,500,good,5,3001,north_berkeley,3001
-2,1,850,poor,24,3255,frontage_rd,3228.96
-3,2,970,good,13,4645,north_berkeley,4645
-2,1,728,great,9,3688,east_elmwood,3688
-3,2,1031,great,13,5242,east_elmwood,5242
-0,1,526,great,2,2393,east_elmwood,2393
-0,1,505,good,4,1882,north_berkeley,1882
-0,1,205,good,0,1579,north_berkeley,1579
-0,1,508,poor,49,1393,frontage_rd,1312.206
-1,1,471,good,13,1876,north_berkeley,1876
-1,1,359,poor,24,1628,frontage_rd,1614.976
-1,1,739,poor,64,1617,frontage_rd,1474.704
-3,2,1164,good,6,4945,north_berkeley,4945
-1,1,509,great,13,2402,east_elmwood,2402
-2,1,804,good,13,3227,north_berkeley,3227
-0,1,502,good,11,1748,north_berkeley,1748
-3,2,1152,poor,52,4524,frontage_rd,4234.464
-2,1,934,great,5,3947,east_elmwood,3947
-3,2,823,great,4,5149,east_elmwood,5149
-2,1,556,great,4,3646,east_elmwood,3646
-0,1,196,poor,41,1226,frontage_rd,1174.508
-2,1,665,poor,26,3048,frontage_rd,3011.424
-1,1,458,good,11,1826,north_berkeley,1826
-1,1,664,good,10,2024,north_berkeley,2024
-3,2,813,poor,26,4417,frontage_rd,4363.996
-3,2,1105,great,5,5374,east_elmwood,5374
-2,1,732,good,13,3152,north_berkeley,3152
-1,1,364,poor,49,1327,frontage_rd,1250.034
-1,1,498,good,2,1949,north_berkeley,1949
-1,1,731,good,13,2137,north_berkeley,2137
-3,2,1004,good,1,4870,north_berkeley,4870
-0,1,417,great,9,2265,east_elmwood,2265
-2,1,771,good,9,3328,north_berkeley,3328
-2,1,503,poor,32,2763,frontage_rd,2696.688
-3,2,1080,great,0,5459,east_elmwood,5459
-1,1,458,great,3,2384,east_elmwood,2384
-3,2,1240,poor,27,4817,frontage_rd,4749.562
-1,1,703,good,3,2174,north_berkeley,2174
-3,2,1100,good,10,4819,north_berkeley,4819
-3,2,1233,good,1,5037,north_berkeley,5037
-2,1,748,poor,25,3113,frontage_rd,3081.87
-3,2,954,poor,22,4632,frontage_rd,4613.472
-1,1,512,poor,49,1552,frontage_rd,1461.984
-0,1,141,great,9,1996,east_elmwood,1996
-2,1,888,great,1,3970,east_elmwood,3970
-2,1,944,poor,54,3019,frontage_rd,2813.708
-3,2,853,poor,50,4219,frontage_rd,3965.86
-2,1,686,good,12,3201,north_berkeley,3201
-2,1,763,poor,48,2850,frontage_rd,2690.4
-3,2,1107,poor,43,4567,frontage_rd,4356.918
-1,1,615,poor,31,1757,frontage_rd,1718.346
-3,2,1132,good,4,4938,north_berkeley,4938
-3,2,1038,poor,26,4655,frontage_rd,4599.14
-1,1,701,great,11,2570,east_elmwood,2570
-2,1,510,good,6,3053,north_berkeley,3053
-3,2,951,poor,57,4229,frontage_rd,3916.054
-2,1,764,poor,51,2878,frontage_rd,2699.564
-3,2,1148,good,0,5024,north_berkeley,5024
-2,1,827,poor,50,2966,frontage_rd,2788.04
-1,1,664,poor,52,1613,frontage_rd,1509.768
-3,2,1061,good,11,4811,north_berkeley,4811
-0,1,449,good,7,1773,north_berkeley,1773
-0,1,284,good,2,1695,north_berkeley,1695
-3,2,1191,good,7,5020,north_berkeley,5020
-3,2,1039,good,13,4765,north_berkeley,4765
-1,1,365,good,2,1828,north_berkeley,1828
-2,1,781,good,11,3299,north_berkeley,3299
-3,2,889,great,3,5256,east_elmwood,5256
-2,1,827,good,9,3305,north_berkeley,3305
-1,1,414,great,6,2379,east_elmwood,2379
-1,1,454,good,12,1793,north_berkeley,1793
-0,1,201,good,5,1594,north_berkeley,1594
-0,1,320,poor,64,1085,frontage_rd,989.52
-2,1,946,poor,56,3027,frontage_rd,2809.056
-3,2,1076,great,13,5332,east_elmwood,5332
-1,1,643,good,14,2034,north_berkeley,2034
-2,1,791,poor,62,2738,frontage_rd,2508.008
-1,1,580,great,4,2571,east_elmwood,2571
-0,1,104,great,0,2043,east_elmwood,2043
-1,1,584,good,12,1916,north_berkeley,1916
-3,2,1006,good,4,4784,north_berkeley,4784
-1,1,516,good,13,1915,north_berkeley,1915
-0,1,447,poor,15,1730,frontage_rd,1730
-1,1,722,good,8,2169,north_berkeley,2169
-3,2,811,poor,34,4343,frontage_rd,4221.396
-2,1,560,great,12,3517,east_elmwood,3517
-0,1,397,good,13,1694,north_berkeley,1694
-3,2,1061,good,2,4911,north_berkeley,4911
-3,2,888,good,10,4679,north_berkeley,4679
-1,1,358,good,0,1828,north_berkeley,1828
-3,2,982,great,12,5245,east_elmwood,5245
-2,1,864,good,14,3307,north_berkeley,3307
-0,1,188,poor,25,1364,frontage_rd,1350.36
-1,1,523,good,6,1942,north_berkeley,1942
-3,2,865,great,0,5248,east_elmwood,5248
-0,1,353,good,5,1722,north_berkeley,1722
-0,1,407,good,7,1753,north_berkeley,1753
-1,1,306,great,4,2226,east_elmwood,2226
-0,1,388,poor,60,1152,frontage_rd,1059.84
-0,1,110,poor,22,1319,frontage_rd,1313.724
-1,1,553,poor,35,1689,frontage_rd,1638.33
-1,1,339,poor,15,1698,frontage_rd,1698
-3,2,897,poor,17,4543,frontage_rd,4543
-2,1,574,good,9,3036,north_berkeley,3036
-0,1,449,good,10,1731,north_berkeley,1731
-0,1,441,good,5,1799,north_berkeley,1799
-0,1,502,great,4,2330,east_elmwood,2330
-2,1,757,poor,44,2956,frontage_rd,2814.112
-3,2,962,good,3,4830,north_berkeley,4830
-1,1,649,great,4,2612,east_elmwood,2612
-0,1,256,good,11,1577,north_berkeley,1577
-1,1,747,great,12,2654,east_elmwood,2654
-3,2,833,great,10,5104,east_elmwood,5104
-1,1,687,good,12,2060,north_berkeley,2060
-2,1,552,good,8,3065,north_berkeley,3065
-3,2,1241,great,2,5569,east_elmwood,5569
-0,1,459,poor,61,1298,frontage_rd,1191.564
-3,2,873,poor,26,4444,frontage_rd,4390.672
-0,1,152,poor,22,1356,frontage_rd,1350.576
-3,2,922,good,7,4670,north_berkeley,4670
-1,1,720,good,3,2198,north_berkeley,2198
-2,1,931,good,2,3497,north_berkeley,3497
-0,1,524,good,6,1893,north_berkeley,1893
-2,1,555,poor,54,2575,frontage_rd,2399.9
-0,1,159,good,7,1488,north_berkeley,1488
-1,1,380,good,8,1768,north_berkeley,1768
-1,1,685,good,0,2155,north_berkeley,2155
-2,1,848,good,11,3354,north_berkeley,3354
-1,1,547,poor,27,1751,frontage_rd,1726.486
-2,1,896,good,12,3363,north_berkeley,3363
-0,1,269,good,6,1560,north_berkeley,1560
-3,2,903,good,0,4764,north_berkeley,4764
-2,1,615,great,4,3633,east_elmwood,3633
-2,1,881,poor,35,3161,frontage_rd,3066.17
-0,1,331,good,13,1583,north_berkeley,1583
-0,1,498,poor,64,1238,frontage_rd,1129.056
-0,1,308,poor,28,1387,frontage_rd,1364.808
-0,1,103,great,3,1973,east_elmwood,1973
-3,2,1044,poor,52,4405,frontage_rd,4123.08
-2,1,788,poor,33,3010,frontage_rd,2931.74
-0,1,524,poor,33,1621,northwest,1578.854
-1,1,397,poor,25,1696,northwest,1679.04
-2,1,876,poor,27,3215,northwest,3169.99
-2,1,751,poor,64,2677,northwest,2441.424
-2,1,622,poor,32,2886,northwest,2816.736
-0,1,316,good,10,1596,west_welmwood,1596
-3,2,1171,poor,54,4473,northwest,4168.836
-3,2,1108,good,5,4875,west_welmwood,4875
-2,1,541,good,0,3103,west_welmwood,3103
-3,2,865,good,12,4643,west_welmwood,4643
-3,2,1165,great,3,5491,east_elmwood,5491
-2,1,728,good,2,3280,west_welmwood,3280
-0,1,298,great,13,2071,east_elmwood,2071
-3,2,923,good,12,4683,west_welmwood,4683
-0,1,274,great,5,2130,east_elmwood,2130
-0,1,349,poor,32,1444,northwest,1409.344
-2,1,796,great,4,3896,east_elmwood,3896
-1,1,726,poor,28,1898,northwest,1867.632
-2,1,501,great,5,3542,east_elmwood,3542
-2,1,714,good,4,3284,west_welmwood,3284
-3,2,988,great,4,5289,east_elmwood,5289
-2,1,546,poor,34,2757,northwest,2679.804
-0,1,484,poor,20,1678,northwest,1678
-3,2,1230,good,5,5059,west_welmwood,5059
-0,1,142,good,5,1492,west_welmwood,1492
-2,1,774,poor,53,2872,northwest,2682.448
-2,1,902,good,11,3375,west_welmwood,3375
-0,1,457,good,11,1763,west_welmwood,1763
-3,2,1034,good,7,4828,west_welmwood,4828
-3,2,838,poor,25,4412,northwest,4367.88
-1,1,503,good,14,1900,west_welmwood,1900
-0,1,445,good,7,1737,west_welmwood,1737
-2,1,914,poor,34,3189,northwest,3099.708
-2,1,867,great,6,3952,east_elmwood,3952
-2,1,583,good,4,3109,west_welmwood,3109
-0,1,161,great,7,2014,east_elmwood,2014
-1,1,707,good,14,2058,west_welmwood,2058
-1,1,343,great,9,2270,east_elmwood,2270
-2,1,746,great,14,3710,east_elmwood,3710
-2,1,706,good,14,3151,west_welmwood,3151
-1,1,610,good,5,2047,west_welmwood,2047
-3,2,1205,good,12,4969,west_welmwood,4969
-3,2,827,good,7,4655,west_welmwood,4655
-3,2,1055,good,0,4930,west_welmwood,4930
-2,1,625,good,14,3083,west_welmwood,3083
-2,1,630,good,14,3053,west_welmwood,3053
-1,1,654,poor,59,1575,northwest,1452.15
-3,2,1195,poor,17,4831,northwest,4831
-3,2,1227,good,11,4951,west_welmwood,4951
-3,2,1242,good,3,5081,west_welmwood,5081
-2,1,881,good,11,3337,west_welmwood,3337
-1,1,689,poor,35,1880,northwest,1823.6
-2,1,501,poor,35,2704,northwest,2622.88
-1,1,412,poor,21,1751,northwest,1747.498
-3,2,1081,poor,15,4793,northwest,4793
-2,1,725,great,2,3807,east_elmwood,3807
-2,1,768,great,11,3790,east_elmwood,3790
-0,1,152,good,2,1530,west_welmwood,1530
-3,2,1074,good,9,4804,west_welmwood,4804
-1,1,559,good,12,1890,west_welmwood,1890
-2,1,706,great,10,3747,east_elmwood,3747
-2,1,738,poor,23,3095,northwest,3076.43
-2,1,893,great,4,3962,east_elmwood,3962
-1,1,468,great,7,2405,east_elmwood,2405
-2,1,652,poor,22,3017,northwest,3004.932
-1,1,327,good,6,1769,west_welmwood,1769
-2,1,899,great,3,3930,east_elmwood,3930
-3,2,997,good,11,4733,west_welmwood,4733
-3,2,920,poor,20,4602,northwest,4602
-1,1,300,poor,55,1211,northwest,1126.23
-3,2,1174,great,0,5496,east_elmwood,5496
-1,1,679,good,14,2049,west_welmwood,2049
-3,2,887,good,0,4781,west_welmwood,4781
-2,1,856,good,9,3377,west_welmwood,3377
-0,1,216,good,14,1484,west_welmwood,1484
-0,1,476,great,3,2312,east_elmwood,2312
-2,1,592,good,12,3042,west_welmwood,3042
-0,1,325,poor,42,1337,northwest,1278.172
-2,1,790,poor,19,3170,northwest,3170
-1,1,354,good,12,1728,west_welmwood,1728
-1,1,529,good,12,1949,west_welmwood,1949
-1,1,522,great,12,2420,east_elmwood,2420
-0,1,358,good,13,1660,west_welmwood,1660
-0,1,468,poor,47,1447,northwest,1368.862
-0,1,429,great,11,2255,east_elmwood,2255
-3,2,1176,great,0,5535,east_elmwood,5535
-0,1,361,good,12,1656,west_welmwood,1656
-3,2,1214,good,2,5046,west_welmwood,5046
-1,1,738,good,3,2224,west_welmwood,2224
-3,2,852,poor,59,4128,northwest,3806.016
-3,2,1178,good,9,4951,west_welmwood,4951
-3,2,956,good,5,4746,west_welmwood,4746
-1,1,568,good,14,1884,west_welmwood,1884
-1,1,704,great,7,2638,east_elmwood,2638
-1,1,519,poor,64,1386,northwest,1264.032
-1,1,737,good,5,2185,west_welmwood,2185
-1,1,455,poor,47,1510,northwest,1428.46
-0,1,346,poor,61,1118,northwest,1026.324
-0,1,285,good,5,1627,west_welmwood,1627
-3,2,829,good,0,4646,west_welmwood,4646
-2,1,672,good,6,3202,west_welmwood,3202
-1,1,451,good,6,1873,west_welmwood,1873
-1,1,697,good,8,2119,west_welmwood,2119
-2,1,691,poor,34,2913,northwest,2831.436
-1,1,610,poor,49,1598,northwest,1505.316
-0,1,410,great,2,2287,east_elmwood,2287
-2,1,529,great,2,3606,east_elmwood,3606
-0,1,273,good,0,1656,west_welmwood,1656
-0,1,482,poor,55,1299,northwest,1208.07
-0,1,170,good,10,1421,west_welmwood,1421
-1,1,565,good,2,2001,west_welmwood,2001
-1,1,712,good,2,2186,west_welmwood,2186
-0,1,246,poor,16,1465,northwest,1465
-1,1,379,good,12,1725,west_welmwood,1725
-2,1,575,good,3,3095,west_welmwood,3095
-1,1,477,good,4,1900,west_welmwood,1900
-3,2,1081,good,9,4847,west_welmwood,4847
-2,1,908,good,13,3348,west_welmwood,3348
-3,2,1119,poor,47,4530,northwest,4285.38
-3,2,943,good,13,4693,west_welmwood,4693
-2,1,763,good,2,3339,west_welmwood,3339
-3,2,1192,poor,39,4630,northwest,4454.06
-2,1,600,great,14,3547,east_elmwood,3547
-0,1,361,poor,60,1186,northwest,1091.12
-2,1,847,poor,29,3160,northwest,3103.12
-0,1,206,poor,42,1215,northwest,1161.54
-1,1,300,great,12,2133,east_elmwood,2133
-2,1,927,good,4,3440,west_welmwood,3440
-0,1,524,poor,15,1726,northwest,1726
-1,1,469,poor,38,1637,northwest,1578.068
-0,1,193,poor,40,1165,northwest,1118.4
-2,1,627,poor,29,2924,northwest,2871.368
-2,1,603,good,6,3180,west_welmwood,3180
-0,1,424,poor,21,1639,northwest,1635.722
-2,1,785,poor,28,3149,northwest,3098.616
-3,2,1020,poor,15,4676,northwest,4676
-2,1,526,great,3,3602,east_elmwood,3602
-2,1,555,good,11,3089,west_welmwood,3089
-2,1,730,great,7,3781,east_elmwood,3781
-3,2,1228,good,10,4955,west_welmwood,4955
-0,1,437,poor,30,1585,northwest,1553.3
-1,1,511,poor,31,1746,northwest,1707.588
-3,2,1125,great,7,5430,east_elmwood,5430
-2,1,838,poor,16,3259,northwest,3259
-2,1,736,great,4,3837,east_elmwood,3837
-2,1,591,great,3,3670,east_elmwood,3670
-0,1,537,good,9,1858,west_welmwood,1858
-2,1,690,good,3,3216,west_welmwood,3216
-0,1,475,poor,36,1486,northwest,1438.448
-0,1,245,great,9,2039,east_elmwood,2039
-2,1,568,good,0,3137,west_welmwood,3137
-1,1,466,poor,23,1764,northwest,1753.416
-0,1,489,great,12,2311,east_elmwood,2311
-1,1,727,good,4,2217,west_welmwood,2217
-2,1,541,good,0,3148,west_welmwood,3148
-2,1,531,good,10,3053,west_welmwood,3053
-0,1,548,good,3,1931,west_welmwood,1931
-1,1,548,good,8,1975,west_welmwood,1975
-1,1,394,good,6,1800,west_welmwood,1800
-3,2,1231,good,12,4988,west_welmwood,4988
-2,1,532,great,3,3614,east_elmwood,3614
-0,1,256,good,4,1582,west_welmwood,1582
-2,1,613,good,1,3202,west_welmwood,3202
-3,2,1186,good,13,4928,west_welmwood,4928
-1,1,432,good,1,1910,west_welmwood,1910
-1,1,351,good,7,1804,west_welmwood,1804
-1,1,300,poor,27,1528,northwest,1506.608
-0,1,325,poor,51,1220,northwest,1144.36
-3,2,1101,great,11,5388,east_elmwood,5388
-3,2,1029,good,0,4831,west_welmwood,4831
-1,1,662,great,14,2517,east_elmwood,2517
-2,1,611,poor,48,2772,northwest,2616.768
-3,2,1155,good,7,4951,west_welmwood,4951
-2,1,683,great,14,3670,east_elmwood,3670
-0,1,361,great,13,2098,east_elmwood,2098
-1,1,589,poor,20,1872,northwest,1872
-2,1,764,poor,40,2915,northwest,2798.4
-1,1,522,good,12,1945,west_welmwood,1945
-0,1,227,poor,52,1120,northwest,1048.32
-2,1,525,good,3,3112,west_welmwood,3112
-2,1,656,good,4,3234,west_welmwood,3234
-1,1,630,poor,61,1505,northwest,1381.59
-3,2,842,great,14,5011,east_elmwood,5011
-0,1,520,poor,33,1552,northwest,1511.648
-3,2,1013,poor,46,4403,northwest,4174.044
-2,1,747,poor,20,3170,northwest,3170
-0,1,217,great,1,2075,east_elmwood,2075
-0,1,134,poor,31,1185,northwest,1158.93
-3,2,813,good,7,4622,west_welmwood,4622
-2,1,722,good,0,3337,west_welmwood,3337
-2,1,710,poor,45,2845,northwest,2702.75
-1,1,377,great,7,2353,east_elmwood,2353
-3,2,1169,poor,57,4441,northwest,4112.366
-3,2,881,good,4,4641,west_welmwood,4641
-0,1,415,great,5,2283,east_elmwood,2283
-1,1,401,poor,17,1705,northwest,1705
-1,1,552,poor,64,1460,northwest,1331.52
-3,2,894,good,9,4639,west_welmwood,4639
-2,1,925,good,8,3414,west_welmwood,3414
-2,1,661,good,10,3135,west_welmwood,3135
-0,1,233,poor,54,1123,northwest,1046.636
-0,1,264,poor,15,1521,northwest,1521
-1,1,445,poor,48,1482,northwest,1399.008
-2,1,849,good,10,3308,west_welmwood,3308
-2,1,852,poor,31,3116,northwest,3047.448
-2,1,901,great,6,3977,east_elmwood,3977
-2,1,934,great,14,3876,east_elmwood,3876
-1,1,691,good,12,2021,west_welmwood,2021
-2,1,722,great,9,3734,east_elmwood,3734
-3,2,1000,good,9,4741,west_welmwood,4741
-2,1,573,great,3,3598,east_elmwood,3598
-1,1,732,poor,42,1858,northwest,1776.248
-0,1,319,great,13,2068,east_elmwood,2068
-0,1,101,poor,53,944,northwest,881.696
-2,1,838,poor,42,3053,northwest,2918.668
-2,1,518,poor,57,2571,northwest,2380.746
-2,1,876,great,6,3893,east_elmwood,3893
-0,1,482,good,13,1741,west_welmwood,1741
-2,1,789,good,7,3342,west_welmwood,3342
-3,2,955,poor,18,4627,northwest,4627
-0,1,102,great,5,1924,east_elmwood,1924
-0,1,373,poor,50,1299,northwest,1221.06
-3,2,1056,poor,25,4679,northwest,4632.21
-0,1,184,great,1,2098,east_elmwood,2098
-0,1,107,good,14,1373,west_welmwood,1373
-0,1,430,good,3,1813,west_welmwood,1813
-0,1,255,poor,26,1382,northwest,1365.416
-3,2,1242,great,12,5514,east_elmwood,5514
-3,2,1124,good,13,4817,west_welmwood,4817
-2,1,833,great,2,3943,east_elmwood,3943
-0,1,417,good,11,1660,west_welmwood,1660
-0,1,254,good,8,1566,west_welmwood,1566
-1,1,481,good,4,1986,west_welmwood,1986
-2,1,611,good,4,3193,west_welmwood,3193
-1,1,639,poor,20,1948,northwest,1948
-0,1,455,good,7,1799,west_welmwood,1799
-2,1,881,poor,54,2903,northwest,2705.596
-1,1,364,great,12,2225,east_elmwood,2225
-0,1,172,good,8,1444,west_welmwood,1444
-1,1,643,great,1,2637,east_elmwood,2637
-2,1,663,good,4,3269,west_welmwood,3269
-2,1,931,good,5,3524,west_welmwood,3524
-3,2,1143,great,2,5488,east_elmwood,5488
-2,1,681,good,13,3185,west_welmwood,3185
-1,1,426,good,11,1857,west_welmwood,1857
-3,2,1070,great,3,5342,east_elmwood,5342
-1,1,388,good,14,1744,west_welmwood,1744
-1,1,722,good,2,2219,west_welmwood,2219
-2,1,621,great,3,3665,east_elmwood,3665
-3,2,1018,poor,45,4452,northwest,4229.4
-3,2,845,poor,39,4284,northwest,4121.208
-0,1,323,poor,24,1529,northwest,1516.768
-1,1,558,good,11,1917,west_welmwood,1917
-2,1,813,great,10,3819,east_elmwood,3819
-1,1,601,poor,36,1771,northwest,1714.328
-1,1,617,good,8,1999,west_welmwood,1999
-2,1,851,good,4,3414,west_welmwood,3414
-2,1,828,good,14,3248,west_welmwood,3248
-3,2,825,good,0,4711,west_welmwood,4711
-1,1,386,good,13,1713,west_welmwood,1713
-3,2,895,good,2,4769,west_welmwood,4769
-1,1,443,good,14,1845,west_welmwood,1845
-3,2,846,poor,56,4124,northwest,3827.072
-3,2,896,great,2,5257,east_elmwood,5257
-3,2,1014,poor,40,4482,northwest,4302.72
-1,1,378,good,8,1807,west_welmwood,1807
-3,2,1126,poor,41,4551,northwest,4359.858
-2,1,929,poor,47,3039,northwest,2874.894
-2,1,739,poor,57,2753,northwest,2549.278
-3,2,1145,good,13,4887,west_welmwood,4887
-1,1,667,poor,54,1581,northwest,1473.492
-2,1,836,good,4,3361,west_welmwood,3361
-2,1,758,good,9,3265,west_welmwood,3265
-3,2,824,good,8,4570,west_welmwood,4570
-1,1,506,good,12,1895,west_welmwood,1895
-1,1,544,poor,62,1429,northwest,1308.964
-0,1,121,good,2,1512,west_welmwood,1512
-1,1,638,poor,34,1759,northwest,1709.748
-0,1,135,good,4,1480,west_welmwood,1480
-1,1,385,good,3,1874,west_welmwood,1874
-2,1,589,great,8,3651,east_elmwood,3651
-0,1,511,good,4,1905,west_welmwood,1905
-2,1,663,poor,63,2589,northwest,2366.346
-1,1,739,poor,56,1677,northwest,1556.256
-1,1,610,poor,39,1673,northwest,1609.426
-3,2,818,great,8,5081,east_elmwood,5081
-3,2,1202,good,5,4988,west_welmwood,4988
-0,1,164,good,7,1496,west_welmwood,1496
-0,1,336,poor,60,1153,northwest,1060.76
-3,2,1243,good,10,4950,west_welmwood,4950
-2,1,563,great,11,3503,east_elmwood,3503
-1,1,425,good,10,1818,west_welmwood,1818
-2,1,849,poor,45,2993,northwest,2843.35
-1,1,701,good,2,2170,west_welmwood,2170
-1,1,689,great,11,2557,east_elmwood,2557
-3,2,958,poor,61,4202,northwest,3857.436
-2,1,732,poor,16,3201,northwest,3201
-1,1,318,poor,49,1368,northwest,1288.656
-1,1,471,good,8,1852,west_welmwood,1852
-0,1,378,great,3,2205,east_elmwood,2205
-1,1,582,poor,30,1800,northwest,1764
-1,1,326,great,12,2239,east_elmwood,2239
-1,1,307,good,0,1856,west_welmwood,1856
-3,2,1206,good,10,4955,west_welmwood,4955
-1,1,547,great,13,2367,east_elmwood,2367
-0,1,215,good,10,1530,west_welmwood,1530
-1,1,454,good,2,1889,west_welmwood,1889
-1,1,367,great,5,2319,east_elmwood,2319
-1,1,373,good,4,1831,west_welmwood,1831
-3,2,921,good,12,4672,west_welmwood,4672
-0,1,230,great,5,2105,east_elmwood,2105
-3,2,1110,poor,47,4482,northwest,4239.972
-3,2,1148,good,9,4891,west_welmwood,4891
-0,1,190,great,14,1922,east_elmwood,1922
-0,1,480,great,7,2324,east_elmwood,2324
-2,1,921,poor,39,3110,northwest,2991.82
-0,1,127,great,9,1948,east_elmwood,1948
-1,1,528,great,5,2450,east_elmwood,2450
-1,1,710,good,5,2193,west_welmwood,2193
-0,1,349,great,12,2117,east_elmwood,2117
-0,1,467,good,13,1692,west_welmwood,1692
-3,2,1137,poor,21,4813,northwest,4803.374
-0,1,394,poor,40,1435,northwest,1377.6
-1,1,659,great,6,2646,east_elmwood,2646
-0,1,410,poor,53,1248,northwest,1165.632
-3,2,1158,poor,54,4464,northwest,4160.448
-0,1,401,poor,52,1312,northwest,1228.032
-1,1,641,poor,46,1680,northwest,1592.64
-1,1,599,great,6,2552,east_elmwood,2552
-1,1,614,good,4,2103,west_welmwood,2103
-0,1,443,good,8,1759,west_welmwood,1759
-0,1,266,poor,44,1196,northwest,1138.592
-1,1,627,good,10,1985,west_welmwood,1985
-0,1,476,poor,30,1530,northwest,1499.4
-1,1,657,good,3,2163,west_welmwood,2163
-2,1,839,poor,35,3073,northwest,2980.81
-3,2,868,poor,28,4447,northwest,4375.848
-1,1,454,good,12,1809,west_welmwood,1809
-0,1,319,poor,63,1088,northwest,994.432
-1,1,537,poor,56,1508,northwest,1399.424
-3,2,1235,great,6,5500,east_elmwood,5500
-0,1,123,good,13,1393,west_welmwood,1393
-0,1,249,great,10,2060,east_elmwood,2060
-2,1,817,great,4,3866,east_elmwood,3866
-2,1,711,poor,43,2882,northwest,2749.428
-1,1,479,good,0,1978,west_welmwood,1978
-0,1,361,poor,27,1540,northwest,1518.44
-3,2,918,poor,45,4329,northwest,4112.55
-2,1,554,good,11,3017,west_welmwood,3017
-1,1,314,poor,23,1543,northwest,1533.742
-2,1,766,good,0,3387,west_welmwood,3387
-3,2,947,good,2,4765,west_welmwood,4765
-0,1,245,great,7,2066,east_elmwood,2066
-1,1,551,good,3,1971,west_welmwood,1971
-3,2,1166,good,3,5008,west_welmwood,5008
-3,2,1064,good,12,4793,west_welmwood,4793
-3,2,1102,poor,26,4711,northwest,4654.468
-0,1,323,good,7,1603,west_welmwood,1603
-3,2,991,great,3,5325,east_elmwood,5325
-0,1,239,poor,18,1500,northwest,1500
-3,2,1037,good,6,4815,west_welmwood,4815
-2,1,949,good,8,3489,west_welmwood,3489
-1,1,442,good,12,1865,west_welmwood,1865
-3,2,901,great,2,5212,east_elmwood,5212
-3,2,898,good,3,4765,west_welmwood,4765
-3,2,1090,poor,59,4368,northwest,4027.296
-1,1,747,great,0,2754,east_elmwood,2754
-2,1,719,great,2,3842,east_elmwood,3842
-0,1,473,good,0,1915,west_welmwood,1915
-1,1,483,good,2,1957,west_welmwood,1957
-2,1,773,good,10,3303,west_welmwood,3303
-3,2,852,good,6,4659,west_welmwood,4659
-1,1,691,great,8,2610,east_elmwood,2610
-2,1,763,good,1,3397,west_welmwood,3397
-2,1,560,poor,46,2736,northwest,2593.728
-2,1,581,great,5,3616,east_elmwood,3616
-0,1,296,good,10,1599,west_welmwood,1599
-2,1,585,great,6,3634,east_elmwood,3634
-1,1,535,good,12,1928,west_welmwood,1928
-1,1,698,poor,35,1846,northwest,1790.62
-2,1,629,poor,50,2695,northwest,2533.3
-0,1,104,poor,61,943,northwest,865.674
-2,1,904,good,14,3386,west_welmwood,3386
-3,2,1100,great,10,5348,east_elmwood,5348
-0,1,212,poor,15,1414,northwest,1414
-1,1,491,poor,38,1577,northwest,1520.228
-2,1,862,great,5,3869,east_elmwood,3869
-3,2,900,poor,59,4155,northwest,3830.91
-3,2,867,good,5,4617,west_welmwood,4617
-1,1,362,great,12,2216,east_elmwood,2216
-1,1,576,great,14,2422,east_elmwood,2422
-0,1,300,great,5,2181,east_elmwood,2181
-3,2,1017,poor,33,4487,northwest,4370.338
-2,1,546,great,1,3672,east_elmwood,3672
-2,1,589,poor,48,2729,northwest,2576.176
-1,1,517,good,5,1948,west_welmwood,1948
-0,1,289,great,13,2066,east_elmwood,2066
-0,1,272,poor,27,1418,northwest,1398.148
-3,2,1222,good,2,5038,west_welmwood,5038
-0,1,410,good,8,1742,west_welmwood,1742
-3,2,1199,good,1,5084,west_welmwood,5084
-1,1,477,good,2,1948,west_welmwood,1948
-2,1,727,poor,61,2708,northwest,2485.944
-2,1,516,good,10,3018,west_welmwood,3018
-1,1,375,poor,45,1440,northwest,1368
-3,2,959,good,1,4800,west_welmwood,4800
-3,2,1031,great,4,5380,east_elmwood,5380
-1,1,465,good,11,1810,west_welmwood,1810
-3,2,1181,good,7,4930,west_welmwood,4930
-0,1,307,poor,63,1120,northwest,1023.68
-3,2,1170,good,11,4880,west_welmwood,4880
-3,2,1093,great,7,5356,east_elmwood,5356
-3,2,1219,good,9,4951,west_welmwood,4951
-2,1,539,poor,42,2709,northwest,2589.804
-1,1,462,great,1,2462,east_elmwood,2462
-3,2,1211,good,5,5032,west_welmwood,5032
-0,1,530,good,13,1837,west_welmwood,1837
-0,1,177,good,5,1496,west_welmwood,1496
-0,1,116,good,14,1424,west_welmwood,1424
-0,1,494,poor,45,1493,northwest,1418.35
-1,1,685,good,4,2173,west_welmwood,2173
-0,1,515,great,7,2389,east_elmwood,2389
-1,1,398,good,1,1900,west_welmwood,1900
-0,1,408,good,0,1839,west_welmwood,1839
-1,1,694,good,0,2165,west_welmwood,2165
-0,1,499,great,5,2324,east_elmwood,2324
-1,1,558,good,6,1974,west_welmwood,1974
-2,1,603,poor,63,2610,northwest,2385.54
-0,1,448,great,9,2260,east_elmwood,2260
-2,1,818,good,3,3409,west_welmwood,3409
-1,1,586,great,4,2535,east_elmwood,2535
-2,1,770,poor,30,3114,northwest,3051.72
-1,1,675,good,13,2053,west_welmwood,2053
-3,2,1024,poor,28,4560,northwest,4487.04
-2,1,532,good,12,3019,west_welmwood,3019
-3,2,816,poor,47,4210,northwest,3982.66
-2,1,838,good,8,3341,west_welmwood,3341
-3,2,1059,great,4,5329,east_elmwood,5329
-1,1,637,good,3,2119,west_welmwood,2119
-0,1,464,good,3,1799,west_welmwood,1799
-2,1,767,good,4,3288,west_welmwood,3288
-2,1,577,great,5,3618,east_elmwood,3618
-1,1,403,poor,28,1619,northwest,1593.096
-2,1,751,good,8,3255,west_welmwood,3255
-0,1,304,great,2,2219,east_elmwood,2219
-2,1,868,poor,41,3033,northwest,2905.614
-0,1,440,great,9,2293,east_elmwood,2293
-2,1,729,great,1,3806,east_elmwood,3806
-2,1,772,good,12,3281,west_welmwood,3281
-1,1,551,poor,18,1840,northwest,1840
-1,1,319,poor,44,1411,northwest,1343.272
-1,1,644,poor,63,1484,northwest,1356.376
-1,1,586,poor,23,1862,northwest,1850.828
-3,2,893,poor,31,4418,northwest,4320.804
-1,1,413,great,3,2343,east_elmwood,2343
-1,1,721,good,14,2031,west_welmwood,2031
-2,1,844,good,7,3358,west_welmwood,3358
-3,2,1247,good,9,4992,west_welmwood,4992
-1,1,686,great,6,2623,east_elmwood,2623
-3,2,975,good,8,4701,west_welmwood,4701
-0,1,487,good,13,1722,west_welmwood,1722
-0,1,115,good,5,1500,west_welmwood,1500
-1,1,555,poor,45,1608,northwest,1527.6
-0,1,194,good,5,1550,west_welmwood,1550
-2,1,683,poor,40,2855,northwest,2740.8
-0,1,498,good,11,1751,west_welmwood,1751
-0,1,540,poor,44,1516,northwest,1443.232
-3,2,1138,good,1,4982,west_welmwood,4982
-1,1,518,great,1,2486,east_elmwood,2486
-2,1,667,poor,37,2933,northwest,2833.278
-2,1,935,good,2,3512,west_welmwood,3512
-3,2,1160,great,4,5431,east_elmwood,5431
-0,1,167,good,13,1401,west_welmwood,1401
-2,1,878,poor,53,2968,northwest,2772.112
-2,1,705,good,5,3267,west_welmwood,3267
-0,1,256,good,3,1657,west_welmwood,1657
-2,1,886,good,11,3369,west_welmwood,3369
-2,1,864,great,0,3929,east_elmwood,3929
-1,1,523,good,0,2046,west_welmwood,2046
-2,1,924,poor,46,3055,northwest,2896.14
-1,1,486,great,13,2383,east_elmwood,2383
-1,1,353,good,6,1744,west_welmwood,1744
-0,1,133,poor,30,1187,northwest,1163.26
-1,1,577,poor,22,1884,northwest,1876.464
-1,1,499,great,6,2486,east_elmwood,2486
-0,1,325,great,1,2170,east_elmwood,2170
-1,1,578,poor,37,1687,northwest,1629.642
-3,2,1032,good,12,4788,west_welmwood,4788
-0,1,229,good,2,1564,west_welmwood,1564
-1,1,539,great,14,2353,east_elmwood,2353
-0,1,538,good,10,1882,west_welmwood,1882
-2,1,564,good,9,3047,west_welmwood,3047
-2,1,697,great,5,3766,east_elmwood,3766
-2,1,629,good,13,3132,west_welmwood,3132
-0,1,530,great,14,2329,east_elmwood,2329
-2,1,688,good,9,3235,west_welmwood,3235
-2,1,925,good,1,3469,west_welmwood,3469
-1,1,537,good,8,1988,west_welmwood,1988
-3,2,1029,great,13,5294,east_elmwood,5294
-1,1,712,good,0,2230,west_welmwood,2230
-3,2,898,good,10,4610,west_welmwood,4610
-2,1,749,good,9,3249,west_welmwood,3249
-1,1,747,good,11,2164,west_welmwood,2164
-0,1,267,poor,33,1296,northwest,1262.304
-3,2,1117,poor,44,4487,northwest,4271.624
-1,1,581,poor,59,1527,northwest,1407.894
-1,1,687,poor,47,1732,northwest,1638.472
-2,1,683,good,0,3254,west_welmwood,3254
-1,1,587,poor,18,1908,northwest,1908
-0,1,477,poor,44,1467,northwest,1396.584
-3,2,944,good,13,4625,west_welmwood,4625
-0,1,511,great,1,2420,east_elmwood,2420
-3,2,932,poor,36,4444,northwest,4301.792
-0,1,347,good,8,1686,west_welmwood,1686
-0,1,272,good,2,1645,west_welmwood,1645
-2,1,659,great,11,3698,east_elmwood,3698
-1,1,742,great,10,2678,east_elmwood,2678
-2,1,830,great,14,3826,east_elmwood,3826
-2,1,571,great,3,3631,east_elmwood,3631
-3,2,1007,great,5,5339,east_elmwood,5339
-3,2,1059,poor,34,4592,northwest,4463.424
-2,1,823,poor,23,3191,northwest,3171.854
-1,1,705,great,5,2681,east_elmwood,2681
-1,1,381,poor,16,1742,northwest,1742
-1,1,558,good,4,2010,west_welmwood,2010
-1,1,452,poor,35,1637,northwest,1587.89
-1,1,446,good,7,1911,west_welmwood,1911
-3,2,1011,good,11,4704,west_welmwood,4704
-2,1,770,good,5,3285,west_welmwood,3285
-3,2,853,good,11,4592,west_welmwood,4592
-0,1,436,good,2,1838,west_welmwood,1838
-2,1,718,good,11,3197,west_welmwood,3197
-0,1,507,good,5,1835,west_welmwood,1835
-0,1,175,great,11,1985,east_elmwood,1985
-1,1,379,good,9,1785,west_welmwood,1785
-0,1,463,good,4,1859,west_welmwood,1859
-0,1,131,great,10,1917,east_elmwood,1917
-1,1,584,good,7,2039,west_welmwood,2039
-1,1,343,good,2,1866,west_welmwood,1866
-0,1,424,poor,24,1589,northwest,1576.288
-1,1,670,good,3,2153,west_welmwood,2153
-2,1,551,good,8,3103,west_welmwood,3103
-2,1,737,great,14,3733,east_elmwood,3733
-1,1,655,poor,36,1810,northwest,1752.08
-3,2,946,poor,46,4289,northwest,4065.972
-1,1,360,good,1,1841,west_welmwood,1841
-3,2,988,good,2,4846,west_welmwood,4846
-2,1,641,poor,17,3075,northwest,3075
-3,2,1163,good,12,4938,west_welmwood,4938
-3,2,1025,good,14,4745,west_welmwood,4745
-3,2,893,great,0,5228,east_elmwood,5228
-1,1,302,poor,38,1413,northwest,1362.132
-3,2,1183,good,2,4978,west_welmwood,4978
-1,1,721,great,3,2707,east_elmwood,2707
-2,1,670,poor,18,3137,northwest,3137
-3,2,1138,great,12,5408,east_elmwood,5408
-0,1,144,good,11,1436,west_welmwood,1436
-1,1,543,good,9,1927,west_welmwood,1927
-3,2,838,poor,63,4086,northwest,3734.604
-1,1,478,poor,16,1833,northwest,1833
-2,1,760,good,2,3319,west_welmwood,3319
-3,2,1054,good,9,4775,west_welmwood,4775
-1,1,738,poor,55,1641,northwest,1526.13
-1,1,331,poor,52,1321,northwest,1236.456
-2,1,881,great,6,3923,east_elmwood,3923
-2,1,916,poor,29,3211,northwest,3153.202
-1,1,592,good,12,1923,west_welmwood,1923
-0,1,250,great,5,2088,east_elmwood,2088
-3,2,1233,poor,36,4710,northwest,4559.28
-1,1,672,great,14,2547,east_elmwood,2547
-0,1,522,great,7,2318,east_elmwood,2318
-1,1,523,poor,59,1465,northwest,1350.73
-3,2,1116,great,1,5470,east_elmwood,5470
-0,1,389,good,14,1647,west_welmwood,1647
-3,2,1008,great,13,5198,east_elmwood,5198
-0,1,320,great,9,2173,east_elmwood,2173
-2,1,707,good,1,3301,west_welmwood,3301
-2,1,776,great,5,3870,east_elmwood,3870
-1,1,607,great,6,2582,east_elmwood,2582
-2,1,931,good,1,3475,west_welmwood,3475
-3,2,886,great,2,5224,east_elmwood,5224
-2,1,785,good,14,3277,west_welmwood,3277
-0,1,373,great,4,2199,east_elmwood,2199
-3,2,1243,good,7,4983,west_welmwood,4983
-0,1,549,poor,48,1454,northwest,1372.576
-2,1,620,poor,58,2641,northwest,2440.284
-2,1,921,good,1,3550,west_welmwood,3550
-0,1,106,poor,25,1239,northwest,1226.61
-1,1,623,good,4,2039,west_welmwood,2039
-0,1,328,great,9,2097,east_elmwood,2097
-3,2,1129,poor,37,4581,northwest,4425.246
-3,2,857,poor,42,4237,northwest,4050.572
-3,2,999,poor,32,4567,northwest,4457.392
-3,2,966,good,13,4681,west_welmwood,4681
-3,2,897,great,11,5107,east_elmwood,5107
-1,1,369,good,2,1807,west_welmwood,1807
-3,2,817,good,12,4541,west_welmwood,4541
-1,1,317,good,11,1688,west_welmwood,1688
-3,2,1228,great,8,5521,east_elmwood,5521
-1,1,479,good,14,1800,west_welmwood,1800
-1,1,748,good,12,2082,west_welmwood,2082
-1,1,330,good,11,1680,west_welmwood,1680
-1,1,398,good,14,1723,west_welmwood,1723
-2,1,533,poor,50,2623,northwest,2465.62
-3,2,830,good,3,4664,west_welmwood,4664
-3,2,1050,good,8,4783,west_welmwood,4783
-3,2,942,poor,64,4201,northwest,3831.312
-2,1,621,poor,42,2841,northwest,2715.996
-3,2,1221,great,9,5522,east_elmwood,5522
-0,1,259,poor,37,1288,northwest,1244.208
-1,1,599,poor,44,1645,northwest,1566.04
-2,1,860,good,3,3461,west_welmwood,3461
-0,1,330,good,2,1735,west_welmwood,1735
-2,1,736,great,8,3773,east_elmwood,3773
-1,1,687,great,14,2532,east_elmwood,2532
-3,2,1040,poor,24,4695,northwest,4657.44
-1,1,718,great,11,2640,east_elmwood,2640
-1,1,313,poor,15,1656,northwest,1656
-3,2,1008,poor,37,4492,northwest,4339.272
-1,1,550,poor,42,1585,northwest,1515.26
-1,1,467,poor,60,1407,northwest,1294.44
-0,1,472,great,9,2243,east_elmwood,2243
-0,1,189,great,4,2002,east_elmwood,2002
-2,1,935,poor,15,3429,northwest,3429
-2,1,638,poor,37,2843,northwest,2746.338
-3,2,803,great,5,5055,east_elmwood,5055
-2,1,734,poor,37,2940,northwest,2840.04
-3,2,815,good,5,4566,west_welmwood,4566
-2,1,618,good,0,3228,west_welmwood,3228
-2,1,519,poor,30,2790,northwest,2734.2
-3,2,1163,great,7,5429,east_elmwood,5429
-0,1,145,good,0,1542,west_welmwood,1542
-2,1,805,good,1,3345,west_welmwood,3345
-0,1,512,great,13,2318,east_elmwood,2318
-0,1,515,great,8,2307,east_elmwood,2307
-0,1,470,great,10,2251,east_elmwood,2251
-2,1,845,good,14,3312,west_welmwood,3312
-2,1,676,great,2,3726,east_elmwood,3726
-0,1,157,good,3,1523,west_welmwood,1523
-3,2,905,good,8,4666,west_welmwood,4666
-1,1,335,poor,34,1461,northwest,1420.092
-3,2,1240,great,7,5555,east_elmwood,5555
-1,1,686,good,9,2093,west_welmwood,2093
-1,1,437,good,2,1948,west_welmwood,1948
-3,2,1217,poor,29,4766,northwest,4680.212
-3,2,854,poor,47,4256,northwest,4026.176
-0,1,511,good,12,1787,west_welmwood,1787
-1,1,494,poor,18,1798,northwest,1798
-3,2,876,good,13,4560,west_welmwood,4560
-1,1,653,good,6,2057,west_welmwood,2057
-2,1,537,good,14,2997,west_welmwood,2997
-2,1,670,good,5,3258,west_welmwood,3258
-1,1,477,good,4,1980,west_welmwood,1980
-2,1,775,good,8,3265,west_welmwood,3265
-0,1,332,good,8,1670,west_welmwood,1670
-3,2,1214,good,3,5062,west_welmwood,5062
-2,1,631,poor,57,2618,northwest,2424.268
-2,1,801,good,4,3357,west_welmwood,3357
-3,2,847,good,10,4606,west_welmwood,4606
-2,1,672,great,8,3699,east_elmwood,3699
-2,1,903,good,2,3468,west_welmwood,3468
-0,1,513,poor,55,1386,northwest,1288.98
-1,1,642,great,7,2551,east_elmwood,2551
-3,2,973,poor,42,4398,northwest,4204.488
-1,1,452,great,2,2458,east_elmwood,2458
-0,1,119,good,5,1440,west_welmwood,1440
-2,1,763,good,12,3228,west_welmwood,3228
-1,1,365,good,10,1745,west_welmwood,1745
-1,1,548,poor,47,1556,northwest,1471.976
-2,1,920,good,6,3486,west_welmwood,3486
-0,1,303,great,5,2138,east_elmwood,2138
-1,1,664,poor,27,1938,northwest,1910.868
-2,1,773,good,11,3249,west_welmwood,3249
-0,1,143,good,7,1510,west_welmwood,1510
-0,1,286,good,14,1554,west_welmwood,1554
-3,2,850,poor,45,4232,northwest,4020.4
-1,1,670,poor,42,1761,northwest,1683.516
-0,1,165,poor,55,1006,northwest,935.58
-0,1,322,poor,45,1297,northwest,1232.15
-1,1,496,good,3,1988,west_welmwood,1988
-2,1,777,good,4,3358,west_welmwood,3358
-0,1,290,good,7,1597,west_welmwood,1597
-0,1,281,great,14,2027,east_elmwood,2027
-0,1,185,poor,56,1063,northwest,986.464
-1,1,601,poor,23,1915,northwest,1903.51
-0,1,156,poor,33,1264,northwest,1231.136
-3,2,967,poor,18,4644,northwest,4644
-2,1,687,good,7,3253,west_welmwood,3253
-0,1,102,great,13,1852,east_elmwood,1852
-0,1,237,good,3,1577,west_welmwood,1577
-3,2,1076,great,7,5330,east_elmwood,5330
-0,1,308,poor,47,1206,northwest,1140.876
-1,1,715,poor,27,1966,northwest,1938.476
-3,2,1095,poor,38,4598,northwest,4432.472
-3,2,1110,good,14,4784,west_welmwood,4784
-2,1,788,good,2,3352,west_welmwood,3352
-2,1,568,great,4,3629,east_elmwood,3629
-3,2,1097,poor,35,4553,northwest,4416.41
-1,1,540,good,13,1885,west_welmwood,1885
-0,1,507,poor,30,1624,northwest,1591.52
-3,2,919,good,14,4647,west_welmwood,4647
-0,1,314,poor,35,1320,northwest,1280.4
-3,2,1191,poor,43,4637,northwest,4423.698
-3,2,919,good,0,4734,west_welmwood,4734
-0,1,343,great,4,2199,east_elmwood,2199
-2,1,574,great,3,3633,east_elmwood,3633
-2,1,755,good,5,3343,west_welmwood,3343
-3,2,874,good,7,4629,west_welmwood,4629
-0,1,504,poor,29,1602,northwest,1573.164
-0,1,444,poor,41,1477,northwest,1414.966
-0,1,386,good,9,1741,west_welmwood,1741
-0,1,481,poor,51,1404,northwest,1316.952
-1,1,696,poor,32,1908,northwest,1862.208
-3,2,840,poor,15,4547,northwest,4547
-1,1,697,good,7,2115,west_welmwood,2115
-1,1,676,good,2,2202,west_welmwood,2202
-3,2,1094,great,9,5382,east_elmwood,5382
-2,1,669,poor,28,3004,northwest,2955.936
-1,1,340,good,10,1729,west_welmwood,1729
-1,1,475,good,1,1923,west_welmwood,1923
-0,1,340,good,1,1728,west_welmwood,1728
-1,1,389,good,2,1846,west_welmwood,1846
-1,1,623,poor,31,1787,northwest,1747.686
-0,1,264,good,12,1525,west_welmwood,1525
-1,1,701,poor,60,1578,northwest,1451.76
-3,2,1069,good,12,4760,west_welmwood,4760
-2,1,908,good,1,3465,west_welmwood,3465
-1,1,745,good,0,2253,west_welmwood,2253
-3,2,989,poor,43,4431,northwest,4227.174
-0,1,470,great,9,2315,east_elmwood,2315
-1,1,599,good,6,2035,west_welmwood,2035
-3,2,885,good,11,4622,west_welmwood,4622
-2,1,508,good,11,2950,west_welmwood,2950
-2,1,763,good,2,3372,west_welmwood,3372
-2,1,545,great,10,3587,east_elmwood,3587
-1,1,505,great,1,2538,east_elmwood,2538
-0,1,262,poor,18,1434,northwest,1434
-3,2,812,poor,48,4166,northwest,3932.704
-2,1,592,poor,55,2638,northwest,2453.34
-1,1,356,poor,38,1450,northwest,1397.8
-2,1,519,good,5,3082,west_welmwood,3082
-2,1,877,poor,23,3254,northwest,3234.476
-2,1,644,great,14,3579,east_elmwood,3579
-1,1,745,good,12,2161,west_welmwood,2161
-1,1,747,good,7,2205,west_welmwood,2205
-1,1,380,great,11,2231,east_elmwood,2231
-1,1,459,poor,62,1349,northwest,1235.684
-3,2,1073,good,11,4773,west_welmwood,4773
-0,1,376,good,7,1726,west_welmwood,1726
-2,1,757,poor,38,2969,northwest,2862.116
-1,1,658,poor,33,1807,northwest,1760.018
-2,1,816,great,11,3848,east_elmwood,3848
-2,1,848,good,11,3371,west_welmwood,3371
-0,1,259,good,2,1634,west_welmwood,1634
-2,1,607,good,4,3142,west_welmwood,3142
-3,2,894,good,6,4719,west_welmwood,4719
-1,1,339,poor,64,1220,northwest,1112.64
-2,1,756,poor,40,2930,northwest,2812.8
-0,1,209,good,9,1518,west_welmwood,1518
-2,1,567,good,5,3070,west_welmwood,3070
-0,1,504,good,0,1877,west_welmwood,1877
-0,1,486,poor,52,1344,northwest,1257.984
-1,1,666,good,11,2086,west_welmwood,2086
-2,1,532,great,6,3558,east_elmwood,3558
-1,1,503,good,12,1840,west_welmwood,1840
-1,1,453,poor,56,1345,northwest,1248.16
-2,1,659,great,5,3716,east_elmwood,3716
-0,1,278,good,0,1661,west_welmwood,1661
-2,1,524,good,5,3083,west_welmwood,3083
-1,1,527,good,11,1904,west_welmwood,1904
-1,1,661,poor,52,1601,northwest,1498.536
-3,2,1025,good,13,4775,west_welmwood,4775
-3,2,810,good,3,4660,west_welmwood,4660
-3,2,1086,good,13,4843,west_welmwood,4843
-3,2,812,good,0,4661,west_welmwood,4661
-3,2,881,good,0,4742,west_welmwood,4742
-1,1,510,great,12,2428,east_elmwood,2428
-1,1,378,good,11,1815,west_welmwood,1815
-2,1,889,great,10,3934,east_elmwood,3934
-1,1,748,poor,18,2038,northwest,2038
-1,1,476,poor,57,1366,northwest,1264.916
-3,2,931,poor,31,4433,northwest,4335.474
-0,1,111,good,9,1468,west_welmwood,1468
-1,1,571,poor,36,1757,northwest,1700.776
-2,1,909,poor,63,2898,northwest,2648.772
-3,2,1131,poor,35,4601,northwest,4462.97
-1,1,552,great,9,2446,east_elmwood,2446
-0,1,419,good,14,1709,west_welmwood,1709
-2,1,551,good,6,3130,west_welmwood,3130
-3,2,1120,good,7,4938,west_welmwood,4938
-1,1,647,poor,58,1536,northwest,1419.264
-0,1,435,good,0,1785,west_welmwood,1785
-0,1,222,good,1,1656,west_welmwood,1656
-0,1,468,good,11,1725,west_welmwood,1725
-2,1,561,poor,47,2693,northwest,2547.578
-1,1,747,poor,59,1699,northwest,1566.478
-1,1,696,great,14,2573,east_elmwood,2573
-1,1,440,good,13,1797,west_welmwood,1797
-1,1,686,great,9,2600,east_elmwood,2600
-2,1,712,great,7,3701,east_elmwood,3701
-0,1,302,poor,26,1432,northwest,1414.816
-2,1,626,good,14,3074,west_welmwood,3074
-1,1,700,poor,55,1663,northwest,1546.59
-1,1,409,good,12,1808,west_welmwood,1808
-3,2,880,good,4,4678,west_welmwood,4678
-3,2,915,good,5,4713,west_welmwood,4713
-1,1,748,poor,48,1756,northwest,1657.664
-0,1,227,good,9,1487,west_welmwood,1487
-1,1,390,good,8,1840,west_welmwood,1840
-0,1,460,good,0,1902,west_welmwood,1902
-1,1,392,good,8,1782,west_welmwood,1782
-2,1,921,good,14,3383,west_welmwood,3383
-2,1,756,good,3,3336,west_welmwood,3336
-1,1,604,great,1,2583,east_elmwood,2583
-3,2,832,great,3,5137,east_elmwood,5137
-3,2,1022,great,2,5386,east_elmwood,5386
-2,1,517,poor,36,2756,northwest,2667.808
-3,2,1018,good,6,4847,west_welmwood,4847
-0,1,422,good,7,1727,west_welmwood,1727
-2,1,716,good,8,3283,west_welmwood,3283
-0,1,301,great,3,2172,east_elmwood,2172
-1,1,608,poor,29,1829,northwest,1796.078
-2,1,889,poor,17,3277,northwest,3277
-2,1,577,great,9,3636,east_elmwood,3636
-2,1,671,great,0,3765,east_elmwood,3765
-0,1,315,poor,44,1277,northwest,1215.704
-1,1,709,great,1,2739,east_elmwood,2739
-3,2,1204,poor,63,4400,northwest,4021.6
-0,1,187,great,13,1920,east_elmwood,1920
-0,1,543,poor,31,1677,northwest,1640.106
-1,1,351,good,6,1799,west_welmwood,1799
-2,1,816,poor,45,2952,northwest,2804.4
-3,2,994,poor,53,4323,northwest,4037.682
-1,1,612,good,2,2102,west_welmwood,2102
-0,1,447,good,8,1798,west_welmwood,1798
-0,1,457,good,7,1744,west_welmwood,1744
-3,2,1211,good,8,4968,west_welmwood,4968
-0,1,284,poor,60,1041,northwest,957.72
-1,1,741,poor,19,2035,northwest,2035
-0,1,425,great,2,2298,east_elmwood,2298
-1,1,308,poor,36,1491,northwest,1443.288
-1,1,390,good,8,1776,west_welmwood,1776
-1,1,675,great,14,2529,east_elmwood,2529
-0,1,283,poor,44,1262,northwest,1201.424
-1,1,460,good,1,1938,west_welmwood,1938
-1,1,678,great,4,2609,east_elmwood,2609
-2,1,892,great,11,3925,east_elmwood,3925
-3,2,1149,good,7,4978,west_welmwood,4978
-3,2,876,great,2,5207,east_elmwood,5207
-1,1,588,poor,21,1850,northwest,1846.3
-3,2,1005,good,5,4794,west_welmwood,4794
-2,1,636,good,3,3193,west_welmwood,3193
-3,2,955,good,3,4763,west_welmwood,4763
-1,1,661,poor,37,1741,northwest,1681.806
-1,1,477,great,7,2447,east_elmwood,2447
-3,2,1230,good,4,5070,west_welmwood,5070
-2,1,735,great,13,3698,east_elmwood,3698
-2,1,630,poor,60,2667,northwest,2453.64
-2,1,506,good,12,3017,west_welmwood,3017
-3,2,1191,poor,18,4896,northwest,4896
-3,2,1103,poor,44,4475,northwest,4260.2
-2,1,760,great,9,3780,east_elmwood,3780
-3,2,979,good,14,4693,west_welmwood,4693
-0,1,440,great,7,2231,east_elmwood,2231
-1,1,359,great,12,2286,east_elmwood,2286
-0,1,125,poor,38,1104,northwest,1064.256
-1,1,692,good,0,2147,west_welmwood,2147
-0,1,503,poor,49,1455,northwest,1370.61
-1,1,659,good,12,2025,west_welmwood,2025
-3,2,995,good,12,4682,west_welmwood,4682
-0,1,435,poor,27,1522,northwest,1500.692
-3,2,1028,great,14,5204,east_elmwood,5204
-3,2,880,good,5,4676,west_welmwood,4676
-2,1,796,poor,15,3261,northwest,3261
-0,1,252,great,6,2140,east_elmwood,2140
-1,1,675,good,11,2112,west_welmwood,2112
-0,1,133,good,3,1482,west_welmwood,1482
-0,1,400,poor,18,1622,northwest,1622
-3,2,954,poor,19,4629,northwest,4629
-1,1,332,great,12,2174,east_elmwood,2174
-3,2,1178,poor,53,4479,northwest,4183.386
-0,1,334,good,6,1698,west_welmwood,1698
-0,1,176,good,2,1548,west_welmwood,1548
-2,1,854,poor,58,2909,northwest,2687.916
-2,1,846,good,1,3461,west_welmwood,3461
-2,1,543,good,2,3129,west_welmwood,3129
-2,1,620,good,1,3225,west_welmwood,3225
-3,2,911,poor,31,4419,northwest,4321.782
-0,1,304,great,1,2241,east_elmwood,2241
-0,1,241,great,3,2116,east_elmwood,2116
-0,1,453,great,13,2269,east_elmwood,2269
-2,1,589,great,11,3617,east_elmwood,3617
-2,1,942,poor,15,3419,northwest,3419
-2,1,678,poor,37,2936,northwest,2836.176
-1,1,465,poor,42,1519,northwest,1452.164
-0,1,256,great,0,2140,east_elmwood,2140
-2,1,561,poor,56,2580,northwest,2394.24
-3,2,1032,good,4,4804,west_welmwood,4804
-0,1,502,good,0,1915,west_welmwood,1915
-1,1,617,poor,35,1739,northwest,1686.83
-3,2,846,good,10,4620,west_welmwood,4620
-2,1,632,poor,37,2874,northwest,2776.284
-0,1,244,poor,59,1087,northwest,1002.214
-1,1,366,good,12,1708,west_welmwood,1708
-3,2,1097,good,4,4865,west_welmwood,4865
-3,2,931,poor,42,4385,northwest,4192.06
-2,1,531,good,3,3142,west_welmwood,3142
-1,1,428,good,13,1796,west_welmwood,1796
-0,1,427,great,1,2283,east_elmwood,2283
-2,1,682,poor,24,3036,northwest,3011.712
-0,1,181,poor,16,1430,northwest,1430
-2,1,672,good,10,3179,west_welmwood,3179
-2,1,548,poor,35,2762,northwest,2679.14
-2,1,721,good,5,3274,west_welmwood,3274
-1,1,379,poor,56,1283,northwest,1190.624
-0,1,369,poor,17,1592,northwest,1592
-2,1,738,good,2,3329,west_welmwood,3329
-0,1,321,great,14,2034,east_elmwood,2034
-2,1,636,great,9,3684,east_elmwood,3684
-1,1,564,poor,60,1452,northwest,1335.84
-2,1,723,good,7,3250,west_welmwood,3250
-1,1,584,poor,16,1950,northwest,1950
-1,1,307,good,12,1690,west_welmwood,1690
-2,1,946,great,11,3975,east_elmwood,3975
-2,1,655,good,2,3226,west_welmwood,3226
-3,2,1101,good,4,4903,west_welmwood,4903
-2,1,779,poor,27,3101,northwest,3057.586
-1,1,497,poor,28,1737,northwest,1709.208
-3,2,806,poor,55,4066,northwest,3781.38
-1,1,746,good,4,2223,west_welmwood,2223
-0,1,358,poor,30,1462,northwest,1432.76
-0,1,181,good,11,1489,west_welmwood,1489
-1,1,553,good,8,1964,west_welmwood,1964
-2,1,864,poor,15,3299,northwest,3299
-3,2,1040,good,11,4785,west_welmwood,4785
-2,1,516,good,8,3037,west_welmwood,3037
-2,1,517,good,13,3005,west_welmwood,3005
-2,1,835,poor,27,3131,northwest,3087.166
-3,2,1163,good,7,4987,west_welmwood,4987
-3,2,1044,poor,64,4281,northwest,3904.272
-3,2,1025,good,3,4887,west_welmwood,4887
-0,1,103,great,6,1970,east_elmwood,1970
-3,2,990,poor,33,4476,northwest,4359.624
-2,1,575,good,7,3143,west_welmwood,3143
-3,2,1014,good,6,4796,west_welmwood,4796
-3,2,1185,poor,16,4897,northwest,4897
-3,2,1058,good,0,4865,west_welmwood,4865
-1,1,412,great,10,2284,east_elmwood,2284
-0,1,251,good,12,1547,west_welmwood,1547
-2,1,711,good,1,3301,west_welmwood,3301
-2,1,813,good,4,3401,west_welmwood,3401
-2,1,726,poor,33,3016,northwest,2937.584
-0,1,192,poor,19,1434,northwest,1434
-1,1,486,good,11,1922,west_welmwood,1922
-3,2,1198,great,4,5524,east_elmwood,5524
-1,1,316,good,14,1688,west_welmwood,1688
-3,2,1238,great,5,5550,east_elmwood,5550
-3,2,868,good,1,4658,west_welmwood,4658
-1,1,371,great,13,2264,east_elmwood,2264
-2,1,597,good,5,3135,west_welmwood,3135
-0,1,298,good,13,1597,west_welmwood,1597
-3,2,1049,good,6,4831,west_welmwood,4831
-2,1,553,good,11,3063,west_welmwood,3063
-0,1,521,poor,61,1322,northwest,1213.596
-3,2,902,poor,49,4300,northwest,4050.6
-1,1,719,poor,41,1791,northwest,1715.778
-2,1,713,poor,63,2703,northwest,2470.542
-3,2,1196,good,0,5060,west_welmwood,5060
-1,1,426,good,8,1874,west_welmwood,1874
-3,2,1235,poor,44,4671,northwest,4446.792
-2,1,643,great,3,3670,east_elmwood,3670
-0,1,535,poor,26,1692,northwest,1671.696
-2,1,581,poor,29,2913,northwest,2860.566
-0,1,111,poor,62,849,northwest,777.684
-1,1,469,great,6,2432,east_elmwood,2432
-3,2,880,great,11,5103,east_elmwood,5103
-1,1,487,good,8,1909,west_welmwood,1909
-1,1,492,good,14,1834,west_welmwood,1834
-2,1,846,good,4,3394,west_welmwood,3394
-0,1,121,good,1,1522,west_welmwood,1522
-3,2,984,poor,25,4561,northwest,4515.39
-3,2,964,good,8,4705,west_welmwood,4705
-3,2,998,good,3,4818,west_welmwood,4818
-0,1,282,great,3,2119,east_elmwood,2119
-0,1,498,good,12,1811,west_welmwood,1811
-0,1,157,good,9,1511,west_welmwood,1511
-1,1,495,good,4,1969,west_welmwood,1969
-1,1,522,good,4,2029,west_welmwood,2029
-3,2,1036,poor,42,4496,northwest,4298.176
-0,1,441,good,5,1830,west_welmwood,1830
-1,1,361,good,1,1870,west_welmwood,1870
-1,1,452,poor,50,1426,northwest,1340.44
-0,1,441,great,0,2373,east_elmwood,2373
-1,1,396,good,0,1918,west_welmwood,1918
-0,1,220,good,12,1528,west_welmwood,1528
-3,2,894,good,14,4628,west_welmwood,4628
-0,1,111,good,7,1435,west_welmwood,1435
-2,1,881,great,7,3872,east_elmwood,3872
-1,1,613,great,13,2478,east_elmwood,2478
-1,1,380,poor,58,1296,northwest,1197.504
-3,2,985,good,5,4829,west_welmwood,4829
-2,1,784,great,14,3738,east_elmwood,3738
-1,1,431,poor,19,1787,northwest,1787
-2,1,564,great,3,3664,east_elmwood,3664
-1,1,445,poor,64,1296,northwest,1181.952
-0,1,236,good,3,1631,west_welmwood,1631
-2,1,893,good,6,3407,west_welmwood,3407
-1,1,355,good,3,1824,west_welmwood,1824
-1,1,600,poor,42,1655,northwest,1582.18
-3,2,929,great,7,5162,east_elmwood,5162
-2,1,881,poor,30,3146,northwest,3083.08
-3,2,801,poor,22,4462,northwest,4444.152
-1,1,732,good,9,2132,west_welmwood,2132
-1,1,702,poor,23,2003,northwest,1990.982
-3,2,1033,poor,53,4355,northwest,4067.57
-2,1,700,great,9,3691,east_elmwood,3691
-3,2,875,great,5,5145,east_elmwood,5145
-1,1,330,good,3,1798,west_welmwood,1798
-0,1,530,good,0,1961,west_welmwood,1961
-0,1,364,great,8,2215,east_elmwood,2215
-1,1,359,poor,42,1481,northwest,1415.836
-2,1,564,great,2,3645,east_elmwood,3645
-0,1,122,great,13,1897,east_elmwood,1897
-1,1,590,good,0,2125,west_welmwood,2125
-0,1,224,poor,42,1246,northwest,1191.176
-1,1,515,good,12,1869,west_welmwood,1869
-3,2,852,poor,20,4488,northwest,4488
-0,1,515,poor,57,1374,northwest,1272.324
-0,1,394,poor,45,1335,northwest,1268.25
-2,1,881,good,6,3375,west_welmwood,3375
-0,1,221,poor,56,1022,northwest,948.416
-0,1,462,great,5,2299,east_elmwood,2299
-0,1,406,poor,24,1535,northwest,1522.72
-1,1,595,great,4,2540,east_elmwood,2540
-1,1,474,good,11,1855,west_welmwood,1855
-3,2,1249,good,14,4931,west_welmwood,4931
-3,2,1223,good,10,5008,west_welmwood,5008
-3,2,834,good,0,4704,west_welmwood,4704
-0,1,288,poor,38,1288,northwest,1241.632
-3,2,1035,poor,44,4451,northwest,4237.352
-2,1,567,good,2,3183,west_welmwood,3183
-2,1,884,good,4,3430,west_welmwood,3430
-2,1,888,great,4,3900,east_elmwood,3900
-0,1,227,great,9,2064,east_elmwood,2064
-0,1,204,poor,39,1223,northwest,1176.526
-3,2,897,good,13,4660,west_welmwood,4660
-1,1,586,good,5,2056,west_welmwood,2056
-0,1,532,great,10,2315,east_elmwood,2315
-1,1,377,good,5,1839,west_welmwood,1839
-0,1,534,great,5,2429,east_elmwood,2429
-3,2,1201,poor,32,4767,northwest,4652.592
-0,1,163,poor,51,1003,northwest,940.814
-2,1,844,great,4,3905,east_elmwood,3905
-3,2,1024,poor,37,4487,northwest,4334.442
-0,1,196,poor,46,1142,northwest,1082.616
-2,1,661,poor,54,2732,northwest,2546.224
-0,1,317,good,10,1616,west_welmwood,1616
-0,1,162,great,13,1930,east_elmwood,1930
-1,1,721,great,10,2596,east_elmwood,2596
-1,1,472,great,10,2340,east_elmwood,2340
-2,1,650,poor,62,2617,northwest,2397.172
-2,1,535,good,9,3017,west_welmwood,3017
-1,1,311,good,8,1764,west_welmwood,1764
-0,1,371,good,7,1720,west_welmwood,1720
-1,1,524,good,11,1907,west_welmwood,1907
-2,1,580,good,5,3176,west_welmwood,3176
-1,1,681,poor,61,1583,northwest,1453.194
-2,1,943,good,14,3374,west_welmwood,3374
-1,1,309,poor,53,1290,northwest,1204.86
-3,2,1233,good,9,4960,west_welmwood,4960
-0,1,198,poor,49,1133,northwest,1067.286
-2,1,897,great,9,3919,east_elmwood,3919
-3,2,1129,poor,15,4828,northwest,4828
-3,2,1014,great,11,5263,east_elmwood,5263
-3,2,1119,good,9,4864,west_welmwood,4864
-3,2,962,poor,59,4199,northwest,3871.478
-1,1,317,poor,17,1655,northwest,1655
-0,1,165,poor,55,1010,northwest,939.3
-1,1,738,great,7,2648,east_elmwood,2648
-3,2,977,great,9,5281,east_elmwood,5281
-1,1,722,great,7,2686,east_elmwood,2686
-2,1,706,good,7,3187,west_welmwood,3187
-1,1,424,good,9,1811,west_welmwood,1811
-1,1,669,great,6,2594,east_elmwood,2594
-1,1,327,great,9,2252,east_elmwood,2252
-1,1,435,good,3,1915,west_welmwood,1915
-2,1,706,poor,31,2990,northwest,2924.22
-3,2,1200,good,10,4920,west_welmwood,4920
-2,1,622,great,0,3703,east_elmwood,3703
-1,1,338,poor,42,1463,northwest,1398.628
-2,1,724,poor,60,2725,northwest,2507
-0,1,255,good,14,1526,west_welmwood,1526
-1,1,501,poor,33,1718,northwest,1673.332
-2,1,643,great,7,3703,east_elmwood,3703
-3,2,1054,poor,19,4696,northwest,4696
-2,1,817,good,8,3296,west_welmwood,3296
-1,1,301,poor,36,1402,northwest,1357.136
-0,1,261,good,13,1577,west_welmwood,1577
-2,1,925,good,12,3447,west_welmwood,3447
-3,2,939,poor,52,4310,northwest,4034.16
-3,2,1203,poor,22,4848,northwest,4828.608
-0,1,439,great,12,2214,east_elmwood,2214
-1,1,344,poor,28,1603,northwest,1577.352
-1,1,404,poor,28,1602,northwest,1576.368
-2,1,723,good,6,3240,west_welmwood,3240
-1,1,494,great,1,2526,east_elmwood,2526
-2,1,870,great,10,3907,east_elmwood,3907
-1,1,562,good,1,2024,west_welmwood,2024
-1,1,335,poor,46,1347,northwest,1276.956
-3,2,897,good,10,4618,west_welmwood,4618
-2,1,512,good,4,3089,west_welmwood,3089
-1,1,743,good,13,2128,west_welmwood,2128
-3,2,1197,good,8,4984,west_welmwood,4984
-3,2,1221,great,4,5521,east_elmwood,5521
-0,1,327,great,9,2158,east_elmwood,2158
-0,1,199,good,8,1479,west_welmwood,1479
-3,2,942,good,13,4663,west_welmwood,4663
-2,1,616,good,4,3150,west_welmwood,3150
-0,1,380,good,14,1598,west_welmwood,1598
-2,1,707,good,12,3225,west_welmwood,3225
-0,1,498,poor,57,1355,northwest,1254.73
-2,1,566,poor,37,2755,northwest,2661.33
-3,2,1173,good,12,4855,west_welmwood,4855
-0,1,312,poor,28,1385,northwest,1362.84
-0,1,433,great,7,2220,east_elmwood,2220
-3,2,1243,good,0,5140,west_welmwood,5140
-2,1,946,good,12,3467,west_welmwood,3467
-2,1,666,good,1,3245,west_welmwood,3245
-2,1,689,poor,44,2879,northwest,2740.808
-2,1,549,good,11,2999,west_welmwood,2999
-0,1,514,poor,47,1399,northwest,1323.454
-3,2,932,poor,47,4356,northwest,4120.776
-0,1,325,poor,45,1268,northwest,1204.6
-3,2,1245,poor,30,4820,northwest,4723.6
-3,2,817,poor,37,4341,northwest,4193.406
-0,1,347,poor,27,1441,northwest,1420.826
-2,1,701,great,0,3846,east_elmwood,3846
-1,1,534,good,8,2001,west_welmwood,2001
-0,1,543,great,7,2385,east_elmwood,2385
-2,1,893,poor,59,2858,northwest,2635.076
-1,1,402,good,12,1809,west_welmwood,1809
-3,2,1110,great,2,5441,east_elmwood,5441
-0,1,295,poor,17,1566,northwest,1566
-0,1,277,good,9,1603,west_welmwood,1603
-1,1,552,great,4,2499,east_elmwood,2499
-0,1,110,great,0,2000,east_elmwood,2000
-1,1,347,poor,20,1624,northwest,1624
-3,2,1182,great,6,5497,east_elmwood,5497
-0,1,297,good,3,1657,west_welmwood,1657
-2,1,619,good,10,3152,west_welmwood,3152
-2,1,596,good,12,3080,west_welmwood,3080
-0,1,303,poor,35,1308,northwest,1268.76
-0,1,316,good,8,1681,west_welmwood,1681
-1,1,482,good,5,1888,west_welmwood,1888
-2,1,846,poor,16,3277,northwest,3277
-3,2,831,good,14,4539,west_welmwood,4539
-0,1,251,great,10,2056,east_elmwood,2056
-3,2,1154,poor,53,4515,northwest,4217.01
-2,1,564,good,7,3141,west_welmwood,3141
-3,2,1171,good,10,4928,west_welmwood,4928
-0,1,232,poor,27,1367,northwest,1347.862
-2,1,737,poor,56,2739,northwest,2541.792
-2,1,511,poor,47,2682,northwest,2537.172
-1,1,337,good,0,1855,west_welmwood,1855
-1,1,355,poor,22,1619,northwest,1612.524
-0,1,290,poor,48,1257,northwest,1186.608
-3,2,977,good,1,4809,west_welmwood,4809
-2,1,568,great,13,3566,east_elmwood,3566
-2,1,633,poor,57,2693,northwest,2493.718
-0,1,341,poor,39,1382,northwest,1329.484
-2,1,818,good,4,3394,west_welmwood,3394
-0,1,191,good,0,1578,west_welmwood,1578
-1,1,649,poor,27,1829,northwest,1803.394
-0,1,237,good,2,1613,west_welmwood,1613
-3,2,1028,good,8,4790,west_welmwood,4790
-1,1,459,good,12,1827,west_welmwood,1827
-2,1,786,good,0,3413,west_welmwood,3413
-0,1,454,poor,63,1200,northwest,1096.8
-1,1,539,poor,55,1464,northwest,1361.52
-0,1,220,poor,42,1247,northwest,1192.132
-1,1,747,poor,49,1803,northwest,1698.426
-2,1,542,poor,24,2878,northwest,2854.976
-3,2,913,good,11,4640,west_welmwood,4640
-2,1,949,good,0,3548,west_welmwood,3548
-0,1,212,poor,57,1028,northwest,951.928
-2,1,793,good,3,3378,west_welmwood,3378
-0,1,382,poor,29,1477,northwest,1450.414
-0,1,515,poor,52,1424,northwest,1332.864
-3,2,966,great,6,5240,east_elmwood,5240
-3,2,1034,poor,36,4514,northwest,4369.552
-3,2,1192,good,0,5042,west_welmwood,5042
-2,1,787,great,3,3843,east_elmwood,3843
-1,1,326,great,3,2268,east_elmwood,2268
-2,1,760,good,2,3291,west_welmwood,3291
-3,2,996,great,7,5321,east_elmwood,5321
-0,1,148,poor,58,953,northwest,880.572
-1,1,662,good,5,2125,west_welmwood,2125
-3,2,1038,poor,62,4311,northwest,3948.876
-1,1,690,great,5,2667,east_elmwood,2667
-3,2,1029,good,14,4699,west_welmwood,4699
-2,1,847,poor,46,2988,northwest,2832.624
-3,2,922,good,12,4642,west_welmwood,4642
-2,1,606,good,2,3183,west_welmwood,3183
-1,1,385,poor,36,1543,northwest,1493.624
-1,1,546,good,6,1938,west_welmwood,1938
-0,1,472,poor,26,1578,northwest,1559.064
-1,1,419,good,1,1911,west_welmwood,1911
-2,1,550,poor,37,2793,northwest,2698.038
-1,1,617,good,0,2077,west_welmwood,2077
-1,1,495,good,6,1920,west_welmwood,1920
-2,1,686,good,8,3186,west_welmwood,3186
-2,1,930,good,6,3484,west_welmwood,3484
-1,1,420,good,1,1933,west_welmwood,1933
-1,1,494,poor,24,1755,northwest,1740.96
-3,2,1012,good,2,4807,west_welmwood,4807
-3,2,835,poor,43,4232,northwest,4037.328
-2,1,539,great,2,3594,east_elmwood,3594
-0,1,153,great,13,1937,east_elmwood,1937
-2,1,776,great,14,3734,east_elmwood,3734
-1,1,563,good,13,1975,west_welmwood,1975
-3,2,1144,poor,51,4445,northwest,4169.41
-1,1,549,great,8,2478,east_elmwood,2478
-3,2,1015,good,9,4782,west_welmwood,4782
-1,1,446,poor,50,1465,northwest,1377.1
-0,1,531,great,0,2408,east_elmwood,2408
-2,1,538,poor,64,2513,northwest,2291.856
-0,1,279,good,14,1542,west_welmwood,1542
-2,1,860,great,2,3923,east_elmwood,3923
-3,2,1153,poor,31,4693,northwest,4589.754
-0,1,288,poor,36,1323,northwest,1280.664
-0,1,366,great,11,2190,east_elmwood,2190
-2,1,539,good,9,3029,west_welmwood,3029
-1,1,482,poor,61,1327,northwest,1218.186
-0,1,148,good,13,1458,west_welmwood,1458
-0,1,433,good,12,1703,west_welmwood,1703
-0,1,147,good,8,1472,west_welmwood,1472
-3,2,825,poor,30,4326,northwest,4239.48
-3,2,824,good,3,4621,west_welmwood,4621
-1,1,418,great,6,2321,east_elmwood,2321
-0,1,512,poor,55,1385,northwest,1288.05
-0,1,456,poor,42,1470,northwest,1405.32
-1,1,582,good,8,2050,west_welmwood,2050
-0,1,107,good,10,1438,west_welmwood,1438
-1,1,628,great,3,2559,east_elmwood,2559
-2,1,827,poor,41,3053,northwest,2924.774
-0,1,267,poor,30,1378,northwest,1350.44
-0,1,363,good,0,1750,west_welmwood,1750
-2,1,801,great,10,3776,east_elmwood,3776
-1,1,444,good,11,1817,west_welmwood,1817
-0,1,273,poor,29,1379,northwest,1354.178
-3,2,1012,great,9,5268,east_elmwood,5268
-2,1,837,good,10,3374,west_welmwood,3374
-0,1,329,good,12,1585,west_welmwood,1585
-3,2,1119,good,6,4899,west_welmwood,4899
-2,1,611,good,14,3048,west_welmwood,3048
-2,1,541,good,9,3014,west_welmwood,3014
-3,2,848,good,9,4594,west_welmwood,4594
-2,1,730,good,12,3164,west_welmwood,3164
-2,1,602,poor,49,2747,northwest,2587.674
-3,2,1164,poor,43,4604,northwest,4392.216
-1,1,632,good,13,2018,west_welmwood,2018
-2,1,797,good,9,3260,west_welmwood,3260
-1,1,377,great,2,2333,east_elmwood,2333
-2,1,780,great,4,3828,east_elmwood,3828
-3,2,1004,great,13,5216,east_elmwood,5216
-2,1,769,poor,22,3101,northwest,3088.596
-1,1,733,good,0,2230,west_welmwood,2230
-1,1,309,poor,62,1209,northwest,1107.444
-3,2,1178,good,11,4967,west_welmwood,4967
-1,1,464,poor,20,1768,northwest,1768
-2,1,835,good,12,3349,west_welmwood,3349
-3,2,1046,great,7,5363,east_elmwood,5363
-1,1,480,good,3,1923,west_welmwood,1923
-0,1,397,great,11,2173,east_elmwood,2173
-3,2,966,good,6,4768,west_welmwood,4768
-1,1,414,poor,37,1531,northwest,1478.946
-3,2,848,great,0,5242,east_elmwood,5242
-1,1,610,poor,38,1760,northwest,1696.64
-1,1,542,great,7,2480,east_elmwood,2480
-0,1,299,good,3,1669,west_welmwood,1669
-1,1,347,poor,53,1311,northwest,1224.474
-2,1,608,good,9,3107,west_welmwood,3107
-0,1,259,good,11,1594,west_welmwood,1594
-0,1,498,poor,39,1517,northwest,1459.354
-0,1,470,great,11,2308,east_elmwood,2308
-3,2,871,great,0,5265,east_elmwood,5265
-0,1,485,poor,48,1416,northwest,1336.704
-2,1,516,great,7,3546,east_elmwood,3546
-2,1,697,great,1,3787,east_elmwood,3787
-1,1,370,poor,58,1319,northwest,1218.756
-0,1,406,good,10,1685,west_welmwood,1685
-2,1,737,great,0,3878,east_elmwood,3878
-2,1,610,poor,18,3020,northwest,3020
-0,1,392,poor,26,1557,northwest,1538.316
-0,1,429,good,3,1801,west_welmwood,1801
-1,1,368,poor,29,1626,northwest,1596.732
-3,2,1111,good,7,4918,west_welmwood,4918
-2,1,612,great,10,3598,east_elmwood,3598
-2,1,860,good,6,3387,west_welmwood,3387
-2,1,939,good,3,3519,west_welmwood,3519
-1,1,676,poor,34,1848,northwest,1796.256
-1,1,513,good,8,1915,west_welmwood,1915
-3,2,945,good,7,4725,west_welmwood,4725
-1,1,400,poor,22,1636,northwest,1629.456
-1,1,704,great,2,2723,east_elmwood,2723
-1,1,721,poor,29,1931,northwest,1896.242
-2,1,554,good,12,3073,west_welmwood,3073
-0,1,439,good,3,1790,west_welmwood,1790
-0,1,523,good,3,1902,west_welmwood,1902
-1,1,477,good,3,1996,west_welmwood,1996
-1,1,698,good,4,2133,west_welmwood,2133
-0,1,463,poor,27,1582,northwest,1559.852
-1,1,321,poor,50,1307,northwest,1228.58
-0,1,170,good,5,1563,west_welmwood,1563
-3,2,1179,poor,44,4609,northwest,4387.768
-3,2,1117,good,8,4919,west_welmwood,4919
-1,1,672,good,11,2088,west_welmwood,2088
-2,1,937,good,6,3506,west_welmwood,3506
-2,1,545,poor,61,2489,northwest,2284.902
-2,1,532,great,9,3565,east_elmwood,3565
-0,1,528,good,6,1860,west_welmwood,1860
-2,1,813,great,3,3857,east_elmwood,3857
-2,1,821,poor,27,3118,northwest,3074.348
-3,2,1061,poor,44,4442,northwest,4228.784
-1,1,678,good,2,2125,west_welmwood,2125
-1,1,452,poor,18,1771,northwest,1771
-0,1,150,good,4,1490,west_welmwood,1490
-1,1,742,poor,52,1673,northwest,1565.928
-1,1,408,poor,17,1779,northwest,1779
-2,1,949,poor,56,2983,northwest,2768.224
-3,2,1229,great,3,5583,east_elmwood,5583
-0,1,287,great,14,2093,east_elmwood,2093
-0,1,169,great,9,1932,east_elmwood,1932
-2,1,623,good,12,3083,west_welmwood,3083
-3,2,1148,good,2,4931,west_welmwood,4931
-1,1,484,good,1,1999,west_welmwood,1999
-0,1,111,good,5,1510,west_welmwood,1510
-2,1,855,good,1,3433,west_welmwood,3433
-0,1,427,great,2,2312,east_elmwood,2312
-1,1,504,great,9,2416,east_elmwood,2416
-2,1,704,good,14,3142,west_welmwood,3142
-1,1,504,great,14,2346,east_elmwood,2346
-3,2,1127,good,10,4896,west_welmwood,4896
-1,1,501,good,6,1924,west_welmwood,1924
-2,1,923,poor,31,3199,northwest,3128.622
-3,2,894,good,2,4699,west_welmwood,4699
-2,1,778,good,6,3336,west_welmwood,3336
-1,1,698,good,7,2155,west_welmwood,2155
-1,1,376,great,4,2339,east_elmwood,2339
-3,2,1188,good,6,4992,west_welmwood,4992
-2,1,905,poor,16,3349,northwest,3349
-0,1,549,poor,47,1481,northwest,1401.026
-2,1,558,good,3,3115,west_welmwood,3115
-1,1,418,good,3,1880,west_welmwood,1880
-3,2,961,poor,43,4349,northwest,4148.946
-3,2,1112,good,5,4913,west_welmwood,4913
-0,1,467,good,2,1879,west_welmwood,1879
-3,2,1111,good,0,4999,west_welmwood,4999
-1,1,590,good,10,1975,west_welmwood,1975
-0,1,518,poor,62,1315,northwest,1204.54
-1,1,416,great,5,2341,east_elmwood,2341
-3,2,1051,poor,51,4382,northwest,4110.316
-2,1,934,good,10,3440,west_welmwood,3440
-0,1,209,good,3,1628,west_welmwood,1628
-2,1,618,great,12,3642,east_elmwood,3642
-1,1,648,great,10,2563,east_elmwood,2563
-1,1,338,good,10,1725,west_welmwood,1725
-3,2,1073,poor,21,4716,northwest,4706.568
-2,1,511,good,14,2926,west_welmwood,2926
-2,1,701,good,13,3156,west_welmwood,3156
-1,1,419,good,14,1753,west_welmwood,1753
-2,1,806,good,2,3364,west_welmwood,3364
-2,1,667,good,3,3278,west_welmwood,3278
-0,1,428,poor,30,1539,northwest,1508.22
-0,1,270,good,3,1664,west_welmwood,1664
-1,1,465,poor,19,1822,northwest,1822
-2,1,724,great,3,3806,east_elmwood,3806
-3,2,1105,poor,55,4384,northwest,4077.12
-2,1,911,great,1,4037,east_elmwood,4037
-0,1,495,poor,33,1574,northwest,1533.076
-2,1,818,great,1,3883,east_elmwood,3883
-0,1,524,good,4,1902,west_welmwood,1902
-3,2,1011,poor,64,4268,northwest,3892.416
-1,1,439,poor,26,1644,northwest,1624.272
-1,1,326,poor,49,1311,northwest,1234.962
-2,1,584,good,5,3141,west_welmwood,3141
-0,1,278,great,1,2140,east_elmwood,2140
-2,1,929,good,8,3455,west_welmwood,3455
-3,2,903,good,6,4700,west_welmwood,4700
-0,1,307,good,4,1658,west_welmwood,1658
-3,2,945,good,1,4779,west_welmwood,4779
-3,2,871,great,12,5094,east_elmwood,5094
-2,1,742,good,8,3288,west_welmwood,3288
-0,1,400,good,7,1734,west_welmwood,1734
-3,2,1158,great,7,5408,east_elmwood,5408
-3,2,862,poor,28,4473,northwest,4401.432
-1,1,513,poor,52,1540,northwest,1441.44
-0,1,248,good,9,1524,west_welmwood,1524
-3,2,1052,great,9,5345,east_elmwood,5345
-0,1,246,poor,60,1003,northwest,922.76
-3,2,1190,poor,55,4468,northwest,4155.24
-1,1,586,poor,61,1515,northwest,1390.77
-3,2,1219,great,7,5472,east_elmwood,5472
-1,1,371,great,13,2192,east_elmwood,2192
-1,1,729,good,14,2120,west_welmwood,2120
-3,2,986,poor,40,4388,northwest,4212.48
-3,2,1026,good,0,4893,west_welmwood,4893
-2,1,640,good,12,3120,west_welmwood,3120
-2,1,841,poor,26,3139,northwest,3101.332
-3,2,897,poor,35,4427,northwest,4294.19
-0,1,446,good,6,1823,west_welmwood,1823
-2,1,883,poor,22,3278,northwest,3264.888
-3,2,801,good,14,4468,west_welmwood,4468
-1,1,693,good,13,2108,west_welmwood,2108
-0,1,221,poor,40,1250,northwest,1200
-3,2,1116,poor,36,4593,northwest,4446.024
-3,2,1164,good,7,4922,west_welmwood,4922
-3,2,941,good,10,4643,west_welmwood,4643
-2,1,685,good,5,3272,west_welmwood,3272
-0,1,205,great,8,2064,east_elmwood,2064
-0,1,519,good,10,1781,west_welmwood,1781
-3,2,1064,good,13,4750,west_welmwood,4750
-0,1,486,poor,63,1273,northwest,1163.522
-3,2,1235,great,3,5545,east_elmwood,5545
-0,1,297,good,7,1648,west_welmwood,1648
-0,1,121,good,13,1402,west_welmwood,1402
-0,1,140,good,11,1385,west_welmwood,1385
-1,1,540,good,8,1974,west_welmwood,1974
-1,1,407,good,6,1848,west_welmwood,1848
-2,1,557,good,2,3185,west_welmwood,3185
-3,2,1002,poor,60,4225,northwest,3887
-1,1,505,good,11,1885,west_welmwood,1885
-2,1,861,great,12,3801,east_elmwood,3801
-1,1,718,good,14,2072,west_welmwood,2072
-0,1,366,great,6,2209,east_elmwood,2209
-2,1,696,great,10,3728,east_elmwood,3728
-3,2,1123,good,9,4870,west_welmwood,4870
-2,1,713,great,11,3667,east_elmwood,3667
-2,1,884,poor,23,3232,northwest,3212.608
-0,1,505,great,9,2293,east_elmwood,2293
-2,1,659,poor,55,2716,northwest,2525.88
-0,1,369,great,10,2212,east_elmwood,2212
-3,2,1220,poor,45,4615,northwest,4384.25
-2,1,522,great,1,3574,east_elmwood,3574
-2,1,671,poor,32,2946,northwest,2875.296
-2,1,762,good,5,3346,west_welmwood,3346
-0,1,192,poor,53,1111,northwest,1037.674
-1,1,553,poor,24,1803,northwest,1788.576
-1,1,618,great,12,2480,east_elmwood,2480
-1,1,394,poor,35,1534,northwest,1487.98
-3,2,828,poor,48,4230,northwest,3993.12
-1,1,705,poor,36,1873,northwest,1813.064
-3,2,987,poor,52,4355,northwest,4076.28
-3,2,854,great,10,5072,east_elmwood,5072
-2,1,930,poor,62,2932,northwest,2685.712
-0,1,283,good,7,1595,west_welmwood,1595
-0,1,241,good,5,1567,west_welmwood,1567
-0,1,315,good,1,1752,west_welmwood,1752
-2,1,687,poor,45,2880,northwest,2736
-3,2,1120,good,13,4833,west_welmwood,4833
-1,1,698,great,4,2654,east_elmwood,2654
-3,2,1243,poor,60,4474,northwest,4116.08
-0,1,180,great,12,1963,east_elmwood,1963
-3,2,1030,great,7,5314,east_elmwood,5314
-2,1,791,poor,39,3002,northwest,2887.924
-1,1,361,poor,16,1714,northwest,1714
-0,1,529,poor,23,1684,northwest,1673.896
-3,2,1091,poor,50,4429,northwest,4163.26
-2,1,888,good,14,3347,west_welmwood,3347
-1,1,479,great,10,2417,east_elmwood,2417
-0,1,122,great,5,1934,east_elmwood,1934
-2,1,929,good,5,3497,west_welmwood,3497
-0,1,439,good,7,1755,west_welmwood,1755
-3,2,1185,good,5,4999,west_welmwood,4999
-3,2,837,poor,37,4283,northwest,4137.378
-3,2,897,good,8,4674,west_welmwood,4674
-1,1,354,good,10,1776,west_welmwood,1776
-3,2,1205,poor,16,4914,northwest,4914
-3,2,1063,good,9,4835,west_welmwood,4835
-3,2,966,poor,20,4632,northwest,4632
-3,2,1192,great,6,5480,east_elmwood,5480
-1,1,693,poor,42,1807,northwest,1727.492
-0,1,214,poor,34,1305,northwest,1268.46
-3,2,1075,great,10,5320,east_elmwood,5320
-1,1,616,great,13,2456,east_elmwood,2456
-3,2,805,good,6,4632,west_welmwood,4632
-1,1,534,good,14,1936,west_welmwood,1936
-0,1,408,good,7,1705,west_welmwood,1705
-2,1,803,good,2,3354,west_welmwood,3354
-1,1,410,great,1,2365,east_elmwood,2365
-0,1,378,poor,21,1536,northwest,1532.928
-2,1,882,great,7,3892,east_elmwood,3892
-1,1,647,good,12,2041,west_welmwood,2041
-1,1,560,good,9,1930,west_welmwood,1930
-0,1,507,poor,30,1630,northwest,1597.4
-3,2,1129,poor,63,4354,northwest,3979.556
-0,1,115,good,9,1427,west_welmwood,1427
-1,1,318,poor,25,1518,northwest,1502.82
-1,1,315,poor,50,1302,northwest,1223.88
-2,1,748,poor,15,3188,northwest,3188
-2,1,685,poor,33,2938,northwest,2861.612
-0,1,154,poor,37,1180,northwest,1139.88
-0,1,515,good,10,1786,west_welmwood,1786
-1,1,355,good,6,1825,west_welmwood,1825
-3,2,1217,poor,19,4867,northwest,4867
-0,1,430,great,7,2292,east_elmwood,2292
-3,2,1227,good,9,4974,west_welmwood,4974
-3,2,1186,poor,63,4418,northwest,4038.052
-1,1,729,poor,41,1861,northwest,1782.838
-2,1,549,good,9,3018,west_welmwood,3018
-1,1,644,great,4,2611,east_elmwood,2611
-3,2,976,great,1,5331,east_elmwood,5331
-2,1,566,good,10,3052,west_welmwood,3052
-0,1,113,poor,26,1238,northwest,1223.144
-0,1,402,great,7,2248,east_elmwood,2248
-3,2,1243,good,2,5070,west_welmwood,5070
-2,1,893,great,5,3923,east_elmwood,3923
-0,1,104,poor,49,1025,northwest,965.55
-2,1,764,great,9,3784,east_elmwood,3784
-2,1,570,poor,64,2544,northwest,2320.128
-0,1,269,good,9,1546,west_welmwood,1546
-1,1,500,poor,17,1865,northwest,1865
-2,1,699,great,10,3685,east_elmwood,3685
-1,1,492,poor,19,1759,northwest,1759
-1,1,494,poor,49,1480,northwest,1394.16
-2,1,628,poor,53,2656,northwest,2480.704
-2,1,871,poor,37,3139,northwest,3032.274
-2,1,897,great,7,3894,east_elmwood,3894
-3,2,1249,good,12,5017,west_welmwood,5017
-1,1,458,good,6,1862,west_welmwood,1862
-2,1,616,great,0,3709,east_elmwood,3709
-2,1,846,good,6,3405,west_welmwood,3405
-1,1,565,good,11,1980,west_welmwood,1980
-3,2,1197,poor,56,4477,northwest,4154.656
-3,2,1177,good,3,4975,west_welmwood,4975
-2,1,776,good,5,3306,west_welmwood,3306
-3,2,819,good,14,4552,west_welmwood,4552
-0,1,344,good,8,1634,west_welmwood,1634
-0,1,435,good,7,1770,west_welmwood,1770
-1,1,677,great,3,2645,east_elmwood,2645
-2,1,702,poor,46,2831,northwest,2683.788
-1,1,483,good,7,1893,west_welmwood,1893
-1,1,571,good,8,2032,west_welmwood,2032
-1,1,368,good,5,1770,west_welmwood,1770
-2,1,559,poor,35,2786,northwest,2702.42
-0,1,510,poor,15,1781,northwest,1781
-3,2,1054,poor,26,4623,northwest,4567.524
-0,1,459,poor,26,1626,northwest,1606.488
-0,1,325,good,9,1598,west_welmwood,1598
-3,2,878,great,7,5200,east_elmwood,5200
-1,1,467,good,3,1960,west_welmwood,1960
-3,2,1072,good,12,4791,west_welmwood,4791
-1,1,474,poor,40,1607,northwest,1542.72
-0,1,486,good,4,1855,west_welmwood,1855
-0,1,459,good,11,1728,west_welmwood,1728
-1,1,344,great,6,2267,east_elmwood,2267
-2,1,861,poor,43,3040,northwest,2900.16
-3,2,1239,poor,24,4816,northwest,4777.472
-0,1,361,poor,33,1427,northwest,1389.898
-3,2,913,good,1,4794,west_welmwood,4794
-0,1,220,good,5,1619,west_welmwood,1619
-2,1,574,poor,47,2698,northwest,2552.308
-1,1,309,good,2,1832,west_welmwood,1832
-3,2,998,good,12,4758,west_welmwood,4758
-0,1,525,poor,44,1460,northwest,1389.92
-1,1,384,good,7,1776,west_welmwood,1776
-0,1,175,great,13,1952,east_elmwood,1952
-3,2,872,poor,53,4207,northwest,3929.338
-0,1,419,great,7,2209,east_elmwood,2209
-3,2,862,great,13,5060,east_elmwood,5060
-1,1,671,poor,40,1776,northwest,1704.96
-0,1,243,great,11,2075,east_elmwood,2075
-1,1,576,good,12,1992,west_welmwood,1992
-1,1,419,good,13,1777,west_welmwood,1777
-2,1,827,good,10,3303,west_welmwood,3303
-3,2,994,good,1,4871,west_welmwood,4871
-0,1,355,great,14,2076,east_elmwood,2076
-3,2,1024,good,9,4791,west_welmwood,4791
-1,1,340,great,6,2288,east_elmwood,2288
-1,1,433,good,9,1827,west_welmwood,1827
-0,1,473,poor,55,1351,northwest,1256.43
-1,1,579,great,1,2610,east_elmwood,2610
-3,2,1201,good,3,5067,west_welmwood,5067
-1,1,412,poor,64,1248,northwest,1138.176
-1,1,306,great,13,2225,east_elmwood,2225
-3,2,1218,good,0,5106,west_welmwood,5106
-0,1,386,good,2,1746,west_welmwood,1746
-2,1,634,good,12,3135,west_welmwood,3135
-1,1,505,great,10,2402,east_elmwood,2402
-2,1,847,poor,16,3301,northwest,3301
-0,1,126,good,6,1438,west_welmwood,1438
-3,2,1088,good,7,4873,west_welmwood,4873
-3,2,1171,good,13,4918,west_welmwood,4918
-3,2,1182,good,5,4972,west_welmwood,4972
-3,2,1076,poor,38,4529,northwest,4365.956
-2,1,607,poor,52,2714,northwest,2540.304
-1,1,576,poor,28,1804,northwest,1775.136
-3,2,1102,poor,17,4823,northwest,4823
-3,2,1128,good,0,4987,west_welmwood,4987
-1,1,629,good,6,2080,west_welmwood,2080
-0,1,449,poor,20,1602,northwest,1602
-2,1,744,good,3,3345,west_welmwood,3345
-3,2,1172,good,7,4932,west_welmwood,4932
-0,1,315,great,14,2072,east_elmwood,2072
-0,1,154,great,6,2003,east_elmwood,2003
-2,1,631,good,2,3185,west_welmwood,3185
-2,1,503,good,9,2963,west_welmwood,2963
-0,1,333,good,5,1724,west_welmwood,1724
-3,2,803,good,7,4602,west_welmwood,4602
-3,2,969,good,6,4761,west_welmwood,4761
-2,1,624,good,6,3195,west_welmwood,3195
-3,2,1138,good,1,4989,west_welmwood,4989
-2,1,633,good,8,3167,west_welmwood,3167
-3,2,1170,good,11,4900,west_welmwood,4900
-3,2,1107,good,6,4866,west_welmwood,4866
-0,1,147,great,10,1927,east_elmwood,1927
-2,1,788,poor,38,3017,northwest,2908.388
-3,2,1239,good,13,4991,west_welmwood,4991
-1,1,383,great,7,2356,east_elmwood,2356
-0,1,205,good,0,1587,west_welmwood,1587
-0,1,315,good,5,1654,west_welmwood,1654
-2,1,659,great,10,3684,east_elmwood,3684
-1,1,361,poor,53,1348,northwest,1259.032
-1,1,382,poor,43,1469,northwest,1401.426
-2,1,569,poor,37,2830,northwest,2733.78
-1,1,351,great,9,2283,east_elmwood,2283
-0,1,110,poor,55,961,northwest,893.73
-3,2,1160,great,8,5455,east_elmwood,5455
-1,1,376,good,6,1861,west_welmwood,1861
-0,1,119,poor,62,871,northwest,797.836
-0,1,323,great,13,2098,east_elmwood,2098
-0,1,262,good,12,1584,west_welmwood,1584
-0,1,119,poor,15,1373,northwest,1373
-2,1,899,good,12,3397,west_welmwood,3397
-3,2,971,good,5,4819,west_welmwood,4819
-2,1,938,good,12,3393,west_welmwood,3393
-2,1,655,good,3,3246,west_welmwood,3246
-0,1,449,good,9,1746,west_welmwood,1746
-1,1,613,poor,32,1777,northwest,1734.352
-0,1,251,great,2,2139,east_elmwood,2139
-1,1,374,good,11,1775,west_welmwood,1775
-3,2,1081,poor,34,4581,northwest,4452.732
-2,1,514,poor,25,2884,northwest,2855.16
-0,1,450,good,6,1773,west_welmwood,1773
-0,1,519,good,6,1882,west_welmwood,1882
-3,2,831,great,3,5122,east_elmwood,5122
-2,1,568,poor,40,2724,northwest,2615.04
-3,2,1194,good,7,4960,west_welmwood,4960
-2,1,620,poor,44,2758,northwest,2625.616
-3,2,990,great,7,5286,east_elmwood,5286
-2,1,513,great,14,3453,east_elmwood,3453
-0,1,196,good,11,1462,west_welmwood,1462
-2,1,594,good,9,3139,west_welmwood,3139
-0,1,167,poor,55,967,northwest,899.31
-3,2,1098,good,6,4855,west_welmwood,4855
-3,2,1072,good,11,4819,west_welmwood,4819
-1,1,397,good,13,1743,west_welmwood,1743
-1,1,383,good,13,1751,west_welmwood,1751
-2,1,546,poor,60,2515,northwest,2313.8
-3,2,1200,good,0,5080,west_welmwood,5080
-2,1,815,poor,43,2999,northwest,2861.046
-1,1,401,good,9,1768,west_welmwood,1768
-2,1,565,good,2,3137,west_welmwood,3137
-1,1,362,good,1,1883,west_welmwood,1883
-3,2,1156,good,5,4997,west_welmwood,4997
-1,1,502,poor,51,1522,northwest,1427.636
-1,1,487,poor,30,1648,northwest,1615.04
-3,2,1179,good,7,4937,west_welmwood,4937
-0,1,412,poor,37,1438,northwest,1389.108
-1,1,455,good,3,1891,west_welmwood,1891
-1,1,677,good,9,2040,west_welmwood,2040
-2,1,633,good,14,3098,west_welmwood,3098
-3,2,949,good,9,4699,west_welmwood,4699
-3,2,1125,great,2,5435,east_elmwood,5435
-1,1,581,poor,40,1711,northwest,1642.56
-2,1,900,poor,56,2957,northwest,2744.096
-3,2,980,great,3,5251,east_elmwood,5251
-0,1,549,good,0,1940,west_welmwood,1940
-1,1,615,poor,21,1870,northwest,1866.26
-0,1,473,good,14,1770,west_welmwood,1770
-1,1,523,good,7,1941,west_welmwood,1941
-2,1,745,poor,26,3105,northwest,3067.74
-1,1,566,good,7,1980,west_welmwood,1980
-1,1,544,good,7,1998,west_welmwood,1998
-2,1,903,poor,63,2855,northwest,2609.47
-2,1,600,great,6,3601,east_elmwood,3601
-1,1,465,good,0,1994,west_welmwood,1994
-2,1,675,poor,27,3009,northwest,2966.874
-3,2,893,great,7,5126,east_elmwood,5126
-3,2,1111,great,1,5487,east_elmwood,5487
-3,2,1144,poor,20,4783,northwest,4783
-2,1,508,poor,24,2863,northwest,2840.096
-1,1,551,great,12,2456,east_elmwood,2456
-2,1,676,poor,29,2943,northwest,2890.026
-1,1,699,good,7,2090,west_welmwood,2090
-1,1,429,good,7,1843,west_welmwood,1843
-1,1,590,good,4,2030,west_welmwood,2030
-0,1,128,good,4,1496,west_welmwood,1496
-3,2,1150,poor,57,4427,northwest,4099.402
-1,1,392,good,12,1758,west_welmwood,1758
-2,1,806,good,8,3286,west_welmwood,3286
-2,1,938,good,10,3427,west_welmwood,3427
-2,1,552,good,5,3072,west_welmwood,3072
-1,1,316,great,8,2283,east_elmwood,2283
-0,1,319,great,12,2071,east_elmwood,2071
-1,1,483,poor,59,1430,northwest,1318.46
-1,1,400,great,0,2413,east_elmwood,2413
-1,1,586,poor,48,1568,northwest,1480.192
-1,1,484,poor,24,1700,northwest,1686.4
-3,2,859,poor,23,4430,northwest,4403.42
-0,1,534,poor,56,1386,northwest,1286.208
-2,1,936,poor,47,3071,northwest,2905.166
-3,2,911,good,7,4665,west_welmwood,4665
-2,1,900,poor,24,3258,northwest,3231.936
-3,2,933,good,10,4719,west_welmwood,4719
-1,1,464,great,5,2366,east_elmwood,2366
-2,1,773,poor,44,2961,northwest,2818.872
-0,1,203,poor,26,1295,northwest,1279.46
-1,1,321,poor,18,1605,northwest,1605
-1,1,539,good,4,1976,west_welmwood,1976
-1,1,655,poor,45,1723,northwest,1636.85
-3,2,1185,good,4,4991,west_welmwood,4991
-1,1,611,poor,25,1904,northwest,1884.96
-0,1,509,good,11,1833,west_welmwood,1833
-2,1,666,good,8,3165,west_welmwood,3165
-2,1,539,poor,47,2621,northwest,2479.466
-0,1,442,poor,30,1546,northwest,1515.08
-1,1,690,poor,34,1809,northwest,1758.348
-3,2,1062,good,14,4744,west_welmwood,4744
-1,1,428,good,8,1880,west_welmwood,1880
-3,2,814,poor,60,4057,northwest,3732.44
-2,1,742,poor,38,2989,northwest,2881.396
-1,1,618,poor,62,1495,northwest,1369.42
-1,1,384,great,10,2250,east_elmwood,2250
-2,1,506,great,13,3521,east_elmwood,3521
-0,1,286,poor,35,1344,northwest,1303.68
-0,1,152,great,4,1973,east_elmwood,1973
-0,1,131,good,7,1412,west_welmwood,1412
-3,2,1079,good,9,4866,west_welmwood,4866
-2,1,904,poor,33,3141,northwest,3059.334
-2,1,804,good,6,3327,west_welmwood,3327
-0,1,384,good,7,1736,west_welmwood,1736
-0,1,264,poor,27,1344,northwest,1325.184
-0,1,542,poor,17,1816,northwest,1816
-3,2,1236,poor,55,4549,northwest,4230.57
-0,1,487,good,10,1820,west_welmwood,1820
-0,1,286,poor,55,1091,northwest,1014.63
-0,1,351,good,12,1606,west_welmwood,1606
-0,1,534,great,8,2345,east_elmwood,2345
-0,1,461,good,6,1780,west_welmwood,1780
-2,1,768,good,6,3269,west_welmwood,3269
-2,1,949,great,5,3998,east_elmwood,3998
-1,1,638,poor,31,1802,northwest,1762.356
-0,1,135,good,10,1466,west_welmwood,1466
-1,1,724,poor,44,1760,northwest,1675.52
-0,1,131,good,8,1439,west_welmwood,1439
-3,2,1241,great,2,5615,east_elmwood,5615
-0,1,524,great,5,2348,east_elmwood,2348
-1,1,494,good,10,1875,west_welmwood,1875
-1,1,592,poor,33,1739,northwest,1693.786
-2,1,759,poor,61,2720,northwest,2496.96
-0,1,176,good,3,1572,west_welmwood,1572
-1,1,435,poor,16,1814,northwest,1814
-3,2,966,poor,28,4583,northwest,4509.672
-0,1,466,poor,31,1529,northwest,1495.362
-1,1,507,good,8,1932,west_welmwood,1932
-3,2,1012,poor,35,4527,northwest,4391.19
-0,1,533,good,6,1849,west_welmwood,1849
-2,1,837,great,13,3834,east_elmwood,3834
-1,1,509,good,11,1916,west_welmwood,1916
-0,1,165,good,5,1562,west_welmwood,1562
-3,2,1055,great,0,5400,east_elmwood,5400
-0,1,267,good,2,1634,west_welmwood,1634
-2,1,545,good,9,3067,west_welmwood,3067
-3,2,1085,good,1,4931,west_welmwood,4931
-0,1,273,poor,30,1400,northwest,1372
-2,1,692,poor,31,2948,northwest,2883.144
-2,1,527,great,11,3468,east_elmwood,3468
-3,2,1144,good,3,4994,west_welmwood,4994
-0,1,288,great,0,2151,east_elmwood,2151
-0,1,313,poor,45,1239,northwest,1177.05
-0,1,124,good,12,1406,west_welmwood,1406
-1,1,332,poor,36,1496,northwest,1448.128
-2,1,724,good,14,3220,west_welmwood,3220
-1,1,429,great,0,2445,east_elmwood,2445
-2,1,752,good,1,3361,west_welmwood,3361
-3,2,825,great,6,5124,east_elmwood,5124
-3,2,1190,good,1,5008,west_welmwood,5008
-2,1,579,poor,58,2575,northwest,2379.3
-3,2,1069,poor,49,4472,northwest,4212.624
-0,1,345,poor,45,1309,northwest,1243.55
-0,1,293,great,8,2124,east_elmwood,2124
-0,1,134,good,4,1490,west_welmwood,1490
-1,1,477,poor,31,1643,northwest,1606.854
-0,1,211,poor,35,1281,northwest,1242.57
-3,2,1106,good,3,4951,west_welmwood,4951
-1,1,429,great,3,2442,east_elmwood,2442
-3,2,1186,poor,45,4551,northwest,4323.45
-0,1,287,good,13,1594,west_welmwood,1594
-2,1,927,great,0,3981,east_elmwood,3981
-3,2,1073,great,4,5379,east_elmwood,5379
-0,1,166,poor,15,1394,northwest,1394
-2,1,598,great,7,3600,east_elmwood,3600
-2,1,908,good,5,3482,west_welmwood,3482
-2,1,788,good,1,3364,west_welmwood,3364
-1,1,330,poor,43,1414,northwest,1348.956
-3,2,1233,good,9,4982,west_welmwood,4982
-1,1,496,good,3,1929,west_welmwood,1929
-3,2,1209,good,6,5039,west_welmwood,5039
-3,2,1224,poor,55,4500,northwest,4185
-1,1,409,good,2,1863,west_welmwood,1863
-2,1,605,good,10,3120,west_welmwood,3120
-2,1,517,poor,17,2916,northwest,2916
-1,1,515,great,0,2558,east_elmwood,2558
-1,1,373,great,4,2363,east_elmwood,2363
-2,1,637,great,8,3702,east_elmwood,3702
-0,1,219,poor,46,1158,northwest,1097.784
-0,1,151,great,0,2053,east_elmwood,2053
-0,1,187,great,3,2058,east_elmwood,2058
-0,1,158,poor,20,1338,northwest,1338
-0,1,146,good,6,1463,west_welmwood,1463
-1,1,493,good,4,1960,west_welmwood,1960
-1,1,348,good,2,1853,west_welmwood,1853
-2,1,603,poor,22,2956,northwest,2944.176
-1,1,546,good,9,1930,west_welmwood,1930
-3,2,1144,good,2,4932,west_welmwood,4932
-0,1,413,good,12,1674,west_welmwood,1674
-0,1,341,great,12,2156,east_elmwood,2156
-2,1,624,great,12,3642,east_elmwood,3642
-1,1,478,great,5,2434,east_elmwood,2434
-1,1,683,poor,64,1559,northwest,1421.808
-1,1,581,poor,34,1782,northwest,1732.104
-3,2,1210,good,1,5081,west_welmwood,5081
-1,1,658,great,3,2633,east_elmwood,2633
-2,1,758,poor,58,2745,northwest,2536.38
-1,1,611,good,2,2109,west_welmwood,2109
-1,1,522,good,10,1878,west_welmwood,1878
-1,1,612,great,11,2516,east_elmwood,2516
-0,1,408,good,2,1783,west_welmwood,1783
-2,1,789,poor,34,3021,northwest,2936.412
-3,2,963,good,7,4710,west_welmwood,4710
-2,1,718,great,1,3856,east_elmwood,3856
-0,1,403,poor,40,1428,northwest,1370.88
-1,1,598,poor,22,1879,northwest,1871.484
-1,1,689,good,1,2183,west_welmwood,2183
-0,1,418,great,1,2314,east_elmwood,2314
-2,1,727,poor,54,2809,northwest,2617.988
-1,1,738,great,8,2665,east_elmwood,2665
-3,2,1130,great,10,5400,east_elmwood,5400
-3,2,1057,good,9,4773,west_welmwood,4773
-1,1,719,great,0,2669,east_elmwood,2669
-1,1,472,good,1,1971,west_welmwood,1971
-1,1,745,good,6,2170,west_welmwood,2170
-1,1,443,good,5,1934,west_welmwood,1934
-3,2,1075,great,14,5236,east_elmwood,5236
-3,2,1144,good,2,5021,west_welmwood,5021
-1,1,339,great,8,2210,east_elmwood,2210
-1,1,575,good,5,1995,west_welmwood,1995
-2,1,542,good,1,3084,west_welmwood,3084
-0,1,439,good,4,1793,west_welmwood,1793
-2,1,877,great,3,3963,east_elmwood,3963
-0,1,457,poor,46,1407,northwest,1333.836
-0,1,388,poor,36,1380,northwest,1335.84
-0,1,484,poor,21,1677,northwest,1673.646
-0,1,500,poor,21,1687,northwest,1683.626
-3,2,942,good,0,4781,west_welmwood,4781
-3,2,1204,poor,64,4437,northwest,4046.544
-3,2,1102,good,13,4820,west_welmwood,4820
-1,1,603,poor,44,1712,northwest,1629.824
-1,1,499,good,6,1979,west_welmwood,1979
-2,1,582,good,3,3200,west_welmwood,3200
-3,2,874,poor,44,4292,northwest,4085.984
-3,2,967,great,9,5269,east_elmwood,5269
-1,1,668,great,4,2619,east_elmwood,2619
-0,1,530,great,7,2369,east_elmwood,2369
-2,1,750,good,11,3190,west_welmwood,3190
-0,1,263,good,4,1653,west_welmwood,1653
-1,1,467,good,2,1985,west_welmwood,1985
-3,2,1039,poor,39,4471,northwest,4301.102
-0,1,187,good,6,1489,west_welmwood,1489
-0,1,209,poor,50,1100,northwest,1034
-0,1,398,poor,59,1201,northwest,1107.322
-3,2,945,poor,63,4131,northwest,3775.734
-2,1,657,poor,31,2920,northwest,2855.76
-1,1,537,good,1,2023,west_welmwood,2023
-0,1,334,great,2,2231,east_elmwood,2231
-2,1,660,great,9,3678,east_elmwood,3678
-1,1,479,good,6,1903,west_welmwood,1903
-3,2,917,poor,64,4102,northwest,3741.024
-2,1,898,good,7,3389,west_welmwood,3389
-2,1,822,great,1,3950,east_elmwood,3950
-0,1,270,poor,47,1156,northwest,1093.576
-2,1,625,poor,64,2613,northwest,2383.056
-0,1,268,poor,45,1248,northwest,1185.6
-0,1,218,poor,46,1131,northwest,1072.188
-0,1,422,poor,45,1402,northwest,1331.9
-2,1,766,poor,61,2790,northwest,2561.22
-0,1,224,good,3,1638,west_welmwood,1638
-2,1,762,good,1,3326,west_welmwood,3326
-2,1,782,good,3,3318,west_welmwood,3318
-0,1,518,poor,36,1537,northwest,1487.816
-1,1,548,good,1,2076,west_welmwood,2076
-1,1,665,poor,44,1750,northwest,1666
-2,1,723,good,3,3340,west_welmwood,3340
-0,1,373,good,14,1599,west_welmwood,1599
-0,1,442,poor,63,1205,northwest,1101.37
-2,1,743,poor,49,2861,northwest,2695.062
-2,1,872,poor,35,3072,northwest,2979.84
-1,1,504,good,5,1988,west_welmwood,1988
-2,1,829,great,10,3837,east_elmwood,3837
-1,1,314,good,13,1656,west_welmwood,1656
-0,1,418,great,13,2186,east_elmwood,2186
-1,1,312,poor,29,1509,northwest,1481.838
-2,1,548,poor,17,3012,northwest,3012
-1,1,524,great,7,2475,east_elmwood,2475
-3,2,1142,good,14,4841,west_welmwood,4841
-2,1,929,great,9,3918,east_elmwood,3918
-1,1,505,great,3,2499,east_elmwood,2499
-2,1,665,poor,16,3097,northwest,3097
-0,1,109,great,13,1873,east_elmwood,1873
-0,1,290,poor,37,1274,northwest,1230.684
-3,2,1213,good,10,5001,west_welmwood,5001
-2,1,728,good,12,3185,west_welmwood,3185
-2,1,933,good,5,3520,west_welmwood,3520
-2,1,895,poor,23,3263,northwest,3243.422
-2,1,547,poor,23,2896,northwest,2878.624
-0,1,549,poor,41,1587,northwest,1520.346
-0,1,217,poor,34,1258,northwest,1222.776
-1,1,492,poor,56,1414,northwest,1312.192
-1,1,421,good,10,1818,west_welmwood,1818
-3,2,1034,poor,15,4684,northwest,4684
-2,1,944,good,10,3477,west_welmwood,3477
-1,1,347,good,12,1681,west_welmwood,1681
-3,2,1208,good,14,4951,west_welmwood,4951
-0,1,449,good,4,1802,west_welmwood,1802
-0,1,464,great,5,2343,east_elmwood,2343
-0,1,205,poor,55,1085,northwest,1009.05
-1,1,435,good,9,1891,west_welmwood,1891
-1,1,682,great,10,2617,east_elmwood,2617
-1,1,317,poor,57,1244,northwest,1151.944
-1,1,730,great,12,2649,east_elmwood,2649
-1,1,668,good,14,2036,west_welmwood,2036
-3,2,1199,good,4,5000,west_welmwood,5000
-0,1,461,great,11,2224,east_elmwood,2224
-2,1,576,poor,43,2727,northwest,2601.558
-3,2,1028,good,3,4845,west_welmwood,4845
-2,1,601,good,5,3154,west_welmwood,3154
-0,1,122,poor,33,1178,northwest,1147.372
-0,1,390,great,5,2241,east_elmwood,2241
-2,1,845,great,7,3891,east_elmwood,3891
-0,1,511,good,1,1881,west_welmwood,1881
-3,2,998,poor,46,4435,northwest,4204.38
-3,2,1042,good,2,4860,west_welmwood,4860
-0,1,195,great,0,2122,east_elmwood,2122
-1,1,305,poor,17,1636,northwest,1636
-2,1,839,great,13,3820,east_elmwood,3820
-3,2,1218,good,1,5045,west_welmwood,5045
-1,1,443,great,6,2401,east_elmwood,2401
-3,2,1203,poor,57,4453,northwest,4123.478
-1,1,429,good,5,1838,west_welmwood,1838
-3,2,1189,good,10,4899,west_welmwood,4899
-1,1,303,good,14,1695,west_welmwood,1695
-0,1,254,poor,64,1061,northwest,967.632
-0,1,208,poor,55,1053,northwest,979.29
-1,1,453,great,8,2397,east_elmwood,2397
-0,1,230,great,14,2012,east_elmwood,2012
-0,1,198,good,9,1470,west_welmwood,1470
-3,2,1132,good,13,4886,west_welmwood,4886
-0,1,314,great,5,2184,east_elmwood,2184
-2,1,882,good,12,3382,west_welmwood,3382
-3,2,1056,good,6,4849,west_welmwood,4849
-3,2,1072,great,7,5385,east_elmwood,5385
-0,1,154,good,1,1536,west_welmwood,1536
-1,1,612,poor,48,1668,northwest,1574.592
-0,1,201,good,7,1539,west_welmwood,1539
-2,1,540,good,10,3068,west_welmwood,3068
-3,2,1088,good,13,4829,west_welmwood,4829
-3,2,804,great,12,5012,east_elmwood,5012
-3,2,904,good,12,4659,west_welmwood,4659
-1,1,600,good,8,1992,west_welmwood,1992
-2,1,614,great,7,3612,east_elmwood,3612
-0,1,197,good,9,1472,west_welmwood,1472
-1,1,544,poor,24,1838,northwest,1823.296
-2,1,551,good,8,3079,west_welmwood,3079
-1,1,560,good,4,2067,west_welmwood,2067
-2,1,742,great,12,3692,east_elmwood,3692
-0,1,145,good,2,1486,west_welmwood,1486
-2,1,830,great,2,3897,east_elmwood,3897
-1,1,404,great,4,2348,east_elmwood,2348
-0,1,525,poor,57,1321,northwest,1223.246
-0,1,142,great,12,1940,east_elmwood,1940
-3,2,1088,good,11,4836,west_welmwood,4836
-0,1,232,poor,32,1280,northwest,1249.28
-2,1,598,poor,41,2780,northwest,2663.24
-0,1,199,great,4,2096,east_elmwood,2096
-2,1,584,great,6,3607,east_elmwood,3607
-3,2,857,good,9,4606,west_welmwood,4606
-1,1,530,great,14,2405,east_elmwood,2405
-2,1,614,poor,54,2714,northwest,2529.448
-0,1,363,good,11,1690,west_welmwood,1690
-2,1,649,poor,60,2663,northwest,2449.96
-2,1,535,good,9,2996,west_welmwood,2996
-3,2,1041,good,12,4744,west_welmwood,4744
-2,1,844,poor,53,2908,northwest,2716.072
-2,1,576,poor,64,2516,northwest,2294.592
-2,1,872,good,5,3402,west_welmwood,3402
-3,2,1172,good,10,4946,west_welmwood,4946
-3,2,874,great,9,5121,east_elmwood,5121
-3,2,1217,poor,34,4729,northwest,4596.588
-3,2,1220,poor,47,4600,northwest,4351.6
-3,2,1123,poor,46,4519,northwest,4284.012
-0,1,276,great,5,2095,east_elmwood,2095
-2,1,688,good,8,3246,west_welmwood,3246
-1,1,577,great,9,2528,east_elmwood,2528
-3,2,1232,great,6,5518,east_elmwood,5518
-3,2,973,poor,54,4298,northwest,4005.736
-1,1,668,poor,36,1815,northwest,1756.92
-1,1,711,good,11,2083,west_welmwood,2083
-0,1,235,good,14,1478,west_welmwood,1478
-3,2,1114,good,14,4836,west_welmwood,4836
-2,1,741,great,2,3832,east_elmwood,3832
-0,1,210,poor,52,1131,northwest,1058.616
-1,1,699,good,0,2162,west_welmwood,2162
-1,1,612,poor,33,1771,northwest,1724.954
-3,2,978,poor,20,4651,northwest,4651
-3,2,1160,poor,28,4737,northwest,4661.208
-0,1,103,good,9,1411,west_welmwood,1411
-1,1,739,great,9,2698,east_elmwood,2698
-1,1,589,poor,18,1948,northwest,1948
-0,1,429,great,8,2214,east_elmwood,2214
-0,1,399,great,5,2253,east_elmwood,2253
-2,1,789,great,1,3920,east_elmwood,3920
-3,2,1132,good,14,4820,west_welmwood,4820
-2,1,515,good,2,3078,west_welmwood,3078
-2,1,936,great,11,3934,east_elmwood,3934
-2,1,669,great,14,3633,east_elmwood,3633
-1,1,523,great,0,2527,east_elmwood,2527
-0,1,192,poor,58,1038,northwest,959.112
-0,1,500,poor,50,1448,northwest,1361.12
-1,1,615,good,2,2101,west_welmwood,2101
-1,1,352,poor,17,1649,northwest,1649
-3,2,863,great,11,5108,east_elmwood,5108
-2,1,827,good,11,3304,west_welmwood,3304
-2,1,799,great,4,3887,east_elmwood,3887
-2,1,613,poor,54,2672,northwest,2490.304
-2,1,524,good,10,3052,west_welmwood,3052
-0,1,307,good,3,1664,west_welmwood,1664
-1,1,343,poor,57,1250,northwest,1157.5
-0,1,474,great,10,2319,east_elmwood,2319
-2,1,803,poor,38,3053,northwest,2943.092
-3,2,1168,poor,28,4688,northwest,4612.992
-2,1,704,good,6,3234,west_welmwood,3234
-3,2,1237,good,2,5104,west_welmwood,5104
-3,2,865,good,10,4637,west_welmwood,4637
-0,1,268,good,0,1676,west_welmwood,1676
-1,1,697,good,2,2146,west_welmwood,2146
-1,1,372,good,12,1775,west_welmwood,1775
-3,2,904,poor,51,4256,northwest,3992.128
-2,1,590,poor,61,2554,northwest,2344.572
-1,1,436,good,2,1873,west_welmwood,1873
-3,2,845,good,12,4561,west_welmwood,4561
-3,2,1059,great,3,5336,east_elmwood,5336
-0,1,424,great,14,2177,east_elmwood,2177
-1,1,735,good,1,2181,west_welmwood,2181
-2,1,698,good,5,3214,west_welmwood,3214
-1,1,543,good,10,1984,west_welmwood,1984
-2,1,811,poor,45,2921,northwest,2774.95
-2,1,623,good,0,3211,west_welmwood,3211
-1,1,338,good,14,1709,west_welmwood,1709
-2,1,810,great,7,3875,east_elmwood,3875
-0,1,466,good,13,1715,west_welmwood,1715
-0,1,530,poor,35,1625,northwest,1576.25
-2,1,684,good,12,3164,west_welmwood,3164
-2,1,780,poor,60,2822,northwest,2596.24
-3,2,1241,good,4,5089,west_welmwood,5089
-3,2,1039,poor,19,4720,northwest,4720
-3,2,866,great,13,5039,east_elmwood,5039
-2,1,606,poor,59,2590,northwest,2387.98
-3,2,848,poor,58,4103,northwest,3791.172
-1,1,463,great,11,2357,east_elmwood,2357
-0,1,355,great,6,2189,east_elmwood,2189
-3,2,875,good,12,4608,west_welmwood,4608
-3,2,1075,good,13,4761,west_welmwood,4761
-3,2,915,good,7,4729,west_welmwood,4729
-1,1,326,poor,19,1617,northwest,1617
-3,2,905,good,1,4709,west_welmwood,4709
-0,1,441,great,8,2275,east_elmwood,2275
-2,1,748,poor,30,3078,northwest,3016.44
-0,1,514,good,3,1835,west_welmwood,1835
-0,1,443,poor,40,1410,northwest,1353.6
-0,1,207,good,10,1524,west_welmwood,1524
-1,1,657,great,9,2579,east_elmwood,2579
-2,1,866,poor,45,2988,northwest,2838.6
-0,1,221,great,12,1992,east_elmwood,1992
-3,2,986,good,1,4785,west_welmwood,4785
-2,1,852,poor,54,2881,northwest,2685.092
-0,1,176,good,8,1514,west_welmwood,1514
-0,1,372,poor,16,1590,northwest,1590
-3,2,936,good,2,4740,west_welmwood,4740
-3,2,1012,good,12,4721,west_welmwood,4721
-0,1,153,poor,28,1260,northwest,1239.84
-1,1,515,great,0,2514,east_elmwood,2514
-1,1,567,poor,47,1549,northwest,1465.354
-1,1,470,great,7,2387,east_elmwood,2387
-2,1,633,good,5,3211,west_welmwood,3211
-0,1,262,good,10,1549,west_welmwood,1549
-0,1,230,great,3,2071,east_elmwood,2071
-2,1,601,poor,29,2891,northwest,2838.962
-2,1,682,good,5,3189,west_welmwood,3189
-1,1,463,great,11,2345,east_elmwood,2345
-3,2,1197,poor,44,4649,northwest,4425.848
-2,1,925,great,1,4037,east_elmwood,4037
-0,1,277,poor,39,1305,northwest,1255.41
-3,2,1033,good,2,4852,west_welmwood,4852
-3,2,1208,poor,59,4443,northwest,4096.446
-0,1,190,good,11,1483,west_welmwood,1483
-0,1,483,good,8,1782,west_welmwood,1782
-3,2,896,great,11,5111,east_elmwood,5111
-3,2,1085,great,3,5399,east_elmwood,5399
-2,1,929,poor,48,3098,northwest,2924.512
-3,2,804,good,11,4496,west_welmwood,4496
-1,1,666,great,2,2652,east_elmwood,2652
-0,1,118,great,5,1953,east_elmwood,1953
-0,1,230,poor,52,1070,northwest,1001.52
-0,1,298,poor,32,1418,northwest,1383.968
-0,1,214,poor,29,1329,northwest,1305.078
-0,1,525,great,1,2463,east_elmwood,2463
-3,2,1241,good,3,5020,west_welmwood,5020
-1,1,446,good,11,1822,west_welmwood,1822
-2,1,761,great,2,3878,east_elmwood,3878
-2,1,615,poor,18,3053,northwest,3053
-0,1,189,good,7,1500,west_welmwood,1500
-0,1,443,good,9,1738,west_welmwood,1738
-0,1,146,poor,64,878,northwest,800.736
-2,1,787,poor,35,2989,northwest,2899.33
-0,1,356,poor,19,1594,northwest,1594
-3,2,1240,good,9,5011,west_welmwood,5011
-2,1,720,poor,28,3088,northwest,3038.592
-0,1,379,poor,34,1427,northwest,1387.044
-3,2,994,good,13,4675,west_welmwood,4675
-3,2,1213,good,12,4952,west_welmwood,4952
-0,1,338,good,3,1736,west_welmwood,1736
-3,2,919,great,9,5192,east_elmwood,5192
-2,1,501,good,9,3054,west_welmwood,3054
-1,1,476,good,3,1912,west_welmwood,1912
-2,1,577,great,6,3602,east_elmwood,3602
-1,1,400,poor,58,1340,northwest,1238.16
-3,2,1104,poor,59,4363,northwest,4022.686
-2,1,879,poor,16,3359,northwest,3359
-1,1,320,good,4,1808,west_welmwood,1808
-3,2,1038,great,1,5356,east_elmwood,5356
-3,2,884,great,8,5168,east_elmwood,5168
-0,1,445,great,6,2307,east_elmwood,2307
-2,1,841,poor,38,3096,northwest,2984.544
-2,1,704,good,12,3137,west_welmwood,3137
-2,1,571,good,11,3035,west_welmwood,3035
-3,2,1145,poor,59,4396,northwest,4053.112
-2,1,787,good,7,3317,west_welmwood,3317
-3,2,862,good,12,4626,west_welmwood,4626
-2,1,789,poor,52,2838,northwest,2656.368
-0,1,465,good,7,1809,west_welmwood,1809
-0,1,390,poor,30,1451,northwest,1421.98
-0,1,374,good,14,1683,west_welmwood,1683
-2,1,518,poor,43,2723,northwest,2597.742
-0,1,328,great,1,2224,east_elmwood,2224
-3,2,1227,great,12,5497,east_elmwood,5497
-3,2,873,good,6,4672,west_welmwood,4672
-3,2,1236,good,9,5010,west_welmwood,5010
-0,1,294,great,7,2162,east_elmwood,2162
-3,2,801,poor,55,4114,northwest,3826.02
-0,1,522,poor,44,1514,northwest,1441.328
-3,2,940,poor,39,4446,northwest,4277.052
-1,1,501,great,0,2485,east_elmwood,2485
-3,2,867,poor,49,4232,northwest,3986.544
-3,2,1156,poor,26,4697,northwest,4640.636
-1,1,669,good,8,2065,west_welmwood,2065
-3,2,853,poor,53,4126,northwest,3853.684
-2,1,786,poor,52,2862,northwest,2678.832
-0,1,306,good,5,1687,west_welmwood,1687
-0,1,525,good,12,1781,west_welmwood,1781
-0,1,400,good,3,1732,west_welmwood,1732
-1,1,329,poor,61,1186,northwest,1088.748
-0,1,474,good,11,1723,west_welmwood,1723
-1,1,698,poor,27,1949,northwest,1921.714
-2,1,914,good,5,3414,west_welmwood,3414
-1,1,712,poor,61,1626,northwest,1492.668
-2,1,541,great,3,3614,east_elmwood,3614
-1,1,397,good,13,1798,west_welmwood,1798
-3,2,1137,good,7,4908,west_welmwood,4908
-3,2,859,poor,56,4118,northwest,3821.504
-1,1,532,good,0,1982,west_welmwood,1982
-2,1,525,poor,37,2731,northwest,2638.146
-3,2,870,poor,61,4098,northwest,3761.964
-2,1,677,poor,31,2978,northwest,2912.484
-2,1,873,good,5,3399,west_welmwood,3399
-3,2,824,good,12,4510,west_welmwood,4510
-0,1,347,poor,36,1365,northwest,1321.32
-3,2,1215,great,0,5580,east_elmwood,5580
-0,1,201,good,1,1548,west_welmwood,1548
-2,1,590,poor,18,3026,northwest,3026
-2,1,540,good,12,2970,west_welmwood,2970
-3,2,960,good,11,4696,west_welmwood,4696
-0,1,417,good,7,1794,west_welmwood,1794
-3,2,962,great,0,5270,east_elmwood,5270
-1,1,513,good,14,1858,west_welmwood,1858
-2,1,621,good,8,3169,west_welmwood,3169
-2,1,612,poor,27,2919,northwest,2878.134
-1,1,522,great,11,2368,east_elmwood,2368
-3,2,1178,good,0,5045,west_welmwood,5045
-3,2,935,great,10,5137,east_elmwood,5137
-1,1,692,great,10,2570,east_elmwood,2570
-0,1,504,poor,28,1650,northwest,1623.6
-3,2,1058,poor,56,4393,northwest,4076.704
-0,1,112,good,14,1336,west_welmwood,1336
-0,1,396,great,0,2256,east_elmwood,2256
-1,1,744,great,3,2692,east_elmwood,2692
-2,1,715,poor,50,2783,northwest,2616.02
-3,2,1077,great,4,5369,east_elmwood,5369
-3,2,883,poor,47,4249,northwest,4019.554
-3,2,1056,good,4,4887,west_welmwood,4887
-0,1,425,good,12,1663,west_welmwood,1663
-1,1,602,good,4,2082,west_welmwood,2082
-1,1,642,great,3,2564,east_elmwood,2564
-1,1,351,poor,51,1365,northwest,1280.37
-1,1,674,poor,39,1758,northwest,1691.196
-3,2,1039,good,7,4858,west_welmwood,4858
-1,1,528,poor,37,1635,northwest,1579.41
-2,1,627,poor,42,2791,northwest,2668.196
-0,1,305,poor,40,1265,northwest,1214.4
-1,1,645,good,0,2097,west_welmwood,2097
-3,2,1074,good,5,4857,west_welmwood,4857
-1,1,536,poor,17,1836,northwest,1836
-0,1,313,good,8,1617,west_welmwood,1617
-1,1,532,great,6,2440,east_elmwood,2440
-3,2,983,good,13,4746,west_welmwood,4746
-3,2,1075,great,5,5398,east_elmwood,5398
-2,1,558,good,1,3175,west_welmwood,3175
-1,1,664,good,6,2145,west_welmwood,2145
-2,1,813,good,2,3399,west_welmwood,3399
-2,1,635,poor,25,3028,northwest,2997.72
-3,2,1029,great,4,5297,east_elmwood,5297
-0,1,137,great,3,2009,east_elmwood,2009
-1,1,372,poor,15,1723,northwest,1723
-0,1,512,poor,58,1371,northwest,1266.804
-3,2,1086,good,7,4880,west_welmwood,4880
-2,1,919,great,4,3951,east_elmwood,3951
-0,1,401,poor,59,1238,northwest,1141.436
-1,1,495,poor,57,1378,northwest,1276.028
-1,1,462,good,10,1910,west_welmwood,1910
-1,1,714,poor,55,1616,northwest,1502.88
-1,1,458,good,6,1879,west_welmwood,1879
-1,1,460,good,2,1922,west_welmwood,1922
-2,1,889,poor,20,3308,northwest,3308
-1,1,658,poor,37,1739,northwest,1679.874
-1,1,696,poor,61,1544,northwest,1417.392
-0,1,357,poor,46,1294,northwest,1226.712
-3,2,1131,poor,18,4839,northwest,4839
-2,1,845,poor,34,3151,northwest,3062.772
-0,1,377,good,12,1686,west_welmwood,1686
-0,1,464,great,12,2259,east_elmwood,2259
-3,2,1131,good,11,4873,west_welmwood,4873
-0,1,297,poor,27,1411,northwest,1391.246
-1,1,578,poor,57,1474,northwest,1364.924
-3,2,1036,good,0,4891,west_welmwood,4891
-3,2,1026,great,13,5220,east_elmwood,5220
-2,1,837,poor,49,2923,northwest,2753.466
-2,1,803,good,5,3384,west_welmwood,3384
-3,2,875,great,5,5213,east_elmwood,5213
-3,2,1000,good,8,4787,west_welmwood,4787
-3,2,842,good,4,4701,west_welmwood,4701
-3,2,1138,poor,21,4819,northwest,4809.362
-1,1,623,great,14,2476,east_elmwood,2476
-2,1,904,poor,25,3289,northwest,3256.11
-2,1,900,great,2,3976,east_elmwood,3976
-0,1,533,poor,48,1480,northwest,1397.12
-0,1,507,great,3,2426,east_elmwood,2426
-2,1,764,great,12,3706,east_elmwood,3706
-2,1,570,good,13,3066,west_welmwood,3066
-3,2,1218,good,12,4926,west_welmwood,4926
-2,1,686,good,4,3226,west_welmwood,3226
-1,1,708,great,5,2655,east_elmwood,2655
-3,2,985,poor,54,4296,northwest,4003.872
-1,1,346,poor,52,1304,northwest,1220.544
-2,1,743,great,0,3847,east_elmwood,3847
-0,1,144,great,0,2067,east_elmwood,2067
-3,2,921,poor,21,4569,northwest,4559.862
-2,1,520,poor,43,2665,northwest,2542.41
-2,1,762,good,14,3181,west_welmwood,3181
-3,2,1180,poor,47,4574,northwest,4327.004
-3,2,1246,poor,15,4923,northwest,4923
-3,2,1127,good,8,4910,west_welmwood,4910
-1,1,588,good,7,2015,west_welmwood,2015
-2,1,906,good,12,3393,west_welmwood,3393
-2,1,738,poor,57,2759,northwest,2554.834
-2,1,557,good,8,3118,west_welmwood,3118
-2,1,644,good,6,3203,west_welmwood,3203
-0,1,517,great,13,2293,east_elmwood,2293
-3,2,869,great,11,5135,east_elmwood,5135
-1,1,624,poor,41,1727,northwest,1654.466
-0,1,126,poor,21,1348,northwest,1345.304
-2,1,840,good,0,3404,west_welmwood,3404
-1,1,428,good,9,1816,west_welmwood,1816
-2,1,689,poor,28,3040,northwest,2991.36
-1,1,470,good,7,1892,west_welmwood,1892
-0,1,238,great,11,2041,east_elmwood,2041
-1,1,461,good,8,1857,west_welmwood,1857
-2,1,548,great,3,3659,east_elmwood,3659
-1,1,570,poor,48,1635,northwest,1543.44
-0,1,515,poor,37,1528,northwest,1476.048
-1,1,411,good,10,1782,west_welmwood,1782
-0,1,246,poor,57,1066,northwest,987.116
-2,1,859,good,0,3418,west_welmwood,3418
-3,2,971,good,10,4705,west_welmwood,4705
-2,1,549,good,4,3158,west_welmwood,3158
-3,2,1091,great,7,5411,east_elmwood,5411
-0,1,191,poor,56,1008,northwest,935.424
-1,1,377,great,8,2262,east_elmwood,2262
-0,1,250,poor,57,1063,northwest,984.338
-3,2,836,good,3,4697,west_welmwood,4697
-2,1,702,great,0,3784,east_elmwood,3784
-1,1,394,great,1,2337,east_elmwood,2337
-2,1,719,good,6,3223,west_welmwood,3223
-2,1,682,great,12,3623,east_elmwood,3623
-1,1,318,poor,42,1367,northwest,1306.852
-0,1,332,good,3,1739,west_welmwood,1739
-1,1,498,good,11,1904,west_welmwood,1904
-2,1,672,great,4,3703,east_elmwood,3703
-1,1,575,poor,62,1504,northwest,1377.664
-3,2,1036,good,11,4768,west_welmwood,4768
-2,1,812,good,4,3356,west_welmwood,3356
-1,1,706,poor,40,1770,northwest,1699.2
-2,1,584,good,5,3107,west_welmwood,3107
-1,1,555,poor,59,1419,northwest,1308.318
-1,1,653,good,4,2103,west_welmwood,2103
-1,1,604,good,2,2088,west_welmwood,2088
-2,1,798,poor,26,3098,northwest,3060.824
-3,2,1163,good,2,4995,west_welmwood,4995
-2,1,646,poor,27,2982,northwest,2940.252
-2,1,845,poor,46,3024,northwest,2866.752
-1,1,387,poor,47,1392,northwest,1316.832
-0,1,112,poor,60,948,northwest,872.16
-3,2,1079,poor,46,4445,northwest,4213.86
-3,2,1234,great,14,5394,east_elmwood,5394
-1,1,533,good,3,1992,west_welmwood,1992
-2,1,752,good,10,3204,west_welmwood,3204
-1,1,307,poor,55,1237,northwest,1150.41
-0,1,167,good,1,1517,west_welmwood,1517
-2,1,905,poor,46,3034,northwest,2876.232
-1,1,307,poor,40,1368,northwest,1313.28
-0,1,276,good,10,1586,west_welmwood,1586
-2,1,813,good,1,3364,west_welmwood,3364
-0,1,276,poor,43,1249,northwest,1191.546
-1,1,547,good,12,1953,west_welmwood,1953
-0,1,376,good,12,1695,west_welmwood,1695
-1,1,653,good,14,1999,west_welmwood,1999
-3,2,1017,poor,62,4264,northwest,3905.824
-1,1,582,great,6,2483,east_elmwood,2483
-3,2,1068,good,7,4889,west_welmwood,4889
-0,1,343,great,14,2120,east_elmwood,2120
-2,1,860,good,13,3357,west_welmwood,3357
-2,1,905,great,12,3863,east_elmwood,3863
-2,1,937,poor,18,3336,northwest,3336
-2,1,809,good,7,3295,west_welmwood,3295
-1,1,698,poor,56,1607,northwest,1491.296
-0,1,336,poor,62,1143,northwest,1046.988
-1,1,483,great,8,2417,east_elmwood,2417
-2,1,852,great,2,3975,east_elmwood,3975
-3,2,840,poor,58,4096,northwest,3784.704
-2,1,936,poor,53,3020,northwest,2820.68
-2,1,556,great,12,3535,east_elmwood,3535
-2,1,572,good,14,3047,west_welmwood,3047
-3,2,879,great,7,5149,east_elmwood,5149
-1,1,700,good,11,2079,west_welmwood,2079
-2,1,564,poor,63,2512,northwest,2295.968
-3,2,1148,great,4,5468,east_elmwood,5468
-0,1,542,poor,49,1420,northwest,1337.64
-2,1,668,good,0,3240,west_welmwood,3240
-2,1,813,great,4,3921,east_elmwood,3921
-3,2,1034,good,10,4782,west_welmwood,4782
-1,1,311,great,13,2185,east_elmwood,2185
-3,2,846,poor,62,4055,northwest,3714.38
-3,2,859,poor,16,4525,northwest,4525
-2,1,672,good,7,3244,west_welmwood,3244
-2,1,695,good,6,3190,west_welmwood,3190
-3,2,1025,good,12,4775,west_welmwood,4775
-0,1,519,poor,64,1270,northwest,1158.24
-2,1,779,poor,40,3022,northwest,2901.12
-3,2,1054,great,4,5342,east_elmwood,5342
-0,1,400,poor,29,1555,northwest,1527.01
-2,1,717,poor,28,3041,northwest,2992.344
-3,2,1163,poor,28,4763,northwest,4686.792
-0,1,408,great,14,2128,east_elmwood,2128
-3,2,1190,great,3,5475,east_elmwood,5475
-2,1,615,poor,41,2806,northwest,2688.148
-2,1,660,good,0,3220,west_welmwood,3220
-1,1,667,poor,22,1927,northwest,1919.292
-1,1,693,good,3,2129,west_welmwood,2129
-0,1,458,poor,36,1464,northwest,1417.152
-0,1,276,good,2,1638,west_welmwood,1638
-1,1,702,poor,44,1787,northwest,1701.224
-0,1,206,good,7,1541,west_welmwood,1541
-2,1,762,good,7,3317,west_welmwood,3317
-1,1,369,poor,26,1559,northwest,1540.292
-3,2,1164,good,11,4944,west_welmwood,4944
-2,1,858,good,9,3374,west_welmwood,3374
-1,1,509,good,11,1919,west_welmwood,1919
-0,1,344,good,14,1554,west_welmwood,1554
-2,1,509,great,6,3567,east_elmwood,3567
-2,1,860,poor,28,3223,northwest,3171.432
-2,1,699,good,8,3249,west_welmwood,3249
-0,1,541,good,14,1833,west_welmwood,1833
-2,1,898,good,3,3457,west_welmwood,3457
-0,1,391,good,13,1618,west_welmwood,1618
-0,1,516,great,13,2272,east_elmwood,2272
-2,1,546,poor,57,2606,northwest,2413.156
-3,2,898,good,3,4767,west_welmwood,4767
-2,1,895,good,0,3476,west_welmwood,3476
-2,1,877,poor,38,3084,northwest,2972.976
-1,1,706,good,10,2135,west_welmwood,2135
-0,1,542,poor,35,1597,northwest,1549.09
-3,2,1204,poor,54,4545,northwest,4235.94
-2,1,582,poor,48,2710,northwest,2558.24
-3,2,968,good,14,4679,west_welmwood,4679
-2,1,586,good,9,3106,west_welmwood,3106
-1,1,598,poor,28,1818,northwest,1788.912
-1,1,391,great,8,2328,east_elmwood,2328
-2,1,643,good,8,3195,west_welmwood,3195
-3,2,1036,good,3,4890,west_welmwood,4890
-3,2,867,poor,35,4388,northwest,4256.36
-3,2,1096,good,1,4971,west_welmwood,4971
-3,2,815,poor,21,4469,northwest,4460.062
-0,1,295,good,10,1553,west_welmwood,1553
-2,1,902,good,4,3453,west_welmwood,3453
-2,1,749,good,6,3240,west_welmwood,3240
-3,2,847,poor,38,4307,northwest,4151.948
-3,2,1170,poor,38,4655,northwest,4487.42
-1,1,346,good,12,1716,west_welmwood,1716
-2,1,566,good,6,3093,west_welmwood,3093
-2,1,768,poor,50,2823,northwest,2653.62
-1,1,616,poor,38,1727,northwest,1664.828
-2,1,501,good,4,3044,west_welmwood,3044
-2,1,538,poor,22,2927,northwest,2915.292
-3,2,912,poor,28,4463,northwest,4391.592
-2,1,683,poor,33,2923,northwest,2847.002
-3,2,1205,great,7,5532,east_elmwood,5532
-2,1,837,good,0,3457,west_welmwood,3457
-2,1,702,good,14,3190,west_welmwood,3190
-0,1,136,great,2,2008,east_elmwood,2008
-3,2,1075,poor,21,4690,northwest,4680.62
-1,1,432,good,10,1852,west_welmwood,1852
-3,2,941,good,11,4695,west_welmwood,4695
-0,1,220,great,0,2076,east_elmwood,2076
-1,1,671,poor,45,1699,northwest,1614.05
-2,1,682,good,10,3206,west_welmwood,3206
-1,1,700,good,1,2171,west_welmwood,2171
-1,1,411,great,14,2306,east_elmwood,2306
-2,1,552,poor,43,2729,northwest,2603.466
-0,1,497,poor,57,1371,northwest,1269.546
-3,2,951,good,3,4729,west_welmwood,4729
-0,1,430,great,12,2217,east_elmwood,2217
-3,2,1220,poor,32,4714,northwest,4600.864
-1,1,430,good,0,1928,west_welmwood,1928
-3,2,837,good,6,4594,west_welmwood,4594
-0,1,435,good,11,1681,west_welmwood,1681
-3,2,847,great,11,5051,east_elmwood,5051
-0,1,396,good,12,1708,west_welmwood,1708
-3,2,1119,good,6,4922,west_welmwood,4922
-0,1,386,poor,54,1268,northwest,1181.776
-3,2,1208,good,2,5004,west_welmwood,5004
-0,1,240,good,8,1609,west_welmwood,1609
-1,1,323,good,14,1697,west_welmwood,1697
-2,1,876,great,6,3866,east_elmwood,3866
-2,1,653,great,0,3801,east_elmwood,3801
-2,1,579,good,11,3039,west_welmwood,3039
-1,1,335,poor,24,1635,northwest,1621.92
-3,2,898,poor,62,4129,northwest,3782.164
-1,1,593,good,0,2059,west_welmwood,2059
-1,1,392,great,7,2282,east_elmwood,2282
-1,1,374,good,12,1708,west_welmwood,1708
-1,1,725,great,7,2672,east_elmwood,2672
-3,2,831,poor,27,4424,northwest,4362.064
-2,1,835,good,1,3378,west_welmwood,3378
-0,1,233,good,11,1506,west_welmwood,1506
-2,1,830,great,7,3879,east_elmwood,3879
-1,1,645,good,13,1976,west_welmwood,1976
-3,2,831,poor,36,4354,northwest,4214.672
-1,1,478,good,1,2009,west_welmwood,2009
-2,1,790,good,11,3300,west_welmwood,3300
-0,1,472,poor,27,1650,northwest,1626.9
-3,2,1200,good,11,4941,west_welmwood,4941
-2,1,805,poor,48,2897,northwest,2734.768
-1,1,561,good,6,1990,west_welmwood,1990
-2,1,857,good,8,3357,west_welmwood,3357
-0,1,126,poor,26,1275,northwest,1259.7
-0,1,412,great,9,2197,east_elmwood,2197
-2,1,911,poor,43,3085,northwest,2943.09
-0,1,398,good,10,1740,west_welmwood,1740
-0,1,459,poor,18,1671,northwest,1671
-1,1,407,good,1,1858,west_welmwood,1858
-3,2,928,good,6,4700,west_welmwood,4700
-0,1,232,good,3,1585,west_welmwood,1585
-2,1,703,good,0,3336,west_welmwood,3336
-1,1,628,great,11,2469,east_elmwood,2469
-1,1,643,poor,59,1602,northwest,1477.044
-1,1,359,poor,48,1380,northwest,1302.72
-2,1,755,good,8,3278,west_welmwood,3278
-2,1,613,good,5,3121,west_welmwood,3121
-3,2,1009,good,1,4829,west_welmwood,4829
-0,1,306,great,13,2099,east_elmwood,2099
-3,2,828,poor,57,4157,northwest,3849.382
-3,2,1245,poor,26,4824,northwest,4766.112
-3,2,859,good,9,4654,west_welmwood,4654
-3,2,1196,great,5,5537,east_elmwood,5537
-2,1,698,good,3,3229,west_welmwood,3229
-0,1,162,good,1,1533,west_welmwood,1533
-0,1,417,great,8,2254,east_elmwood,2254
-0,1,247,good,10,1578,west_welmwood,1578
-3,2,959,good,0,4847,west_welmwood,4847
-1,1,740,good,9,2101,west_welmwood,2101
-0,1,493,good,5,1802,west_welmwood,1802
-3,2,878,good,11,4572,west_welmwood,4572
-1,1,612,good,11,2031,west_welmwood,2031
-2,1,805,poor,40,2978,northwest,2858.88
-0,1,139,good,11,1445,west_welmwood,1445
-2,1,875,poor,26,3194,northwest,3155.672
-2,1,658,good,11,3156,west_welmwood,3156
-1,1,603,good,12,1962,west_welmwood,1962
-0,1,394,poor,61,1148,northwest,1053.864
-2,1,648,great,8,3707,east_elmwood,3707
-3,2,1038,great,11,5299,east_elmwood,5299
-0,1,249,good,8,1528,west_welmwood,1528
-1,1,362,poor,25,1654,northwest,1637.46
-2,1,622,good,0,3257,west_welmwood,3257
-2,1,724,good,13,3208,west_welmwood,3208
-0,1,186,poor,57,1011,northwest,936.186
-2,1,599,poor,22,2933,northwest,2921.268
-3,2,1248,good,10,4948,west_welmwood,4948
-0,1,272,good,9,1631,west_welmwood,1631
-2,1,772,great,9,3796,east_elmwood,3796
-1,1,427,good,0,1917,west_welmwood,1917
-3,2,1189,poor,49,4509,northwest,4247.478
-2,1,590,poor,59,2598,northwest,2395.356
-3,2,925,good,2,4795,west_welmwood,4795
-3,2,1074,poor,43,4525,northwest,4316.85
-2,1,559,good,1,3103,west_welmwood,3103
-3,2,931,poor,34,4416,northwest,4292.352
-1,1,584,poor,51,1585,northwest,1486.73
-3,2,879,good,3,4719,west_welmwood,4719
-1,1,417,poor,58,1377,northwest,1272.348
-2,1,800,poor,18,3177,northwest,3177
-2,1,507,poor,25,2849,northwest,2820.51
-1,1,351,good,14,1729,west_welmwood,1729
-3,2,1062,poor,28,4679,northwest,4604.136
-3,2,937,good,14,4598,west_welmwood,4598
-2,1,921,poor,23,3296,northwest,3276.224
-2,1,942,good,7,3441,west_welmwood,3441
-1,1,701,great,2,2699,east_elmwood,2699
-2,1,922,good,1,3538,west_welmwood,3538
-3,2,826,good,3,4688,west_welmwood,4688
-3,2,1147,good,0,5021,west_welmwood,5021
-1,1,602,poor,63,1431,northwest,1307.934
-2,1,814,poor,32,3072,northwest,2998.272
-2,1,875,good,12,3316,west_welmwood,3316
-1,1,365,poor,47,1373,northwest,1298.858
-0,1,263,good,9,1580,west_welmwood,1580
-0,1,371,poor,40,1354,northwest,1299.84
-2,1,877,poor,27,3169,northwest,3124.634
-2,1,606,good,12,3071,west_welmwood,3071
-0,1,316,great,12,2103,east_elmwood,2103
-0,1,487,good,10,1813,west_welmwood,1813
-3,2,1169,poor,26,4764,northwest,4706.832
-3,2,1233,good,6,5054,west_welmwood,5054
-3,2,1138,great,3,5414,east_elmwood,5414
-3,2,976,great,11,5255,east_elmwood,5255
-0,1,364,good,10,1615,west_welmwood,1615
-2,1,837,poor,30,3098,northwest,3036.04
-3,2,992,poor,39,4469,northwest,4299.178
-3,2,1151,great,13,5398,east_elmwood,5398
-0,1,528,good,12,1764,west_welmwood,1764
-1,1,453,good,12,1853,west_welmwood,1853
-2,1,578,good,14,3081,west_welmwood,3081
-2,1,910,great,11,3917,east_elmwood,3917
-3,2,1073,good,8,4819,west_welmwood,4819
-1,1,736,great,10,2649,east_elmwood,2649
-2,1,780,good,3,3329,west_welmwood,3329
-0,1,360,great,3,2266,east_elmwood,2266
-0,1,480,poor,21,1694,northwest,1690.612
-2,1,878,poor,59,2889,northwest,2663.658
-0,1,113,great,10,1927,east_elmwood,1927
-3,2,1161,great,3,5449,east_elmwood,5449
-3,2,937,poor,25,4565,northwest,4519.35
-1,1,581,good,10,1986,west_welmwood,1986
-3,2,1204,good,2,5041,west_welmwood,5041
-3,2,947,poor,35,4427,northwest,4294.19
-2,1,536,good,0,3093,west_welmwood,3093
-0,1,385,good,2,1778,west_welmwood,1778
-1,1,533,poor,36,1672,northwest,1618.496
-0,1,115,good,10,1402,west_welmwood,1402
-1,1,416,great,0,2413,east_elmwood,2413
-2,1,566,good,14,3041,west_welmwood,3041
-3,2,979,great,6,5257,east_elmwood,5257
-3,2,1152,good,4,4987,west_welmwood,4987
-1,1,384,good,11,1760,west_welmwood,1760
-2,1,541,good,11,3063,west_welmwood,3063
-2,1,799,poor,18,3174,northwest,3174
-0,1,535,poor,22,1701,northwest,1694.196
-0,1,210,good,3,1602,west_welmwood,1602
-3,2,912,poor,51,4207,northwest,3946.166
-3,2,1221,poor,40,4713,northwest,4524.48
-3,2,1091,good,1,4978,west_welmwood,4978
-3,2,1207,good,5,5055,west_welmwood,5055
-0,1,372,poor,31,1499,northwest,1466.022
-1,1,537,poor,33,1754,northwest,1708.396
-3,2,806,great,9,5111,east_elmwood,5111
-3,2,1062,great,12,5306,east_elmwood,5306
-1,1,695,good,8,2111,west_welmwood,2111
-0,1,531,great,4,2403,east_elmwood,2403
-1,1,693,poor,31,1909,northwest,1867.002
-1,1,732,good,3,2206,west_welmwood,2206
-1,1,664,poor,43,1744,northwest,1663.776
-3,2,998,great,5,5277,east_elmwood,5277
-2,1,657,poor,62,2598,northwest,2379.768
-3,2,1248,poor,40,4677,northwest,4489.92
-3,2,1112,great,13,5334,east_elmwood,5334
-1,1,451,good,8,1899,west_welmwood,1899
-2,1,562,good,2,3189,west_welmwood,3189
-1,1,671,great,5,2596,east_elmwood,2596
-3,2,1103,good,10,4852,west_welmwood,4852
-0,1,398,poor,62,1182,northwest,1082.712
-1,1,474,poor,16,1794,northwest,1794
-1,1,622,great,5,2566,east_elmwood,2566
-2,1,543,good,13,2991,west_welmwood,2991
-0,1,540,poor,15,1812,northwest,1812
-2,1,671,poor,61,2631,northwest,2415.258
-3,2,1171,poor,57,4437,northwest,4108.662
-0,1,316,great,4,2156,east_elmwood,2156
-3,2,1026,poor,48,4374,northwest,4129.056
-1,1,407,good,3,1898,west_welmwood,1898
-0,1,227,good,14,1462,west_welmwood,1462
-1,1,429,good,8,1866,west_welmwood,1866
-1,1,707,great,0,2752,east_elmwood,2752
-0,1,533,great,10,2371,east_elmwood,2371
-3,2,1106,poor,46,4483,northwest,4249.884
-0,1,414,great,5,2298,east_elmwood,2298
-2,1,745,poor,55,2828,northwest,2630.04
-0,1,334,good,5,1706,west_welmwood,1706
-3,2,1102,poor,32,4675,northwest,4562.8
-0,1,309,poor,43,1241,northwest,1183.914
-0,1,428,good,0,1851,west_welmwood,1851
-2,1,655,great,7,3697,east_elmwood,3697
-2,1,826,poor,17,3240,northwest,3240
-3,2,832,poor,63,4013,northwest,3667.882
-0,1,227,great,12,1977,east_elmwood,1977
-0,1,351,great,4,2251,east_elmwood,2251
-3,2,844,poor,57,4131,northwest,3825.306
-2,1,688,poor,41,2883,northwest,2761.914
-2,1,523,good,14,2998,west_welmwood,2998
-0,1,546,poor,30,1620,northwest,1587.6
-0,1,428,good,1,1865,west_welmwood,1865
-1,1,642,poor,49,1622,northwest,1527.924
-0,1,156,good,4,1474,west_welmwood,1474
-2,1,578,poor,30,2845,northwest,2788.1
-3,2,940,great,12,5177,east_elmwood,5177
-2,1,748,great,3,3813,east_elmwood,3813
-2,1,699,poor,32,2936,northwest,2865.536
-0,1,349,poor,50,1230,northwest,1156.2
-2,1,849,good,5,3390,west_welmwood,3390
-1,1,374,good,5,1850,west_welmwood,1850
-2,1,611,good,14,3061,west_welmwood,3061
-1,1,497,poor,52,1511,northwest,1414.296
-0,1,356,good,3,1703,west_welmwood,1703
-0,1,308,great,11,2137,east_elmwood,2137
-3,2,983,good,3,4837,west_welmwood,4837
-2,1,549,poor,51,2612,northwest,2450.056
-3,2,871,good,5,4711,west_welmwood,4711
-2,1,788,good,10,3313,west_welmwood,3313
-1,1,465,good,6,1881,west_welmwood,1881
-0,1,259,great,0,2137,east_elmwood,2137
-0,1,312,good,7,1612,west_welmwood,1612
-2,1,691,great,5,3696,east_elmwood,3696
-0,1,471,great,5,2353,east_elmwood,2353
-2,1,869,great,8,3876,east_elmwood,3876
-0,1,387,poor,56,1247,northwest,1157.216
-2,1,683,poor,38,2930,northwest,2824.52
-0,1,440,poor,29,1533,northwest,1505.406
-3,2,827,poor,48,4191,northwest,3956.304
-0,1,157,good,3,1487,west_welmwood,1487
-3,2,937,poor,64,4110,northwest,3748.32
-3,2,1037,poor,16,4721,northwest,4721
-2,1,918,poor,36,3175,northwest,3073.4
-2,1,543,good,0,3101,west_welmwood,3101
-1,1,663,good,1,2133,west_welmwood,2133
-1,1,529,good,0,2056,west_welmwood,2056
-0,1,446,great,5,2250,east_elmwood,2250
-3,2,972,good,12,4670,west_welmwood,4670
-2,1,509,great,1,3624,east_elmwood,3624
-0,1,382,poor,22,1597,northwest,1590.612
-3,2,1114,good,6,4892,west_welmwood,4892
-1,1,444,good,1,1948,west_welmwood,1948
-3,2,1019,great,12,5206,east_elmwood,5206
-2,1,936,great,12,3913,east_elmwood,3913
-0,1,319,poor,42,1261,northwest,1205.516
-0,1,352,good,9,1619,west_welmwood,1619
-3,2,1042,good,0,4915,west_welmwood,4915
-2,1,758,good,6,3271,west_welmwood,3271
-3,2,1156,good,4,5003,west_welmwood,5003
-3,2,849,good,1,4734,west_welmwood,4734
-1,1,350,great,10,2257,east_elmwood,2257
-2,1,793,good,6,3298,west_welmwood,3298
-1,1,650,good,14,2034,west_welmwood,2034
-1,1,526,great,8,2423,east_elmwood,2423
-3,2,857,good,2,4709,west_welmwood,4709
-1,1,520,good,5,1945,west_welmwood,1945
-1,1,508,great,6,2417,east_elmwood,2417
-3,2,1158,poor,53,4455,northwest,4160.97
-2,1,835,good,5,3383,west_welmwood,3383
-2,1,851,good,10,3310,west_welmwood,3310
-2,1,784,good,5,3287,west_welmwood,3287
-3,2,852,good,13,4550,west_welmwood,4550
-1,1,508,great,5,2484,east_elmwood,2484
-2,1,527,good,11,2990,west_welmwood,2990
-2,1,715,poor,64,2685,northwest,2448.72
-0,1,134,good,7,1513,west_welmwood,1513
-0,1,541,poor,22,1764,northwest,1756.944
-0,1,501,poor,41,1444,northwest,1383.352
-1,1,622,poor,56,1538,northwest,1427.264
-0,1,401,poor,40,1373,northwest,1318.08
-3,2,1167,poor,50,4535,northwest,4262.9
-0,1,196,poor,45,1133,northwest,1076.35
-3,2,916,good,7,4706,west_welmwood,4706
-1,1,536,good,2,1978,west_welmwood,1978
-0,1,174,good,12,1424,west_welmwood,1424
-3,2,1113,good,7,4869,west_welmwood,4869
-1,1,377,poor,62,1266,northwest,1159.656
-0,1,270,great,2,2134,east_elmwood,2134
-3,2,879,good,5,4670,west_welmwood,4670
-3,2,875,poor,22,4490,northwest,4472.04
-1,1,675,good,2,2189,west_welmwood,2189
-3,2,907,poor,25,4498,northwest,4453.02
-0,1,300,poor,35,1392,northwest,1350.24
-2,1,939,great,11,3895,east_elmwood,3895
-1,1,521,good,1,2004,west_welmwood,2004
-1,1,359,poor,45,1430,northwest,1358.5
-3,2,1246,good,7,4980,west_welmwood,4980
-2,1,530,good,1,3138,west_welmwood,3138
-2,1,501,good,11,2952,west_welmwood,2952
-0,1,307,poor,51,1243,northwest,1165.934
-3,2,1026,good,3,4834,west_welmwood,4834
-2,1,869,good,10,3406,west_welmwood,3406
-0,1,108,poor,45,1025,northwest,973.75
-3,2,1010,good,9,4813,west_welmwood,4813
-0,1,197,good,2,1622,west_welmwood,1622
-0,1,282,poor,38,1311,northwest,1263.804
-3,2,1096,poor,26,4685,northwest,4628.78
-0,1,537,good,12,1817,west_welmwood,1817
-2,1,939,poor,39,3118,northwest,2999.516
-0,1,503,great,7,2313,east_elmwood,2313
-2,1,914,good,12,3432,west_welmwood,3432
-2,1,799,poor,49,2874,northwest,2707.308
-1,1,594,poor,16,1886,northwest,1886
-2,1,911,good,5,3478,west_welmwood,3478
-0,1,286,good,10,1543,west_welmwood,1543
-0,1,433,good,14,1685,west_welmwood,1685
-1,1,392,good,4,1842,west_welmwood,1842
-0,1,222,good,4,1575,west_welmwood,1575
-2,1,939,poor,36,3162,northwest,3060.816
-2,1,507,good,2,3070,west_welmwood,3070
-1,1,410,poor,18,1736,northwest,1736
-0,1,434,poor,21,1636,northwest,1632.728
-1,1,385,good,5,1883,west_welmwood,1883
-2,1,821,poor,46,2942,northwest,2789.016
-1,1,659,good,5,2136,west_welmwood,2136
-3,2,1167,good,7,4938,west_welmwood,4938
-2,1,836,good,9,3335,west_welmwood,3335
-3,2,1236,poor,57,4563,northwest,4225.338
-0,1,456,poor,33,1564,northwest,1523.336
-0,1,421,poor,44,1365,northwest,1299.48
-0,1,461,poor,26,1562,northwest,1543.256
-3,2,1060,good,0,4875,west_welmwood,4875
-0,1,138,good,2,1491,west_welmwood,1491
-1,1,480,good,1,1924,west_welmwood,1924
-0,1,341,poor,17,1559,northwest,1559
-2,1,570,great,1,3670,east_elmwood,3670
-2,1,580,poor,53,2629,northwest,2455.486
-1,1,520,poor,29,1764,northwest,1732.248
-2,1,686,poor,20,3118,northwest,3118
-3,2,1183,great,3,5521,east_elmwood,5521
-0,1,380,good,2,1800,west_welmwood,1800
-2,1,727,poor,23,3098,northwest,3079.412
-3,2,1097,good,0,4910,west_welmwood,4910
-2,1,703,good,5,3255,west_welmwood,3255
-3,2,1046,great,14,5214,east_elmwood,5214
-0,1,336,great,8,2195,east_elmwood,2195
-2,1,500,poor,33,2778,northwest,2705.772
-1,1,578,poor,16,1906,northwest,1906
-1,1,571,great,3,2562,east_elmwood,2562
-2,1,735,good,11,3213,west_welmwood,3213
-0,1,384,poor,35,1394,northwest,1352.18
-0,1,200,great,6,1996,east_elmwood,1996
-2,1,612,good,9,3169,west_welmwood,3169
-1,1,619,good,2,2126,west_welmwood,2126
-2,1,905,poor,24,3282,northwest,3255.744
-0,1,307,good,3,1679,west_welmwood,1679
-3,2,962,good,1,4824,west_welmwood,4824
-1,1,653,good,4,2089,west_welmwood,2089
-2,1,638,good,3,3219,west_welmwood,3219
-2,1,666,good,7,3237,west_welmwood,3237
-0,1,235,poor,40,1186,northwest,1138.56
-0,1,487,poor,39,1483,northwest,1426.646
-3,2,919,great,1,5254,east_elmwood,5254
-0,1,504,great,3,2369,east_elmwood,2369
-0,1,532,great,2,2409,east_elmwood,2409
-0,1,216,good,6,1547,west_welmwood,1547
-2,1,848,poor,44,3015,northwest,2870.28
-3,2,1222,poor,15,4924,northwest,4924
-3,2,1159,poor,56,4404,northwest,4086.912
-2,1,712,great,14,3632,east_elmwood,3632
-0,1,411,poor,44,1322,northwest,1258.544
-3,2,1147,poor,19,4809,northwest,4809
-2,1,652,poor,57,2728,northwest,2526.128
-1,1,504,great,13,2416,east_elmwood,2416
-1,1,619,poor,20,1872,northwest,1872
-1,1,728,good,1,2254,west_welmwood,2254
-2,1,878,poor,47,3018,northwest,2855.028
-1,1,680,good,5,2133,west_welmwood,2133
-1,1,719,poor,41,1822,northwest,1745.476
-2,1,923,good,2,3458,west_welmwood,3458
-1,1,424,poor,41,1513,northwest,1449.454
-1,1,397,poor,26,1640,northwest,1620.32
-0,1,131,poor,37,1111,northwest,1073.226
-1,1,461,good,6,1863,west_welmwood,1863
-2,1,835,poor,38,3083,northwest,2972.012
-0,1,320,poor,36,1311,northwest,1269.048
-0,1,415,good,4,1814,west_welmwood,1814
-3,2,1034,good,13,4729,west_welmwood,4729
-2,1,786,poor,28,3144,northwest,3093.696
-3,2,807,poor,59,4091,northwest,3771.902
-2,1,724,good,14,3149,west_welmwood,3149
-2,1,512,good,6,3098,west_welmwood,3098
-2,1,753,great,12,3737,east_elmwood,3737
-1,1,521,good,5,1944,west_welmwood,1944
-0,1,147,good,13,1369,west_welmwood,1369
-1,1,627,poor,55,1610,northwest,1497.3
-0,1,174,good,8,1495,west_welmwood,1495
-1,1,590,good,9,1972,west_welmwood,1972
-1,1,655,good,1,2138,west_welmwood,2138
-0,1,506,poor,26,1601,northwest,1581.788
-3,2,1084,poor,16,4724,northwest,4724
\ No newline at end of file
diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
new file mode 100755
--- /dev/null
+++ b/tests/integration_tests/api/test_http.py
@@ -0,0 +1,118 @@
+from subprocess import Popen
+import time
+import os
+import signal
+from random import randint
+
+import unittest
+import requests
+import runpy
+
+
+rand = randint(0,pow(10,12))
+ds_name = f'hr_ds_{rand}'
+pred_name = f'hr_predictor_{rand}'
+root = 'http://localhost:47334'
+
+class HTTPTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.sp = Popen(['python3', '-m', 'mindsdb', '--api', 'http'], close_fds=True)
+
+ for i in range(20):
+ try:
+ res = requests.get(f'{root}/util/ping')
+ if res.status_code != 200:
+ raise Exception('')
+ except:
+ time.sleep(1)
+ if i == 19:
+ raise Exception("Can't connect !")
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ cls.sp.kill()
+ except:
+ pass
+
+ def test_1_put_ds(self):
+ # PUT datasource
+ params = {
+ 'name': ds_name,
+ 'source_type': 'url',
+ 'source': 'https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/home_rentals/dataset/train.csv'
+ }
+ url = f'{root}/datasources/{ds_name}'
+ res = requests.put(url, json=params)
+ assert res.status_code == 200
+
+ def test_2_analyze(self):
+ response = requests.get(f'{root}/datasources/{ds_name}/analyze')
+ assert response.status_code == 200
+
+ def test_3_put_predictor(self):
+ # PUT predictor
+ params = {
+ 'data_source_name': ds_name,
+ 'to_predict': 'rental_price',
+ 'kwargs': {
+ 'stop_training_in_x_seconds': 5,
+ 'join_learn_process': True
+ }
+ }
+ url = f'{root}/predictors/{pred_name}'
+ res = requests.put(url, json=params)
+ assert res.status_code == 200
+
+ # POST predictions
+ params = {
+ 'when': {'sqft':500}
+ }
+ url = f'{root}/predictors/{pred_name}/predict'
+ res = requests.post(url, json=params)
+ assert isinstance(res.json()[0]['rental_price']['predicted_value'],float)
+ assert res.status_code == 200
+
+ def test_3_datasources(self):
+ """
+ Call list datasources endpoint
+ THEN check the response is success
+ """
+ response = requests.get(f'{root}/datasources/')
+ assert response.status_code == 200
+
+ def test_4_datasource_not_found(self):
+ """
+ Call unexisting datasource
+ then check the response is NOT FOUND
+ """
+ response = requests.get(f'{root}/datasource/dummy_source')
+ assert response.status_code == 404
+
+ def test_5_ping(self):
+ """
+ Call utilities ping endpoint
+ THEN check the response is success
+ """
+ response = requests.get(f'{root}/util/ping')
+ assert response.status_code == 200
+
+ def test_6_predictors(self):
+ """
+ Call list predictors endpoint
+ THEN check the response is success
+ """
+ response = requests.get(f'{root}/predictors/')
+ assert response.status_code == 200
+
+ def test_6_predictor_not_found(self):
+ """
+ Call unexisting predictor
+ then check the response is NOT FOUND
+ """
+ response = requests.get(f'{root}/predictors/dummy_predictor')
+ assert response.status_code == 404
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/integration_tests/common.py b/tests/integration_tests/common.py
new file mode 100755
--- /dev/null
+++ b/tests/integration_tests/common.py
@@ -0,0 +1,46 @@
+import psutil
+import time
+import pathlib
+import os
+import json
+
+def is_port_in_use(port_num):
+ portsinuse = []
+ conns = psutil.net_connections()
+ portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
+ portsinuse.sort()
+ return int(port_num) in portsinuse
+
+def wait_port(port_num, timeout):
+ start_time = time.time()
+
+ in_use = is_port_in_use(port_num)
+ while in_use is False and (time.time() - start_time) < timeout:
+ time.sleep(2)
+ in_use = is_port_in_use(port_num)
+
+ return in_use
+
+def prepare_config(config):
+ for key in config._config['integrations'].keys():
+ config._config['integrations'][key]['enabled'] = key == 'default_mariadb'
+
+ TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../temp/').resolve()
+ TEMP_DIR.mkdir(parents=True, exist_ok=True)
+
+ config.merge({
+ 'interface': {
+ 'datastore': {
+ 'storage_dir': str(TEMP_DIR.joinpath('datastore/'))
+ },
+ 'mindsdb_native': {
+ 'storage_dir': str(TEMP_DIR.joinpath('predictors/'))
+ }
+ }
+ })
+
+ temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
+ with open(temp_config_path, 'wt') as f:
+ f.write(json.dumps(config._config))
+
+ return temp_config_path
diff --git a/tests/integration_tests/flows/_test_mariadb.py b/tests/integration_tests/flows/_test_mariadb.py
new file mode 100755
--- /dev/null
+++ b/tests/integration_tests/flows/_test_mariadb.py
@@ -0,0 +1,361 @@
+import unittest
+import requests
+import os
+import csv
+import time
+import inspect
+import subprocess
+import pathlib
+import atexit
+import json
+
+import mysql.connector
+
+from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.utilities.config import Config
+from mindsdb.interfaces.database.database import DatabaseWrapper
+
+from common import wait_port, prepare_config
+
+TEST_CONFIG = '/path_to/config.json'
+
+START_TIMEOUT = 15
+
+TEST_CSV = {
+ 'name': 'home_rentals.csv',
+ 'url': 'https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv'
+}
+TEST_DATA_TABLE = 'home_rentals'
+TEST_PREDICTOR_NAME = 'test_predictor'
+
+config = Config(TEST_CONFIG)
+
+def query(q, as_dict=False):
+ con = mysql.connector.connect(
+ host=config['integrations']['default_mariadb']['host'],
+ port=config['integrations']['default_mariadb']['port'],
+ user=config['integrations']['default_mariadb']['user'],
+ passwd=config['integrations']['default_mariadb']['password'],
+ db='mindsdb'
+ )
+
+ cur = con.cursor(dictionary=as_dict)
+ cur.execute(q)
+ res = True
+ try:
+ res = cur.fetchall()
+ except:
+ pass
+ con.commit()
+ con.close()
+ return res
+
+def create_churn_dataset(self):
+ for mode in ['train','test']:
+ os.system(f'rm {test_csv}')
+ cls.mdb = MindsdbNative(config)
+
+ if os.path.isfile(test_csv) is False:
+ r = requests.get(f"https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/churn/dataset/{mode}.csv")
+ with open(test_csv, 'wb') as f:
+ f.write(r.content)
+
+ models = cls.mdb.get_models()
+ models = [x['name'] for x in models]
+ if TEST_PREDICTOR_NAME in models:
+ cls.mdb.delete_model(TEST_PREDICTOR_NAME)
+
+ query('create database if not exists test')
+ test_tables = query('show tables from test')
+ test_tables = [x[0] for x in test_tables]
+ if TEST_DATA_TABLE not in test_tables:
+ query(f'DROP TABLE IF EXISTS data.{TEST_DATA_TABLE}_{mode}')
+ query(f'''
+ CREATE TABLE data.{TEST_DATA_TABLE}_{mode} (
+ CreditScore int,
+ Geography varchar(300),
+ Gender varchar(300),
+ Age int,
+ Tenure int,
+ Balance float,
+ NumOfProducts int,
+ HasCrCard int,
+ IsActiveMember int,
+ EstimatedSalary float,
+ Exited int
+ )
+ ''')
+ with open(test_csv) as f:
+ csvf = csv.reader(f)
+ i = 0
+ for row in csvf:
+ if i > 0:
+ CreditScore = int(row[0])
+ Geography = str(row[1])
+ Gender = str(row[2])
+ Age = int(row[3])
+ Tenure = int(row[4])
+ Balance = float(row[5])
+ NumOfProducts = int(row[6])
+ HasCrCard = int(row[7])
+ IsActiveMember = int(row[8])
+ EstimatedSalary = float(row[9])
+ Exited = int(row[10])
+
+ query(f'''INSERT INTO data.{TEST_DATA_TABLE}_{mode} VALUES (
+ {CreditScore},
+ '{Geography}',
+ '{Gender}',
+ {Age},
+ {Tenure},
+ {Balance},
+ {NumOfProducts},
+ {HasCrCard},
+ {IsActiveMember},
+ {EstimatedSalary},
+ {Exited}
+ )''')
+ i += 1
+ os.system(f'rm {test_csv}')
+
+class MariaDBTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.mdb = MindsdbNative(config)
+
+ models = cls.mdb.get_models()
+ models = [x['name'] for x in models]
+ if TEST_PREDICTOR_NAME in models:
+ cls.mdb.delete_model(TEST_PREDICTOR_NAME)
+
+ query('create database if not exists test')
+ test_tables = query('show tables from test')
+ test_tables = [x[0] for x in test_tables]
+ if TEST_DATA_TABLE not in test_tables:
+ print('creating test data table...')
+ query(f'''
+ CREATE TABLE test.{TEST_DATA_TABLE} (
+ number_of_rooms int,
+ number_of_bathrooms int,
+ sqft int,
+ location varchar(100),
+ days_on_market int,
+ initial_price int,
+ neighborhood varchar(100),
+ rental_price int
+ )
+ ''')
+
+ test_csv_path = str(pathlib.Path(__file__).parent.absolute().joinpath('../temp/', TEST_CSV['name']).resolve())
+ if os.path.isfile(test_csv_path) is False:
+ r = requests.get(TEST_CSV['url'])
+ with open(test_csv_path, 'wb') as f:
+ f.write(r.content)
+ with open(test_csv_path) as f:
+ csvf = csv.reader(f)
+ i = 0
+ for row in csvf:
+ if i > 0:
+ number_of_rooms = int(row[0])
+ number_of_bathrooms = int(row[1])
+ sqft = int(float(row[2].replace(',','.')))
+ location = str(row[3])
+ days_on_market = int(row[4])
+ initial_price = int(row[5])
+ neighborhood = str(row[6])
+ rental_price = int(float(row[7]))
+ query(f'''INSERT INTO test.{TEST_DATA_TABLE} VALUES (
+ {number_of_rooms},
+ {number_of_bathrooms},
+ {sqft},
+ '{location}',
+ {days_on_market},
+ {initial_price},
+ '{neighborhood}',
+ {rental_price}
+ )''')
+ i += 1
+ print('done')
+
+ def test_1_initial_state(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ print('Check all testing objects not exists')
+
+ print(f'Predictor {TEST_PREDICTOR_NAME} not exists')
+ models = [x['name'] for x in self.mdb.get_models()]
+ self.assertTrue(TEST_PREDICTOR_NAME not in models)
+
+ print(f'Test datasource exists')
+ test_tables = query('show tables from test')
+ test_tables = [x[0] for x in test_tables]
+ self.assertTrue(TEST_DATA_TABLE in test_tables)
+
+ print(f'Test predictor table not exists')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x[0] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
+
+ print(f'mindsdb.predictors table exists')
+ self.assertTrue('predictors' in mindsdb_tables)
+
+ print(f'mindsdb.commands table exists')
+ self.assertTrue('commands' in mindsdb_tables)
+
+
+ def test_2_insert_predictor(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ query(f"""
+ insert into mindsdb.predictors (name, predict_cols, select_data_query, training_options) values
+ (
+ '{TEST_PREDICTOR_NAME}',
+ 'rental_price, location',
+ 'select * from test.{TEST_DATA_TABLE} limit 100',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print(f'predictor record in mindsdb.predictors')
+ res = query(f"select status from mindsdb.predictors where name = '{TEST_PREDICTOR_NAME}'", as_dict=True)
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print(f'predictor table in mindsdb db')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x[0] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
+
+ def test_3_query_predictor(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ res = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{TEST_PREDICTOR_NAME} where sqft=1000;
+ """, as_dict=True)
+
+ print('check result')
+ self.assertTrue(len(res) == 1)
+
+ res = res[0]
+
+ self.assertTrue(res['rental_price'] is not None and res['rental_price'] != 'None')
+ self.assertTrue(res['location'] is not None and res['location'] != 'None')
+ self.assertTrue(res['sqft'] == 1000)
+ self.assertIsInstance(res['rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_min'], float)
+ self.assertIsInstance(res['rental_price_max'], float)
+ self.assertIsInstance(res['rental_price_explain'], str)
+ self.assertTrue(res['number_of_rooms'] == 'None' or res['number_of_rooms'] is None)
+
+ def test_4_range_query(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+
+ results = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{TEST_PREDICTOR_NAME} where select_data_query='select * from test.{TEST_DATA_TABLE} limit 3';
+ """, as_dict=True)
+
+ print('check result')
+ self.assertTrue(len(results) == 3)
+ for res in results:
+ self.assertTrue(res['rental_price'] is not None and res['rental_price'] != 'None')
+ self.assertTrue(res['location'] is not None and res['location'] != 'None')
+ self.assertIsInstance(res['rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_min'], float)
+ self.assertIsInstance(res['rental_price_max'], float)
+ self.assertIsInstance(res['rental_price_explain'], str)
+
+ def test_5_delete_predictor_by_command(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+
+ query(f"""
+ insert into mindsdb.commands values ('delete predictor {TEST_PREDICTOR_NAME}');
+ """)
+
+ print(f'Predictor {TEST_PREDICTOR_NAME} not exists')
+ models = [x['name'] for x in self.mdb.get_models()]
+ self.assertTrue(TEST_PREDICTOR_NAME not in models)
+
+ print(f'Test predictor table not exists')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x[0] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
+
+ def test_6_insert_predictor_again(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ self.test_2_insert_predictor()
+
+ def test_7_delete_predictor_by_delete_statement(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ query(f"""
+ delete from mindsdb.predictors where name='{TEST_PREDICTOR_NAME}';
+ """)
+
+ print(f'Predictor {TEST_PREDICTOR_NAME} not exists')
+ models = [x['name'] for x in self.mdb.get_models()]
+ self.assertTrue(TEST_PREDICTOR_NAME not in models)
+
+ print(f'Test predictor table not exists')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x[0] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
+
+
+def wait_mysql(timeout):
+ global config
+ m = DatabaseWrapper(config)
+
+ start_time = time.time()
+
+ connected = m.check_connections()['default_mariadb']
+ while not connected and (time.time() - start_time) < timeout:
+ time.sleep(2)
+ connected = m.check_connections()['default_mariadb']
+
+ return connected
+
+def stop_mariadb():
+ maria_sp = subprocess.Popen(
+ ['./cli.sh', 'mariadb-stop'],
+ cwd=pathlib.Path(__file__).parent.absolute().joinpath('../docker/').resolve(),
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL
+ )
+ maria_sp.wait()
+
+if __name__ == "__main__":
+ temp_config_path = prepare_config(config)
+
+ maria_sp = subprocess.Popen(
+ ['./cli.sh', 'mariadb'],
+ cwd=pathlib.Path(__file__).parent.absolute().joinpath('../docker/').resolve(),
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL
+ )
+ atexit.register(stop_mariadb)
+ maria_ready = wait_mysql(START_TIMEOUT)
+
+ if maria_ready:
+ sp = subprocess.Popen(
+ ['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', temp_config_path],
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL
+ )
+ atexit.register(sp.kill)
+ port_num = config['api']['mysql']['port']
+ api_ready = maria_ready and wait_port(port_num, START_TIMEOUT)
+
+ try:
+ if maria_ready is False or api_ready is False:
+ print(f'Failed by timeout. MariaDB started={maria_ready}, MindsDB started={api_ready}')
+ raise Exception()
+ unittest.main(failfast=True)
+ print('Tests passed !')
+ except Exception as e:
+ print('Tests Failed !')
+ print(e)
+ print('done')
diff --git a/tests/integration_tests/flows/config/cert.pem b/tests/integration_tests/flows/config/cert.pem
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/config/cert.pem
@@ -0,0 +1,47 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAlsXuxVcQEpLiB5PS96uk/zdJ00u8x1DwuL+/E2ObWaBabK4G
+wP3J8OV1mGgZFemQvz7bt5K8clE/GtW4gvqZuFnFcZ6XyPY9slRNsdChtxD3AqrP
+2Ipi+4WrgN9pOZ/lJXsJv7C16Ezpwz384c22xTR/ALPzg8eFPf00FcMbJ30Vi2Tr
+jWTHx9ZdLRb41cGLBedm3t/Nk9VKuAhDTCywwy7n3mfUAk6o2Ij9+g5zSEXs7ApA
+SvaGblv+mGviWHrjg58bpUD6od+ff2Dxf9qXnhSfuol0DEmfJ1LTykAYZjvsx9E3
+tZLXbewaP4QZgZxJA0eFxWWT+eE/SseCFXFNQQIDAQABAoIBAQCMYtRGTyCaCsYH
+RC8liV7ifDpzUYv/EnBxtahWh2J4+4P2ubt0K1rCwQTT2emxq36pc//lV+27i8b0
+V5er7WcAbiYX+xFdf1pHF5QYQyiwak104FMTTxeHFxlvr5Ysn6MfWQbkw8c8+5CV
+yLwUDBVoXc77epL9WuPHqmG6+aXpB3MEIER+S+MN0S4Gy96cZPKMrLU61ImCHlDD
+HITP1v2D+pzlEftdrT5wqk2nygtcElGBWHlM39m9P7SmQIEI0Yyn5HabCJAt+A6v
+dYk485e9tQNeM+o5FjYAZGGo8KyxMIuRMOdJEM1xx6d+K3SolQKqrNfNDidQG+mx
+N50tHqjZAoGBAMfDb7sMvTUv50yX+32scl7bfB09axtI9AfpgCNlY1gBeXHdWrLy
+E3mRgLetxG0cwIBw+9Rdn96OBb0rG/QxYxBMtWTN4Xmt3MJpJgzNPbKJlLPG38ks
+gBXLCSd5WKI3dTplqwB+mvfa05E2TS90MWCztm5K9o5MaIL6gEGAiVwbAoGBAME3
+3R+GwLiv7fDs7iZxNWbO0vOdM/ATp6pa9O6OU8H6ERSknmhKhFCnqWCtVX9TvwaW
+wNDf/AlkvXMhyyv1mHXy7/8eU27nNNezxIBwRlpqIzP8oHHS0n4y3v9EO7P2QMyj
+W40zKJxy5a8cyQ4Y9LCMpqt8UlvlfUJFLkOS71nTAoGAcstngKkAbfkaRRSti/m2
+O7aLa04pJ6PTwFw5q52O6jrmrD1U6rJb6KdpvaPY1g8IvIaw4Mfwn/cjayLDzJ84
+21wJfWUmZkM4Mmw0VnT3OIsBbhIh9bxh2qQznYCJI6cU8bsM+prGuRoCLFd1IYj2
++ygq9NSVPQ7BnEsRuxJ/XPMCgYBu+jEtjG5hJdtayBs5RUjQ1fZgKmjMed2RgVCx
+PFprTcBFxQ+fAz3gPYyRlL1Itsc/d5Mj5aKTmqMd38kcZKG/DVw/giGVz4KQahZx
+Vwtxnwi9MVrORbLU/U8L+xr3kHdDkohhH4Mx6OR3oebxC/Bu/zozwOJlGfjJ+YZL
+FbgYNQKBgQDAqOMdfvI2+HGFVBIoOG3CoD/FPlMYeNncc9y18WS19UJKqqCPkfJE
+CQuEqGsUBcbk+Nsz3GzOB9HpYj7eIsVsVLfgrl8BTk2xEKQd4rd9H4Bd6NMGI9A5
+1J+eI5q5Hx/D76K10KRGxTPDPSSH3NDPqUoNgyCOFg8s819GVVXPeA==
+-----END RSA PRIVATE KEY-----
+-----BEGIN CERTIFICATE-----
+MIIDSDCCAjCgAwIBAgIBATANBgkqhkiG9w0BAQsFADBdMRQwEgYDVQQDDAttZGJf
+YXV0b2dlbjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExETAPBgNV
+BAcMCEJlcmtlbGV5MRAwDgYDVQQKDAdNaW5kc0RCMB4XDTIwMDcwODE2MTU1OVoX
+DTMwMDcwNjE2MTU1OVowXTEUMBIGA1UEAwwLbWRiX2F1dG9nZW4xCzAJBgNVBAYT
+AlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMREwDwYDVQQHDAhCZXJrZWxleTEQMA4G
+A1UECgwHTWluZHNEQjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJbF
+7sVXEBKS4geT0verpP83SdNLvMdQ8Li/vxNjm1mgWmyuBsD9yfDldZhoGRXpkL8+
+27eSvHJRPxrVuIL6mbhZxXGel8j2PbJUTbHQobcQ9wKqz9iKYvuFq4DfaTmf5SV7
+Cb+wtehM6cM9/OHNtsU0fwCz84PHhT39NBXDGyd9FYtk641kx8fWXS0W+NXBiwXn
+Zt7fzZPVSrgIQ0wssMMu595n1AJOqNiI/foOc0hF7OwKQEr2hm5b/phr4lh644Of
+G6VA+qHfn39g8X/al54Un7qJdAxJnydS08pAGGY77MfRN7WS123sGj+EGYGcSQNH
+hcVlk/nhP0rHghVxTUECAwEAAaMTMBEwDwYDVR0TBAgwBgEB/wIBADANBgkqhkiG
+9w0BAQsFAAOCAQEABLcjodSFVsPsrWw+SI3oZW6uLUJRBcoAoBijFY7NxKuEmmI+
+o9GImZ2gq05aLEhMpj6Nm05DPM7sgCHSPLGR6YlRWFlYG7uUAY89rYM5z43dPwNY
+Jr1MZ/jSyrrUsIiU4so1119yjXr/tC3BBN5drquf2AWduTmrqUF5RihWYEQ6S/rq
+yhmB9fn3zUTr7uTpzdr0+t6UABBbvRTWIgXtLS/YuXu4xHNnakfnJh56xx0OKQJ2
+62D+eU3acoprusNAtD9KPNdUPEMe7rOSeZIPrgH5gSECsfEB57HPh+bw6KjRVW0e
+W38OOPIRuziW5aR6hWPw8Pk92tzd6yJQ/EK2hQ==
+-----END CERTIFICATE-----
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/config/config.json
@@ -0,0 +1,61 @@
+{
+ "api": {
+ "http": {
+ "host": "0.0.0.0",
+ "port": "47334"
+ },
+ "mysql": {
+ "certificate_path": "config/cert.pem",
+ "datasources": [],
+ "host": "127.0.0.1",
+ "log": {
+ "console_level": "INFO",
+ "file": "mysql.log",
+ "file_level": "INFO",
+ "folder": "logs/",
+ "format": "%(asctime)s - %(levelname)s - %(message)s"
+ },
+ "password": "",
+ "port": "47335",
+ "user": "mindsdb"
+ }
+ },
+ "config_version": 1,
+ "debug": true,
+ "integrations": {
+ "default_clickhouse": {
+ "enabled": true,
+ "host": "localhost",
+ "password": "",
+ "port": 8123,
+ "type": "clickhouse",
+ "user": "default"
+ },
+ "default_mariadb": {
+ "enabled": true,
+ "host": "localhost",
+ "password": "",
+ "port": 3306,
+ "type": "mariadb",
+ "user": "root"
+ }
+ },
+ "interface": {
+ "dataskillet": {
+ "enabled": false
+ },
+ "datastore": {
+ "enabled": true,
+ "storage_dir": "/tmp/datastore"
+ },
+ "lightwood": {
+ "enabled": true
+ },
+ "mindsdb_native": {
+ "enabled": true,
+ "storage_dir": "/tmp/predictors"
+ }
+ },
+ "pip_path": null,
+ "python_interpreter": null
+}
\ No newline at end of file
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
new file mode 100755
--- /dev/null
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -0,0 +1,135 @@
+from subprocess import Popen
+import time
+import os
+import signal
+import random
+import string
+import requests
+
+import unittest
+
+from mindsdb.utilities.config import Config
+import mindsdb
+
+rand = ''.join(random.choice(string.ascii_uppercase) for _ in range(11))
+ds_name = f'default.hr_ds_{rand}'
+pred_name = f'hr_predictor_{rand}'
+
+# Can't be a fixture since it's used in setup/teardown
+root = 'http://localhost:47334'
+
+def set_get_config_path():
+ os.environ['DEV_CONFIG_PATH'] = 'config'
+ return os.environ['DEV_CONFIG_PATH'] + '/config.json'
+
+def query_ch(query, database='default'):
+ config = Config(set_get_config_path())
+ add = ' FORMAT JSON'
+ for ele in ['drop ', 'create ','insert ', 'show ']:
+ if ele in query.lower():
+ add = ''
+ query += add
+
+ connect_string = 'http://{}:{}'.format(
+ config['integrations']['default_clickhouse']['host'],
+ config['integrations']['default_clickhouse']['port']
+ )
+
+ params = {'user': config['integrations']['default_clickhouse']['user'], 'password': config['integrations']['default_clickhouse']['password'], 'database': database}
+
+ res = requests.post(
+ connect_string,
+ data=query,
+ params=params
+ )
+
+ if res.status_code != 200:
+ print(f'Error in query: {query}')
+
+ if ' FORMAT JSON' in query:
+ return res.json()['data']
+ else:
+ return res.text
+
+class ClickhouseTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ set_get_config_path()
+
+ cls.sp = Popen(['python3', '-m', 'mindsdb'], close_fds=True)
+
+ for i in range(20):
+ try:
+ res = requests.get(f'{root}/util/ping')
+ if res.status_code != 200:
+ raise Exception('')
+ except:
+ time.sleep(1)
+ if i == 19:
+ raise Exception("Can't connect !")
+
+ #query_ch('DROP DATABASE mindsdb')
+ query_ch(f"""
+ CREATE TABLE {ds_name} (number_of_rooms String, number_of_bathrooms String, sqft Int64, location String, days_on_market Int64, initial_price Int64, neighborhood String, rental_price Float64) ENGINE=URL('https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/home_rentals/dataset/train.csv', CSVWithNames)
+ """)
+
+ @classmethod
+ def tearDownClass(cls):
+ try:
+ pgrp = os.getpgid(cls.sp.pid)
+ os.killpg(pgrp, signal.SIGINT)
+ os.remove(set_get_config_path())
+ os.system('fuser -k 47335/tcp ; fuser -k 47334/tcp')
+ except:
+ pass
+
+ def test_1_setup(self):
+ result = query_ch(f"show tables", 'mindsdb')
+ assert 'predictors' in result
+ assert 'commands' in result
+
+ def test_2_learn(self):
+ q = f"""
+ insert into mindsdb.predictors
+ (name, predict_cols, select_data_query, training_options)
+ values (
+ '{pred_name}',
+ 'rental_price',
+ 'SELECT * FROM {ds_name} LIMIT 400',
+ '{{"stop_training_in_x_seconds": 10}}'
+ )
+ """
+ result = query_ch(q)
+ time.sleep(40)
+ for i in range(40):
+ try:
+ result = query_ch(f"SELECT name FROM mindsdb.predictors where name='{pred_name}'")
+ if result[0]['name'] != pred_name:
+ raise Exception('not ready yet !')
+ except:
+ time.sleep(1)
+ if i == 39:
+ raise Exception("Can't get predictor !")
+
+ result = query_ch(f"show tables", 'mindsdb')
+ assert pred_name in result
+
+ def test_3_predict_from_where(self):
+ result = query_ch(f"SELECT rental_price FROM mindsdb.{pred_name} where sqft=1000 and location='good'")
+ assert len(result) == 1
+ assert 'rental_price' in result[0]
+
+ def test_4_predict_from_query(self):
+ len_ds = query_ch(f'SELECT COUNT(*) as len from {ds_name}')[0]['len']
+ result = query_ch(f""" SELECT rental_price, rental_price_explain, rental_price_confidence, rental_price_max, rental_price_min FROM mindsdb.{pred_name} where select_data_query=='SELECT * FROM {ds_name}' """)
+
+ assert int(len(result)) == int(len_ds)
+ for res in result:
+ assert 'rental_price' in res
+ assert 'rental_price_explain' in res
+ assert 'rental_price_confidence' in res
+ assert 'rental_price_max' in res
+ assert 'rental_price_min' in res
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/integration_tests/test_clickhouse.py b/tests/integration_tests/test_clickhouse.py
deleted file mode 100644
--- a/tests/integration_tests/test_clickhouse.py
+++ /dev/null
@@ -1,191 +0,0 @@
-import unittest
-import requests
-import os
-import csv
-import time
-import inspect
-import subprocess
-
-import MySQLdb
-
-from mindsdb.interfaces.native.mindsdb import MindsdbNative
-from mindsdb.utilities.config import Config
-
-
-TEST_CONFIG = '/home/maxs/dev/mdb/venv/sources/mindsdb/test_config.json'
-
-test_csv = 'tests/home_rentals.csv'
-test_data_table = 'home_rentals_400'
-test_predictor_name = 'test_predictor_400'
-
-config = Config(TEST_CONFIG)
-
-def query_ch(query):
- if 'CREATE ' not in query.upper() and 'INSERT ' not in query.upper():
- query += ' FORMAT JSON'
-
- user = config['integrations']['default_clickhouse']['user']
- password = config['integrations']['default_clickhouse']['password']
-
- connect_string = 'http://{}:{}'.format(
- 'localhost',
- 8123
- )
-
- params = {}
-
- params = {'user': 'default'}
- try:
- params['user'] = config['integrations']['default_clickhouse']['user']
- except:
- pass
-
- try:
- params['password'] = config['integrations']['default_clickhouse']['password']
- except:
- pass
-
- res = requests.post(
- connect_string,
- data=query,
- params=params
- )
-
- if ' FORMAT JSON' in query:
- res = res.json()['data']
-
- return res
-
-class ClickhouseTest(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
- cls.mdb = MindsdbNative(config)
-
- if os.path.isfile(test_csv) is False:
- r = requests.get("https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv")
- with open(test_csv, 'wb') as f:
- f.write(r.content)
-
- models = cls.mdb.get_models()
- models = [x['name'] for x in models]
- if test_predictor_name in models:
- cls.mdb.delete_model(test_predictor_name)
-
- query_ch('create database if not exists test')
- test_tables = query_ch('show tables from test')
- test_tables = [x['name'] for x in test_tables]
- if test_data_table not in test_tables:
- query_ch(f'''
- CREATE TABLE test.{test_data_table} (
- number_of_rooms Int8,
- number_of_bathrooms Int8,
- sqft Int32,
- location String,
- days_on_market Int16,
- initial_price Int32,
- neighborhood String,
- rental_price Int32
- ) ENGINE = TinyLog()
- ''')
- with open(test_csv) as f:
- csvf = csv.reader(f)
- i = 0
- for row in csvf:
- if i > 0:
- number_of_rooms = int(row[0])
- number_of_bathrooms = int(row[1])
- sqft = int(float(row[2].replace(',','.')))
- location = str(row[3])
- days_on_market = int(row[4])
- initial_price = int(row[5])
- neighborhood = str(row[6])
- rental_price = int(float(row[7]))
- query_ch(f'''INSERT INTO test.{test_data_table} VALUES (
- {number_of_rooms},
- {number_of_bathrooms},
- {sqft},
- '{location}',
- {days_on_market},
- {initial_price},
- '{neighborhood}',
- {rental_price}
- )''')
- i += 1
-
- def test_1_predictor_record_not_exists(self):
- print(f'Executing {inspect.stack()[0].function}')
- # try:
- # pass
- # except expression as identifier:
- # pass
- result = query_ch(f"select name from mindsdb.predictors where name='{test_predictor_name}'")
- self.assertTrue(len(result) == 0)
- print('Passed')
-
- def test_2_predictor_table_not_exists(self):
- print(f'Executing {inspect.stack()[0].function}')
- result = query_ch(f"show tables from mindsdb")
- result = [x['name'] for x in result]
- self.assertTrue(test_predictor_name not in result)
- print('Passed')
-
- def test_3_learn_predictor(self):
- print('Executing test 3')
- q = f"""
- insert into mindsdb.predictors
- (name, predict_cols, select_data_query, training_options)
- values (
- '{test_predictor_name}',
- 'rental_price',
- 'select * from test.{test_data_table} limit 1000',
- '{{"stop_training_in_x_seconds": 30}}'
- )
- """
- result = query_ch(q)
-
- time.sleep(40)
-
- result = query_ch(f"select name from mindsdb.predictors where name='{test_predictor_name}'")
- # check status!
- self.assertTrue(len(result) == 1)
-
- result = query_ch(f"show tables from mindsdb")
- result = [x['name'] for x in result]
- self.assertTrue(test_predictor_name in result)
-
- def test_4_query(self):
- print('Executing test 4')
- result = query_ch(f"select rental_price from mindsdb.{test_predictor_name} where sqft=1000 and location='good'")
- self.assertTrue(len(result) == 1 and 'rental_price' in result[0])
-
-def wait_mysql(timeout):
- config
-
-
- con = MySQLdb.connect(
- config['api']['mysql']['host'],
- USER,
- PASSWORD,
- DATABASE
- )
-
- cur = con.cursor()
-
- cur.execute('DROP TABLE IF EXISTS test_mindsdb')
- cur.execute('CREATE TABLE test_mindsdb(col_1 Text, col_2 BIGINT, col_3 BOOL)')
- for i in range(0,200):
- cur.execute(f'INSERT INTO test_mindsdb VALUES ("This is tring number {i}", {i}, {i % 2 == 0})')
- con.commit()
- con.close()
-
-if __name__ == "__main__":
- sp = subprocess.Popen(['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', TEST_CONFIG])
- try:
- time.sleep(12)
- unittest.main()
- print('Tests passed !')
- except:
- print('Tests Failed !')
- finally:
- sp.terminate()
- print('done')
\ No newline at end of file
diff --git a/tests/integration_tests/test_mariadb.py b/tests/integration_tests/test_mariadb.py
deleted file mode 100644
--- a/tests/integration_tests/test_mariadb.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import unittest
-import requests
-import os
-import csv
-import time
-import inspect
-import subprocess
-
-import MySQLdb
-
-from mindsdb.interfaces.native.mindsdb import MindsdbNative
-from mindsdb.utilities.config import Config
-
-
-#TEST_CONFIG = '/home/maxs/dev/mdb/venv/sources/mindsdb/test_config.json'
-TEST_CONFIG = '/home/george/mindsdb/etc/config.json'
-
-test_csv = 'tmp.csv'
-test_data_table = 'churn'
-test_predictor_name = 'test_predictor'
-
-
-config = Config(TEST_CONFIG)
-
-def query(q):
- con = MySQLdb.connect(
- host=config['integrations']['default_mariadb']['host'],
- port=config['integrations']['default_mariadb']['port'],
- user=config['integrations']['default_mariadb']['user'],
- passwd=config['integrations']['default_mariadb']['password'],
- db='mindsdb'
- )
-
- cur = con.cursor()
- cur.execute(q)
- resp = cur.fetchall()
- con.commit()
- con.close()
- return resp
-
-class MariadbInsert(unittest.TestCase):
- @classmethod
- def setUpClass(cls):
-
- for mode in ['train','test']:
- os.system(f'rm {test_csv}')
- cls.mdb = MindsdbNative(config)
-
- if os.path.isfile(test_csv) is False:
- r = requests.get(f"https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/churn/dataset/{mode}.csv")
- with open(test_csv, 'wb') as f:
- f.write(r.content)
-
- models = cls.mdb.get_models()
- models = [x['name'] for x in models]
- if test_predictor_name in models:
- cls.mdb.delete_model(test_predictor_name)
-
- query('create database if not exists test')
- test_tables = query('show tables from test')
- test_tables = [x[0] for x in test_tables]
- if test_data_table not in test_tables:
- query(f'DROP TABLE IF EXISTS data.{test_data_table}_{mode}')
- query(f'''
- CREATE TABLE data.{test_data_table}_{mode} (
- CreditScore int,
- Geography varchar(300),
- Gender varchar(300),
- Age int,
- Tenure int,
- Balance float,
- NumOfProducts int,
- HasCrCard int,
- IsActiveMember int,
- EstimatedSalary float,
- Exited int
- )
- ''')
- with open(test_csv) as f:
- csvf = csv.reader(f)
- i = 0
- for row in csvf:
- if i > 0:
- CreditScore = int(row[0])
- Geography = str(row[1])
- Gender = str(row[2])
- Age = int(row[3])
- Tenure = int(row[4])
- Balance = float(row[5])
- NumOfProducts = int(row[6])
- HasCrCard = int(row[7])
- IsActiveMember = int(row[8])
- EstimatedSalary = float(row[9])
- Exited = int(row[10])
-
- query(f'''INSERT INTO data.{test_data_table}_{mode} VALUES (
- {CreditScore},
- '{Geography}',
- '{Gender}',
- {Age},
- {Tenure},
- {Balance},
- {NumOfProducts},
- {HasCrCard},
- {IsActiveMember},
- {EstimatedSalary},
- {Exited}
- )''')
- i += 1
- os.system(f'rm {test_csv}')
-
- def test_1_pass(self):
- print(f'Executing {inspect.stack()[0].function}')
- self.assertTrue(True)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/ressources/cert.pem b/tests/ressources/cert.pem
deleted file mode 100644
--- a/tests/ressources/cert.pem
+++ /dev/null
@@ -1,49 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDeItDNfMko2uLe
-gVQUzPTLXx2hod9LKIN+S01TpwJvLi5185JZ8YhquwZnI19VvLQLyOIUMGeVxVvg
-GPgkRA58liO4oyCjZGYJygwAN0gkMvJXeMXuNeezlfm3xiVvCk0+l6Vb8FqyJod/
-P9sr6CyfQPolXSngZ8C7M/Fz3YWQ4O26L9in79eynCeJodXmzHsdrLHwKKOhv3F9
-Chp5Rx82KRvoFrdhmUEGM+l4Aq1J1iUVEsAQHHiMWhJhfsiC1GjGErOjSfWKw9e0
-ckbgqY/2ypXHdjsg0mI3eLO3E29SnG8OCWDLRcJVU1oHeX8hqk04c0urXhVQThN3
-X6ecXD1dAgMBAAECggEAEVxjOUwhZKIGzSEKcz25fBOI+1LtYrBd5ob4GiuZUxsm
-4m0Q6RqpcL4BOBpZnxfKcolWsgE+d0QfdBo/eoYfI7mQPSPyrxJvryAtY+7uInYg
-3pk/zuhDnZOBGs3PqygA9X1gnRjh3b6JJHbXKE1S/3dSlYz8ct9o/riGjGmpwLLO
-WuLbiRZoXRPCGWb1bIRpjVPn01YhlEvHyJsXktikm4pMUv+2QUZC7PU/eaAyY3eX
-Y0qdgaxza8q7toFXENG2nI/4dL5T9d4Bg2642zIk+Ki43NbQox4BDeWaSBWQK+bB
-DNDEjNnuGG0pTrdMD65TIOt7AoWeNCAqJZtSLDcoeQKBgQDwTHs1QX65Eo15cVh6
-sClRYTP1d01t39cSdV7sX1Vtp/z7C3FeUlWpb8zgyo3wYJlo+7hyOAcY4KyBpTeS
-aJGy1qIfD0qSt2ZNIvw1wfjiKa00ThMVW8urOcSMt+8+Q9SA/1nE/1iLNSc6M9Af
-ixx34zxlg25vbEaYcFKqiGYNgwKBgQDspoXV4hiufqoPf7F5bYYrv5j3SXoaJZnM
-RJngvBHohlSE9TwGvhHmy6xJAj1CRpoOOQpLoWgvxvWpdsCvcny3a8MY5AbyqOI4
-banVDCW5jnRe5ak/ECoxP4uPK+5/9CUlW3cP+GKfRGU3H7OhadopvNfwjUPg+wB4
-PXTCUvhznwKBgQCvKkFB//09Mb35QduKi7GCxgWXMKE7r8jahr5sNc5TQfqSkbPR
-WtlgysOhNWYkTHZn5d59PERIKTb2xpXs3tcec4D4fTASJSiooBETqtMfIdxFXYhh
-sGmV5mVVYps+Wzmj0wAAL1a/Gz7+GVjkNYbKCdYz9YviIx6O7ooED6u8uwKBgHuM
-aJ0EYExhVpmm2doCQyT973dTBgs2jDfnrMp2hYb28pNDkOYYPzJWLQkkwSSjxXQd
-dXGMv98JqWGi3O/7/n6oJQAOtE3lu80n+519rQhWBg0xK43/+3cgrNS/Y9GrfeUl
-/l/5Fkv+IjWIOHjR0ZMuwzIUHlcL0+/ybc2yEYITAoGAc5shIP9wvjEGexemj1u1
-mp2XwZ7zc0yyZA2icgsYAED6CVoNyrvU6KUm3m1fwEsHPdjk9vLhB5thgz1cjabr
-eoOAdPicwUjndabSor9ylCTDpYpTc8SwuM9KoZyk39DNMUcW3DtwWVZ8YBcm5j0X
-91+jp56NrKca0z1vcyRvy4Q=
------END PRIVATE KEY-----
------BEGIN CERTIFICATE-----
-MIIDazCCAlOgAwIBAgIUfo40Rk2dYhY8SO+yXL5vrvli+20wDQYJKoZIhvcNAQEL
-BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
-GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDA1MjExMzA4MzRaFw0yMTA1
-MjExMzA4MzRaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
-HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
-AQUAA4IBDwAwggEKAoIBAQDeItDNfMko2uLegVQUzPTLXx2hod9LKIN+S01TpwJv
-Li5185JZ8YhquwZnI19VvLQLyOIUMGeVxVvgGPgkRA58liO4oyCjZGYJygwAN0gk
-MvJXeMXuNeezlfm3xiVvCk0+l6Vb8FqyJod/P9sr6CyfQPolXSngZ8C7M/Fz3YWQ
-4O26L9in79eynCeJodXmzHsdrLHwKKOhv3F9Chp5Rx82KRvoFrdhmUEGM+l4Aq1J
-1iUVEsAQHHiMWhJhfsiC1GjGErOjSfWKw9e0ckbgqY/2ypXHdjsg0mI3eLO3E29S
-nG8OCWDLRcJVU1oHeX8hqk04c0urXhVQThN3X6ecXD1dAgMBAAGjUzBRMB0GA1Ud
-DgQWBBRXK63AaxqKc92abM3L9tM/sF1fmTAfBgNVHSMEGDAWgBRXK63AaxqKc92a
-bM3L9tM/sF1fmTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC8
-1/JSufP8yWnKWDXYrfWCM1ji+COiW3qrjeYxOyl6uvkJDDNFUt8MQUO2c4HFr4BE
-I7BGYbCfGT3dc1K3/JKtlGeoKqbKMgBWe+Lu12kkB5nrQdyqTVSgQnL1HHN7u7ED
-apSV9TzYcz6wbX4Yv27UMGpwbUypIG2EUVbBCkElZYoMn4TNlKF7uTH5dOmR+LNr
-zGvTvYkjMFLRtJ13SkRyfiMJkfJcM89czOVu4X/dljiHhGePfdbCUuGs1Gw759a8
-3l7b506sujWQEmuSe6UdOUws+gR82H7kb8n7qxcOa5HXiIE2MRdfHXx8AS0LGPsa
-n0PAUDF7eqI/kYskiWUX
------END CERTIFICATE-----
| MYSQL API fails to return large packets
When we return too many predictions on a `predict` call, the mysql proxy breaks; the error is:
```
2020-06-25 21:34:22,721 - ERROR - ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
ERROR:mindsdb_sql:ERROR while executing query: SELECT `rental_price`, `$select_data_query` FROM `demo_rental_price_predictor_mariadb` WHERE $select_data_query= 'SELECT * FROM test.home_rentals limit 1000'
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
byte format requires -128 <= number <= 127
----------------------------------------
Exception happened during processing of request from ('127.0.0.1', 37752)
Traceback (most recent call last):
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 805, in handle
self.queryAnswer(sql)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 461, in queryAnswer
return self.selectAnswer(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 726, in selectAnswer
self.answerTableQuery(query)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 279, in answerTableQuery
self.sendPackageGroup(packages)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in sendPackageGroup
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 240, in <listcomp>
string = b''.join([x.accum() for x in packages])
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 92, in accum
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib/python3.7/socketserver.py", line 650, in process_request_thread
self.finish_request(request, client_address)
File "/usr/lib/python3.7/socketserver.py", line 360, in finish_request
self.RequestHandlerClass(request, client_address, self)
File "/usr/lib/python3.7/socketserver.py", line 720, in __init__
self.handle()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py", line 815, in handle
msg=str(e)
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 86, in send
string = self.getPacketString()
File "/home/george/mindsdb/mindsdb/api/mysql/mysql_proxy/data_types/mysql_packet.py", line 58, in getPacketString
count_header = struct.pack('b', self.seq)
struct.error: byte format requires -128 <= number <= 127
----------------------------------------
```
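The traceback bottoms out in `struct.pack('b', self.seq)`: the `'b'` format is a signed byte, so packing fails as soon as the packet sequence id passes 127, which is exactly what happens when a large result set needs more than 128 packets. In the MySQL wire protocol the sequence id is a single unsigned byte that wraps around after 255, so a minimal sketch of the kind of change this implies (hypothetical helper name, not the proxy's actual code):

```python
import struct

def pack_sequence_id(seq: int) -> bytes:
    # The MySQL packet header stores the sequence id as one unsigned byte
    # that wraps back to 0 after 255, so pack it with 'B' and reduce modulo
    # 256 instead of using the signed 'b' format that overflows at 128.
    return struct.pack('B', seq % 256)

# Packet number 300 of a long result set gets sequence id 44 (300 % 256).
assert pack_sequence_id(300) == bytes([44])
```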
Connection issues from MariaDB
Inserting a predictor causes the following error:
```
2020-07-01 00:32:38,356 - INFO - handle new incoming connection
2020-07-01 00:32:38,356 - INFO - New connection [127.0.0.1:43208]
2020-07-01 00:32:38,356 - INFO - send HandshakePacket
2020-07-01 00:32:38,357 - INFO - Sending packet: HandshakePacket
2020-07-01 00:32:38,357 - INFO - Get packet: HandshakeResponsePacket
2020-07-01 00:32:38,357 - INFO - Got packet
2020-07-01 00:32:38,357 - INFO - b'\r\xa2:\x80\x00\x00\x00\x01-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00mindsdb\x00\x00mindsdb\x00mysql_native_password\x00u\x03_os\x10debian-linux-gnu\x0c_client_name\x08libmysql\x04_pid\x041425\x0f_client_version\x0710.3.22\x0c_server_host\t127.0.0.1\t_platform\x06x86_64'
2020-07-01 00:32:38,357 - INFO - Check auth, user=mindsdb, ssl=False, auth_method=mysql_native_password: empty password
2020-07-01 00:32:38,357 - INFO - Check auth, user=mindsdb, ssl=False, auth_method=mysql_native_password: connecting to database mindsdb
2020-07-01 00:32:38,357 - INFO - Check auth, user=mindsdb: Ok
2020-07-01 00:32:38,357 - INFO - Sending packet: OkPacket
2020-07-01 00:32:38,357 - INFO - Get packet: CommandPacket
2020-07-01 00:32:38,357 - WARNING - Packet with less than 4 bytes in length: b''
2020-07-01 00:32:38,357 - INFO - Session closed by client
```
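The `Packet with less than 4 bytes in length: b''` warning is logged when the proxy reads an empty buffer right after sending `OkPacket`, which normally just means the client closed the socket rather than that a malformed command arrived. For context, every MySQL packet starts with a 4-byte header (3 bytes of little-endian payload length plus a 1-byte sequence id); below is a minimal sketch of a header read that separates "client hung up" from "truncated packet" (assumed helper, not the proxy's actual implementation):

```python
import socket

def read_packet_header(sock: socket.socket):
    # 4-byte MySQL packet header: 3 bytes little-endian payload length,
    # then 1 byte sequence id.
    header = sock.recv(4)
    if len(header) == 0:
        # Empty read: the peer closed the connection, nothing to parse.
        return None
    if len(header) < 4:
        raise ValueError(f'truncated packet header: {header!r}')
    payload_length = int.from_bytes(header[:3], 'little')
    sequence_id = header[3]
    return payload_length, sequence_id
```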
| 2020-07-10T13:46:28Z | [] | [] |
|
mindsdb/mindsdb | 595 | mindsdb__mindsdb-595 | [
"549"
] | af1a026c43e2840567e11fbeff502ca1a9317299 | diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -12,7 +12,6 @@
import re
import traceback
-from pprint import pprint
from moz_sql_parser import parse
from mindsdb.api.mysql.mysql_proxy.classes.com_operators import join_keywords, binary_ops, unary_ops, operator_map
@@ -54,7 +53,7 @@ def parse_insert(sql):
search = re.search(r'(\(.*\)).*(\(.*\))', sql)
columns = search.groups()[0].split(',')
columns = [x.strip('(` )') for x in columns]
- p = re.compile( '\s*,\s*'.join(["('.*')"]*len(columns)) )
+ p = re.compile('\s*,\s*'.join(["('.*')"] * len(columns)))
values = re.search(p, search.groups()[1])
values = [x.strip("( ')") for x in values.groups()]
@@ -76,12 +75,15 @@ def __init__(self, sql, session=None):
# prepare
self._prepareQuery()
- def fetch(self, datahub):
+ def fetch(self, datahub, view='list'):
try:
self.datahub = datahub
self._fetchData()
data = self._processData()
- self.result = self._makeResultVeiw(data)
+ if view == 'dict':
+ self.result = self._makeDictResultVeiw(data)
+ elif view == 'list':
+ self.result = self._makeListResultVeiw(data)
except (TableWithoutDatasourceException,
UndefinedColumnTableException,
DuplicateTableNameException,
@@ -416,10 +418,26 @@ def _fetchData(self):
and table['join']['type'] == 'left join':
condition = {}
- if tablenum > 0 \
- and isinstance(table['join'], dict) \
- and table['join']['type'] == 'left join' \
- and dn.type == 'mindsdb':
+ if 'external_datasource' in condition:
+ external_datasource = condition['external_datasource']['$eq']
+ result = []
+ if 'select ' not in external_datasource.lower():
+ external_datasource = f'select * from {external_datasource}'
+ query = SQLQuery(external_datasource, default_dn='datasource')
+ result = query.fetch(self.datahub, view='dict')
+ if result['success'] is False:
+ raise Exception(result['msg'])
+ data = dn.select(
+ table=table_name,
+ columns=fields,
+ where=condition,
+ where_data=result['result'],
+ came_from=table.get('source')
+ )
+ elif tablenum > 0 \
+ and isinstance(table['join'], dict) \
+ and table['join']['type'] == 'left join' \
+ and dn.type == 'mindsdb':
data = dn.select(
table=table_name,
columns=fields,
@@ -539,7 +557,19 @@ def fnc(record):
return data
- def _makeResultVeiw(self, data):
+ def _makeDictResultVeiw(self, data):
+ result = []
+
+ for record in data:
+ row = {}
+ for col in self.columns:
+ table_record = record[f"{col['database']}.{col['table_name']}"]
+ row[col['name']] = table_record[col['name']]
+ result.append(row)
+
+ return result
+
+ def _makeListResultVeiw(self, data):
result = []
for record in data:
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py b/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
@@ -1,14 +1,17 @@
from mindsdb.api.mysql.mysql_proxy.datahub.information_schema import InformationSchema
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.mindsdb_datanode import MindsDBDataNode
+from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datasource_datanode import DataSourceDataNode
def init_datahub(config):
- all_ds = config['api']['mysql'].get('datasources', [])
+ # TODO remove 'datasources' from config
+ # all_ds = config['api']['mysql'].get('datasources', [])
datahub = InformationSchema()
datahub.add({
- 'mindsdb': MindsDBDataNode(config)
+ 'mindsdb': MindsDBDataNode(config),
+ 'datasource': DataSourceDataNode(config)
})
return datahub
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
@@ -0,0 +1,33 @@
+import json
+
+import pandas
+
+from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
+# from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.interfaces.datastore.datastore import DataStore
+from mindsdb.integrations.clickhouse.clickhouse import Clickhouse
+from mindsdb.integrations.mariadb.mariadb import Mariadb
+
+
+class DataSourceDataNode(DataNode):
+ type = 'mindsdb-datasource'
+
+ def __init__(self, config):
+ self.config = config
+ self.datastore = DataStore(config)
+ # self.mindsdb_native = MindsdbNative(config)
+
+ def getTables(self):
+ dss = self.datastore.get_datasources()
+ return [x['name'] for x in dss]
+
+ def hasTable(self, table):
+ return table in self.getTables()
+
+ def getTableColumns(self, table):
+ ds = self.datastore.get_datasource(table)
+ return [x['name'] for x in ds['columns']]
+
+ def select(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, came_from=None):
+ data = self.datastore.get_data(table, where=None, limit=None, offset=None)
+ return data['data']
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -26,7 +26,7 @@ def hasTable(self, table):
def getTableColumns(self, table):
if table == 'predictors':
- return ['name', 'status', 'accuracy', 'predict', 'select_data_query', 'training_options']
+ return ['name', 'status', 'accuracy', 'predict', 'select_data_query', 'external_datasource', 'training_options']
if table == 'commands':
return ['command']
model = self.mindsdb_native.get_model_data(name=table)
@@ -42,6 +42,7 @@ def getTableColumns(self, table):
# TODO this should be added just for clickhouse queries
columns += ['select_data_query']
+ columns += ['external_datasource']
return columns
def _select_predictors(self):
@@ -52,6 +53,7 @@ def _select_predictors(self):
'accuracy': x['accuracy'],
'predict': ', '.join(x['predict']),
'select_data_query': x['data_source'],
+ 'external_datasource': '', # TODO
'training_options': '' # TODO ?
} for x in models]
@@ -64,6 +66,11 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
if table == 'predictors':
return self._select_predictors()
+ external_datasource = None
+ if 'external_datasource' in where:
+ external_datasource = where['external_datasource']['$eq']
+ del where['external_datasource']
+
select_data_query = None
if came_from is not None and 'select_data_query' in where:
select_data_query = where['select_data_query']['$eq']
@@ -102,13 +109,13 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
original_target_values = {}
for col in predicted_columns:
- if type(where_data) == list:
- original_target_values[col + '_original'] = [None] * len(where_data)
- for row in where_data:
- if col in row:
- original_target_values[col + '_original'].append(row[col])
+ if where_data is not None:
+ if col in where_data:
+ original_target_values[col + '_original'] = list(where_data[col])
+ else:
+ original_target_values[col + '_original'] = [None] * len(where_data)
else:
- original_target_values[col + '_original'] = list(where_data[col])
+ original_target_values[col + '_original'] = [None]
res = self.mindsdb_native.predict(name=table, when_data=where_data)
@@ -132,6 +139,7 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
row[key + '_min'] = explanation[key]['confidence_interval'][0]
row[key + '_max'] = explanation[key]['confidence_interval'][-1]
row['select_data_query'] = select_data_query
+ row['external_datasource'] = external_datasource
for k in original_target_values:
row[k] = original_target_values[k][i]
data.append(row)
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -28,11 +28,7 @@
from mindsdb.api.mysql.mysql_proxy.classes.sql_query import (
SQLQuery,
- TableWithoutDatasourceException,
- UndefinedColumnTableException,
- DuplicateTableNameException,
- NotImplementedError,
- SqlError
+ NotImplementedError
)
from mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import (
@@ -151,8 +147,8 @@ def get_fast_auth_password():
try:
password = password_answer.password.value.decode()
except Exception:
- log.info(f'error: no password in Fast Auth answer')
- self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'Is not password in connection query.').send()
+ log.info('error: no password in Fast Auth answer')
+ self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg='Is not password in connection query.').send()
return None
return password
@@ -207,7 +203,7 @@ def get_fast_auth_password():
if client_auth_plugin != DEFAULT_AUTH_METHOD:
if client_auth_plugin == 'mysql_native_password' and \
- orig_password == '' and len(handshake_resp.enc_password.value) == 0:
+ orig_password == '' and len(handshake_resp.enc_password.value) == 0:
switch_auth('mysql_native_password')
password = ''
else:
@@ -215,12 +211,12 @@ def get_fast_auth_password():
if new_method == 'caching_sha2_password' and self.session.is_ssl is False:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'error: cant switch to caching_sha2_password without SSL')
- self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'caching_sha2_password without SSL not supported').send()
+ 'error: cant switch to caching_sha2_password without SSL')
+ self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg='caching_sha2_password without SSL not supported').send()
return False
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- f'switch auth method to {new_method}')
+ f'switch auth method to {new_method}')
password = switch_auth(new_method)
if new_method == 'caching_sha2_password':
@@ -229,21 +225,21 @@ def get_fast_auth_password():
orig_password = orig_password_hash
elif orig_username == username and HARDCODED_PASSWORD == '':
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'empty password')
+ 'empty password')
password = ''
elif 'caching_sha2_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'check auth using caching_sha2_password')
+ 'check auth using caching_sha2_password')
password = get_fast_auth_password()
orig_password = HARDCODED_PASSWORD
elif 'mysql_native_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'check auth using mysql_native_password')
+ 'check auth using mysql_native_password')
password = handshake_resp.enc_password.value
orig_password = orig_password_hash
else:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'unknown method, possible ERROR. Try to switch to mysql_native_password')
+ 'unknown method, possible ERROR. Try to switch to mysql_native_password')
password = switch_auth('mysql_native_password')
orig_password = orig_password_hash
@@ -252,7 +248,7 @@ def get_fast_auth_password():
except Exception:
self.session.database = None
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- f'connecting to database {self.session.database}')
+ f'connecting to database {self.session.database}')
if self.isAuthOk(username, orig_username, password, orig_password):
self.packet(OkPacket).send()
@@ -308,6 +304,24 @@ def insert_predictor_answer(self, sql):
global mdb, default_store, config
insert = SQLQuery.parse_insert(sql)
+ is_external_datasource = isinstance(insert.get('external_datasource'), str) and len(insert['external_datasource']) > 0
+ is_select_data_query = isinstance(insert.get('select_data_query'), str) and len(insert['select_data_query']) > 0
+
+ if is_external_datasource and is_select_data_query:
+ self.packet(
+ ErrPacket,
+ err_code=ERR.ER_WRONG_ARGUMENTS,
+ msg="'external_datasource' and 'select_data_query' should not be used in one query"
+ ).send()
+ return
+ elif is_external_datasource is False and is_select_data_query is False:
+ self.packet(
+ ErrPacket,
+ err_code=ERR.ER_WRONG_ARGUMENTS,
+ msg="in query should be 'external_datasource' or 'select_data_query'"
+ ).send()
+ return
+
models = mdb.get_models()
if insert['name'] in [x['name'] for x in models]:
self.packet(
@@ -319,10 +333,10 @@ def insert_predictor_answer(self, sql):
kwargs = {}
if isinstance(insert.get('training_options'), str) \
- and len(insert['training_options']) > 0:
+ and len(insert['training_options']) > 0:
try:
kwargs = json.loads(insert['training_options'])
- except Exception as e:
+ except Exception:
self.packet(
ErrPacket,
err_code=ERR.ER_WRONG_ARGUMENTS,
@@ -330,12 +344,15 @@ def insert_predictor_answer(self, sql):
).send()
return
- # TODO clickhouse with any type of used escaping sends escaped quotes as \'.
- # Need to check other clients, they behaviour can be differ
- insert['select_data_query'] = insert['select_data_query'].replace(r"\'", "'")
- ds_type = config['integrations'][self.session.integration]['type']
- ds = default_store.save_datasource(insert['name'], ds_type, insert['select_data_query'])
+ if is_select_data_query:
+ insert['select_data_query'] = insert['select_data_query'].replace(r"\'", "'")
+ ds_type = config['integrations'][self.session.integration]['type']
+ ds = default_store.save_datasource(insert['name'], ds_type, insert['select_data_query'])
+ elif is_external_datasource:
+ ds = default_store.get_datasource_obj(insert['external_datasource'], raw=True)
+
insert['predict'] = [x.strip() for x in insert['predict'].split(',')]
+
mdb.learn(insert['name'], ds, insert['predict'], kwargs)
self.packet(OkPacket).send()
@@ -371,10 +388,10 @@ def handle_custom_command(self, sql):
insert = SQLQuery.parse_insert(sql)
if 'command' not in insert:
- self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"command should be inserted").send()
+ self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="command should be inserted").send()
return
if len(insert) > 1:
- self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"only command should be inserted").send()
+ self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="only command should be inserted").send()
return
command = insert['command'].strip(' ;').split()
@@ -455,15 +472,15 @@ def queryAnswer(self, sql):
self.answerShowCollation()
return
elif keyword == 'delete' and \
- ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
+ ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
self.delete_predictor_answer(sql)
return
elif keyword == 'insert' and \
- ('mindsdb.commands' in sql_lower or self.session.database == 'mindsdb' and 'commands' in sql_lower):
+ ('mindsdb.commands' in sql_lower or self.session.database == 'mindsdb' and 'commands' in sql_lower):
self.handle_custom_command(sql)
return
elif keyword == 'insert' and \
- ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
+ ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
self.insert_predictor_answer(sql)
return
elif keyword in ('update', 'insert'):
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -73,12 +73,13 @@ def setup(self):
msqyl_user = self._get_mysql_user()
q = f"""
- CREATE TABLE IF NOT EXISTS mindsdb.predictors
- (name String,
+ CREATE TABLE IF NOT EXISTS mindsdb.predictors (
+ name String,
status String,
accuracy String,
predict String,
select_data_query String,
+ external_datasource String,
training_options String
) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors', '{msqyl_user}', '{msqyl_pass}')
"""
@@ -99,6 +100,7 @@ def register_predictors(self, model_data_arr):
del stats['columns_to_ignore']
columns_sql = ','.join(self._to_clickhouse_table(stats, model_meta['predict']))
columns_sql += ',`select_data_query` Nullable(String)'
+ columns_sql += ',`external_datasource` Nullable(String)'
for col in model_meta['predict']:
columns_sql += f',`{col}_confidence` Nullable(Float64)'
if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
diff --git a/mindsdb/integrations/mariadb/mariadb.py b/mindsdb/integrations/mariadb/mariadb.py
--- a/mindsdb/integrations/mariadb/mariadb.py
+++ b/mindsdb/integrations/mariadb/mariadb.py
@@ -80,6 +80,7 @@ def setup(self):
accuracy VARCHAR(500),
predict VARCHAR(500),
select_data_query VARCHAR(500),
+ external_datasource VARCHAR(500),
training_options VARCHAR(500)
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
@@ -100,6 +101,7 @@ def register_predictors(self, model_data_arr):
stats = model_meta['data_analysis']
columns_sql = ','.join(self._to_mariadb_table(stats, model_meta['predict']))
columns_sql += ',`select_data_query` varchar(500)'
+ columns_sql += ',`external_datasource` varchar(500)'
for col in model_meta['predict']:
columns_sql += f',`{col}_confidence` double'
if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -14,8 +14,6 @@
from mindsdb.interfaces.datastore.sqlite_helpers import *
from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb_native import FileDS, ClickhouseDS, MariaDS
-from mindsdb.interfaces.datastore.sqlite_helpers import create_sqlite_db
-
class DataStore():
def __init__(self, config, storage_dir=None):
@@ -78,7 +76,6 @@ def save_datasource(self, name, source_type, source, file_path=None):
ds_dir = os.path.join(ds_meta_dir, 'datasource')
os.mkdir(ds_dir)
- print(source_type)
if source_type == 'file':
source = os.path.join(ds_dir, source)
os.replace(file_path, source)
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -2,6 +2,7 @@
import time
import os
import signal
+import psutil
from random import randint
import unittest
@@ -14,6 +15,7 @@
pred_name = f'hr_predictor_{rand}'
root = 'http://localhost:47334'
+
class HTTPTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -24,7 +26,7 @@ def setUpClass(cls):
res = requests.get(f'{root}/util/ping')
if res.status_code != 200:
raise Exception('')
- except:
+ except Exception:
time.sleep(1)
if i == 19:
raise Exception("Can't connect !")
@@ -32,8 +34,12 @@ def setUpClass(cls):
@classmethod
def tearDownClass(cls):
try:
+ conns = psutil.net_connections()
+ pid = [x.pid for x in conns if x.status == 'LISTEN' and x.laddr[1] == 47334 and x.pid is not None]
+ if len(pid) > 0:
+ os.kill(pid[0], 9)
cls.sp.kill()
- except:
+ except Exception:
pass
def test_1_put_ds(self):
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -1,4 +1,5 @@
import psutil
+import shutil
import time
import pathlib
import json
@@ -63,8 +64,12 @@ def prepare_config(config, db):
config._config['integrations'][key]['enabled'] = key == db
datastore_dir = TEMP_DIR.joinpath('datastore/')
+ if datastore_dir.exists():
+ shutil.rmtree(datastore_dir)
datastore_dir.mkdir(parents=True, exist_ok=True)
mindsdb_native_dir = TEMP_DIR.joinpath('predictors/')
+ if mindsdb_native_dir.exists():
+ shutil.rmtree(mindsdb_native_dir)
mindsdb_native_dir.mkdir(parents=True, exist_ok=True)
config['interface']['datastore']['storage_dir'] = str(datastore_dir)
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -211,7 +211,34 @@ def test_2_insert_predictor(self):
mindsdb_tables = [x['name'] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_query_predictor(self):
+ def test_3_insert_predictor_with_existing_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_2'
+ models = self.mdb.get_models()
+ models = [x['name'] for x in models]
+ if name in models:
+ self.mdb.delete_model(name)
+
+ query(f"""
+ insert into mindsdb.predictors (name, predict, external_datasource, training_options) values
+ (
+ '{name}',
+ 'rental_price, location',
+ '{TEST_PREDICTOR_NAME}',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print('predictor record in mindsdb.predictors')
+ res = query(f"select status from mindsdb.predictors where name = '{name}'")
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print('predictor table in mindsdb db')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x['name'] for x in mindsdb_tables]
+ self.assertTrue(name in mindsdb_tables)
+
+ def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
select
@@ -236,7 +263,7 @@ def test_3_query_predictor(self):
self.assertIsInstance(res['rental_price_explain'], str)
self.assertTrue(res['number_of_rooms'] == 'None' or res['number_of_rooms'] is None)
- def test_4_range_query(self):
+ def test_5_range_query(self):
print(f'\nExecuting {inspect.stack()[0].function}')
results = query(f"""
@@ -257,7 +284,7 @@ def test_4_range_query(self):
self.assertIsInstance(res['rental_price_max'], int)
self.assertIsInstance(res['rental_price_explain'], str)
- def test_5_delete_predictor_by_command(self):
+ def test_6_delete_predictor_by_command(self):
print(f'\nExecuting {inspect.stack()[0].function}')
query(f"""
diff --git a/tests/integration_tests/flows/test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -194,7 +194,34 @@ def test_2_insert_predictor(self):
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_query_predictor(self):
+ def test_3_insert_predictor_with_existing_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_2'
+ models = self.mdb.get_models()
+ models = [x['name'] for x in models]
+ if name in models:
+ self.mdb.delete_model(name)
+
+ query(f"""
+ insert into mindsdb.predictors (name, predict, external_datasource, training_options) values
+ (
+ '{name}',
+ 'rental_price, location',
+ '{TEST_PREDICTOR_NAME}',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print('predictor record in mindsdb.predictors')
+ res = query(f"select status from mindsdb.predictors where name = '{name}'", as_dict=True)
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print('predictor table in mindsdb db')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x[0] for x in mindsdb_tables]
+ self.assertTrue(name in mindsdb_tables)
+
+ def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
select
@@ -218,7 +245,7 @@ def test_3_query_predictor(self):
self.assertIsInstance(res['rental_price_explain'], str)
self.assertTrue(res['number_of_rooms'] == 'None' or res['number_of_rooms'] is None)
- def test_4_range_query(self):
+ def test_5_range_query(self):
print(f'\nExecuting {inspect.stack()[0].function}')
results = query(f"""
@@ -239,7 +266,7 @@ def test_4_range_query(self):
self.assertIsInstance(res['rental_price_max'], float)
self.assertIsInstance(res['rental_price_explain'], str)
- def test_5_delete_predictor_by_command(self):
+ def test_6_delete_predictor_by_command(self):
print(f'\nExecuting {inspect.stack()[0].function}')
query(f"""
@@ -255,11 +282,11 @@ def test_5_delete_predictor_by_command(self):
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
- def test_6_insert_predictor_again(self):
+ def test_7_insert_predictor_again(self):
print(f'\nExecuting {inspect.stack()[0].function}')
self.test_2_insert_predictor()
- def test_7_delete_predictor_by_delete_statement(self):
+ def test_8_delete_predictor_by_delete_statement(self):
print(f'\nExecuting {inspect.stack()[0].function}')
query(f"""
delete from mindsdb.predictors where name='{TEST_PREDICTOR_NAME}';
| Allow predicting and learning from external datasources in the database integrations
Allow predicting and learning from external datasources in the database integrations.
Maybe use a magic variable again, something like `external_datasource`, which is just a string carrying the name of the datasource.
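
A minimal sketch of how this could look from a connected database, assuming the new column is called `external_datasource` and a datasource named `home_rentals_data` was previously uploaded through the datastore (the predictor and datasource names here are made up for illustration):

```sql
-- Hypothetical usage sketch: train a predictor from an already-registered
-- datasource instead of passing a select_data_query.
INSERT INTO mindsdb.predictors (name, predict, external_datasource, training_options)
VALUES (
    'home_rentals_model',
    'rental_price',
    'home_rentals_data',
    '{"stop_training_in_x_seconds": 10}'
);

-- Later, query the trained predictor against that same datasource.
SELECT rental_price, rental_price_confidence
FROM mindsdb.home_rentals_model
WHERE external_datasource = 'home_rentals_data';
```

Presumably only one of `external_datasource` or `select_data_query` would be given in a single insert, not both.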
| 2020-07-15T12:59:22Z | [] | [] |
|
mindsdb/mindsdb | 597 | mindsdb__mindsdb-597 | [
"555"
] | f86f997b3e5579031284c58e6369f334171021c9 | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '1.99.11'
+__version__ = '2.0.0'
__description__ = "MindsDB server, provides server capabilities to mindsdb native python library"
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/api/http/initialize.py b/mindsdb/api/http/initialize.py
--- a/mindsdb/api/http/initialize.py
+++ b/mindsdb/api/http/initialize.py
@@ -4,6 +4,7 @@
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.utilities.config import Config
class Swagger_Api(Api):
@@ -34,3 +35,4 @@ def initialize_flask(config):
def initialize_interfaces(config, app):
app.default_store = DataStore(config)
app.mindsdb_native = MindsdbNative(config)
+ app.config_obj = config
diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -1,100 +1,43 @@
-mport datetime
-import json
-import os
-import re
-
-import tempfile
-import multipart
-import csv
-
-import mindsdb
-from dateutil.parser import parse
-from flask import request, send_file
-from flask_restx import Resource, abort, Namespace
+from flask import request
+from flask_restx import Resource
from flask import current_app as ca
-ns_conf = Namespace('config', description='Configuration changes')
+from mindsdb.api.http.namespaces.configs.config import ns_conf
+from mindsdb.interfaces.database.database import DatabaseWrapper
-@ns_conf.route('/')
-class DatasourcesList(Resource):
- @ns_conf.doc('get_datasources_list')
- @ns_conf.marshal_list_with(datasource_metadata)
- def get(self):
- '''List all datasources'''
- return ca.default_store.get_datasources()
+@ns_conf.route('/integrations')
+@ns_conf.param('name', 'List all database integration')
+class Integration(Resource):
+ @ns_conf.doc('get_integrations')
+ def get(self):
+ return {'integrations': [k for k in ca.config_obj['integrations']]}
-@ns_conf.route('/<name>')
-@ns_conf.param('name', 'Datasource name')
-class Datasource(Resource):
- @ns_conf.doc('get_datasource')
- @ns_conf.marshal_with(datasource_metadata)
+@ns_conf.route('/integrations/<name>')
+@ns_conf.param('name', 'Database integration')
+class Integration(Resource):
+ @ns_conf.doc('get_integration')
def get(self, name):
'''return datasource metadata'''
- ds = ca.default_store.get_datasource(name)
- if ds is not None:
- return ds
- return '', 404
+ return ca.config_obj['integrations'][name]
- @ns_conf.doc('delete_datasource')
- def delete(self, name):
- '''delete datasource'''
- try:
- ca.default_store.delete_datasource(name)
- except Exception as e:
- print(e)
- abort(400, str(e))
- return '', 200
-
- @ns_conf.doc('put_datasource', params=put_datasource_params)
- @ns_conf.marshal_with(datasource_metadata)
+ @ns_conf.doc('put_integration')
def put(self, name):
- '''add new datasource'''
- data = {}
- def on_field(field):
- print(f'\n\n{field}\n\n')
- name = field.field_name.decode()
- value = field.value.decode()
- data[name] = value
-
- def on_file(file):
- data['file'] = file.file_name.decode()
-
- temp_dir_path = tempfile.mkdtemp(prefix='datasource_file_')
-
- if request.headers['Content-Type'].startswith('multipart/form-data'):
- parser = multipart.create_form_parser(
- headers=request.headers,
- on_field=on_field,
- on_file=on_file,
- config={
- 'UPLOAD_DIR': temp_dir_path.encode(), # bytes required
- 'UPLOAD_KEEP_FILENAME': True,
- 'UPLOAD_KEEP_EXTENSIONS': True,
- 'MAX_MEMORY_FILE_SIZE': 0
- }
- )
-
- while True:
- chunk = request.stream.read(8192)
- if not chunk:
- break
- parser.write(chunk)
- parser.finalize()
- parser.close()
- else:
- data = request.json
-
- ds_name = data['name'] if 'name' in data else name
- source = data['source'] if 'source' in data else name
- source_type = data['source_type']
-
- if source_type == 'file':
- file_path = os.path.join(temp_dir_path, data['file'])
- else:
- file_path = None
+ '''return datasource metadata'''
+ params = request.json.get('params')
+ ca.config_obj.add_db_integration(name, params)
+ DatabaseWrapper(ca.config_obj)
+ return 'added'
- ca.default_store.save_datasource(ds_name, source_type, source, file_path)
- os.rmdir(temp_dir_path)
+ @ns_conf.doc('delete_integration')
+ def delete(self, name):
+ ca.config_obj.remove_db_integration(name)
+ return 'deleted'
- return ca.default_store.get_datasource(ds_name)
+ @ns_conf.doc('modify_integration')
+ def post(self, name):
+ '''return datasource metadata'''
+ params = request.json.get('params')
+ ca.config_obj.modify_db_integration(name, params)
+ DatabaseWrapper(ca.config_obj)
+ return 'modified'
diff --git a/mindsdb/api/http/namespaces/configs/config.py b/mindsdb/api/http/namespaces/configs/config.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/api/http/namespaces/configs/config.py
@@ -0,0 +1,3 @@
+from flask_restx import Namespace
+
+ns_conf = Namespace('config', description='Configuration changes')
diff --git a/mindsdb/api/http/start.py b/mindsdb/api/http/start.py
--- a/mindsdb/api/http/start.py
+++ b/mindsdb/api/http/start.py
@@ -1,25 +1,21 @@
-import json
import os
import mindsdb
import logging
import sys
-import random
from mindsdb.api.http.namespaces.predictor import ns_conf as predictor_ns
from mindsdb.api.http.namespaces.datasource import ns_conf as datasource_ns
from mindsdb.api.http.namespaces.util import ns_conf as utils_ns
+from mindsdb.api.http.namespaces.config import ns_conf as conf_ns
from mindsdb.api.http.initialize import initialize_flask, initialize_interfaces
from mindsdb.utilities.config import Config
+
def start(config, initial=False):
if not initial:
print('\n\nWarning, this process should not have been started... nothing is "wrong" but it needlessly ate away a tiny bit of precious comute !\n\n')
config = Config(config)
- port=47334
- host='0.0.0.0'
- debug=False
-
-
+ debug = False
if not logging.root.handlers:
rootLogger = logging.getLogger()
@@ -32,22 +28,24 @@ def start(config, initial=False):
errStream.addFilter(lambda record: record.levelno > logging.INFO)
rootLogger.addHandler(errStream)
- mindsdb.CONFIG.MINDSDB_DATASOURCES_PATH = os.path.join(mindsdb.CONFIG.MINDSDB_STORAGE_PATH,'datasources')
- mindsdb.CONFIG.MINDSDB_TEMP_PATH = os.path.join(mindsdb.CONFIG.MINDSDB_STORAGE_PATH,'tmp')
+ mindsdb.CONFIG.MINDSDB_DATASOURCES_PATH = os.path.join(mindsdb.CONFIG.MINDSDB_STORAGE_PATH, 'datasources')
+ mindsdb.CONFIG.MINDSDB_TEMP_PATH = os.path.join(mindsdb.CONFIG.MINDSDB_STORAGE_PATH, 'tmp')
os.makedirs(mindsdb.CONFIG.MINDSDB_STORAGE_PATH, exist_ok=True)
os.makedirs(mindsdb.CONFIG.MINDSDB_DATASOURCES_PATH, exist_ok=True)
os.makedirs(mindsdb.CONFIG.MINDSDB_TEMP_PATH, exist_ok=True)
- #'''
-
+
app, api = initialize_flask(config)
initialize_interfaces(config, app)
api.add_namespace(predictor_ns)
api.add_namespace(datasource_ns)
api.add_namespace(utils_ns)
+ api.add_namespace(conf_ns)
+ print(f"Start on {config['api']['http']['host']}:{config['api']['http']['port']}")
app.run(debug=debug, port=config['api']['http']['port'], host=config['api']['http']['host'])
+
if __name__ == '__main__':
start()
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -12,7 +12,6 @@
import re
import traceback
-from pprint import pprint
from moz_sql_parser import parse
from mindsdb.api.mysql.mysql_proxy.classes.com_operators import join_keywords, binary_ops, unary_ops, operator_map
@@ -54,18 +53,18 @@ def parse_insert(sql):
search = re.search(r'(\(.*\)).*(\(.*\))', sql)
columns = search.groups()[0].split(',')
columns = [x.strip('(` )') for x in columns]
- p = re.compile( '\s*,\s*'.join(["('.*')"]*len(columns)) )
+ p = re.compile('\s*,\s*'.join(["('.*')"] * len(columns)))
values = re.search(p, search.groups()[1])
values = [x.strip("( ')") for x in values.groups()]
return dict(zip(columns, values))
- def __init__(self, sql, default_dn=None):
+ def __init__(self, sql, session=None):
# parse
-
- self.default_datanode = None
- if isinstance(default_dn, str) and len(default_dn) > 0:
- self.default_datanode = default_dn
+ self.session = session
+ self.integration = None
+ if session is not None:
+ self.integration = session.integration
# 'offset x, y' - specific just for mysql, parser dont understand it
sql = re.sub(r'\n?limit([\n\d\s]*),([\n\d\s]*)', ' limit \g<1> offset \g<1> ', sql)
@@ -76,12 +75,15 @@ def __init__(self, sql, default_dn=None):
# prepare
self._prepareQuery()
- def fetch(self, datahub):
+ def fetch(self, datahub, view='list'):
try:
self.datahub = datahub
self._fetchData()
data = self._processData()
- self.result = self._makeResultVeiw(data)
+ if view == 'dict':
+ self.result = self._makeDictResultVeiw(data)
+ elif view == 'list':
+ self.result = self._makeListResultVeiw(data)
except (TableWithoutDatasourceException,
UndefinedColumnTableException,
DuplicateTableNameException,
@@ -114,28 +116,31 @@ def _format_from_statement(self, s):
{'left join': {'name': 'b', 'value': 'xxx.zzz'}, 'on': {'eq': ['a.id', 'b.id']}}]
This function do:
1. replace string view 'xxx.zzz' to {'value': 'xxx.zzz', 'name': 'zzz'}
- 2. if exists default_datanode, then replace 'zzz' to 'default_datanode.zzz'
+ 2. if exists db info, then replace 'zzz' to 'db.zzz'
3. if database marks (as _clickhouse or _mariadb) in datasource name, than do:
{'value': 'xxx.zzz_mariadb', 'name': 'a'}
-> {'value': 'xxx.zzz', 'name': 'a', source: 'mariadb'}
"""
+ database = None
+ if self.session is not None:
+ database = self.session.database
if isinstance(s, str):
if '.' in s:
s = {
'name': s.split('.')[-1],
'value': s
}
- elif self.default_datanode is not None:
+ elif database is not None:
s = {
'name': s,
- 'value': f'{self.default_datanode}.{s}'
+ 'value': f'{database}.{s}'
}
else:
raise SqlError('table without datasource %s ' % s)
elif isinstance(s, dict):
if 'value' in s and 'name' in s:
- if '.' not in s['value'] and self.default_datanode is not None:
- s['value'] = f"{self.default_datanode}.{s['value']}"
+ if '.' not in s['value'] and database is not None:
+ s['value'] = f"{database}.{s['value']}"
elif '.' not in s['value']:
raise SqlError('table without datasource %s ' % s['value'])
elif 'left join' in s:
@@ -146,18 +151,11 @@ def _format_from_statement(self, s):
s['join'] = self._format_from_statement(s['join'])
else:
raise SqlError('Something wrong in query parsing process')
-
- for x in ['clickhouse', 'mariadb']:
- _x = '_' + x
- if s['value'].endswith(_x):
- s['value'] = s['value'][:s['value'].rfind(_x)]
- s['source'] = x
-
return s
def _parseQuery(self, sql):
self.struct = parse(sql)
-
+
if 'limit' in self.struct:
limit = self.struct.get('limit')
if isinstance(limit, int) is False:
@@ -420,23 +418,39 @@ def _fetchData(self):
and table['join']['type'] == 'left join':
condition = {}
- if tablenum > 0 \
- and isinstance(table['join'], dict) \
- and table['join']['type'] == 'left join' \
- and dn.type == 'mindsdb':
+ if 'external_datasource' in condition:
+ external_datasource = condition['external_datasource']['$eq']
+ result = []
+ if 'select ' not in external_datasource.lower():
+ external_datasource = f'select * from {external_datasource}'
+ query = SQLQuery(external_datasource, default_dn='datasource')
+ result = query.fetch(self.datahub, view='dict')
+ if result['success'] is False:
+ raise Exception(result['msg'])
data = dn.select(
table=table_name,
columns=fields,
where=condition,
- where_data=self.table_data[prev_table_name],
+ where_data=result['result'],
came_from=table.get('source')
)
+ elif tablenum > 0 \
+ and isinstance(table['join'], dict) \
+ and table['join']['type'] == 'left join' \
+ and dn.type == 'mindsdb':
+ data = dn.select(
+ table=table_name,
+ columns=fields,
+ where=condition,
+ where_data=self.table_data[prev_table_name],
+ came_from=self.integration
+ )
else:
data = dn.select(
table=table_name,
columns=fields,
where=condition,
- came_from=table.get('source')
+ came_from=self.integration
)
self.table_data[full_table_name] = data
@@ -543,7 +557,19 @@ def fnc(record):
return data
- def _makeResultVeiw(self, data):
+ def _makeDictResultVeiw(self, data):
+ result = []
+
+ for record in data:
+ row = {}
+ for col in self.columns:
+ table_record = record[f"{col['database']}.{col['table_name']}"]
+ row[col['name']] = table_record[col['name']]
+ result.append(row)
+
+ return result
+
+ def _makeListResultVeiw(self, data):
result = []
for record in data:
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py b/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
@@ -1,14 +1,17 @@
from mindsdb.api.mysql.mysql_proxy.datahub.information_schema import InformationSchema
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.mindsdb_datanode import MindsDBDataNode
+from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datasource_datanode import DataSourceDataNode
def init_datahub(config):
- all_ds = config['api']['mysql'].get('datasources', [])
+ # TODO remove 'datasources' from config
+ # all_ds = config['api']['mysql'].get('datasources', [])
datahub = InformationSchema()
datahub.add({
- 'mindsdb': MindsDBDataNode(config)
+ 'mindsdb': MindsDBDataNode(config),
+ 'datasource': DataSourceDataNode(config)
})
return datahub
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
@@ -0,0 +1,33 @@
+import json
+
+import pandas
+
+from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
+# from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.interfaces.datastore.datastore import DataStore
+from mindsdb.integrations.clickhouse.clickhouse import Clickhouse
+from mindsdb.integrations.mariadb.mariadb import Mariadb
+
+
+class DataSourceDataNode(DataNode):
+ type = 'mindsdb-datasource'
+
+ def __init__(self, config):
+ self.config = config
+ self.datastore = DataStore(config)
+ # self.mindsdb_native = MindsdbNative(config)
+
+ def getTables(self):
+ dss = self.datastore.get_datasources()
+ return [x['name'] for x in dss]
+
+ def hasTable(self, table):
+ return table in self.getTables()
+
+ def getTableColumns(self, table):
+ ds = self.datastore.get_datasource(table)
+ return [x['name'] for x in ds['columns']]
+
+ def select(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, came_from=None):
+ data = self.datastore.get_data(table, where=None, limit=None, offset=None)
+ return data['data']
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -7,6 +7,7 @@
from mindsdb.integrations.clickhouse.clickhouse import Clickhouse
from mindsdb.integrations.mariadb.mariadb import Mariadb
+
class MindsDBDataNode(DataNode):
type = 'mindsdb'
@@ -25,7 +26,7 @@ def hasTable(self, table):
def getTableColumns(self, table):
if table == 'predictors':
- return ['name', 'status', 'accuracy', 'predict', 'select_data_query', 'training_options']
+ return ['name', 'status', 'accuracy', 'predict', 'select_data_query', 'external_datasource', 'training_options']
if table == 'commands':
return ['command']
model = self.mindsdb_native.get_model_data(name=table)
@@ -41,6 +42,7 @@ def getTableColumns(self, table):
# TODO this should be added just for clickhouse queries
columns += ['select_data_query']
+ columns += ['external_datasource']
return columns
def _select_predictors(self):
@@ -51,6 +53,7 @@ def _select_predictors(self):
'accuracy': x['accuracy'],
'predict': ', '.join(x['predict']),
'select_data_query': x['data_source'],
+ 'external_datasource': '', # TODO
'training_options': '' # TODO ?
} for x in models]
@@ -63,27 +66,23 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
if table == 'predictors':
return self._select_predictors()
+ external_datasource = None
+ if 'external_datasource' in where:
+ external_datasource = where['external_datasource']['$eq']
+ del where['external_datasource']
+
select_data_query = None
if came_from is not None and 'select_data_query' in where:
select_data_query = where['select_data_query']['$eq']
del where['select_data_query']
- '''
- @TODO (Urgent~ish)
-
- This is a horrible but function hack, however the proper way to do this is:
- 1. Figure out the alias of the database sending the query
- 2. Lookup the connection information in the config
- 3. Send that information + the query + a name (maybe the hash of the query or the query itself) to the Datastore API and ask it to create a datasource
-
- That way we also avoid making the same query twice and we don't use the database integrations (meant to sync predictors) in order to query data (the role of the mindsdb_native datasources / the datastore / data skillet)
- '''
- if came_from == 'clickhouse':
- ch = Clickhouse(self.config, 'default_clickhouse')
- res = ch._query(select_data_query.strip(' ;') + ' FORMAT JSON')
+ dbtype = self.config['integrations'][came_from]['type']
+ if dbtype == 'clickhouse':
+ ch = Clickhouse(self.config, came_from)
+ res = ch._query(select_data_query.strip(' ;\n') + ' FORMAT JSON')
data = res.json()['data']
- elif came_from == 'mariadb':
- maria = Mariadb(self.config, 'default_mariadb')
+ elif dbtype == 'mariadb':
+ maria = Mariadb(self.config, came_from)
data = maria._query(select_data_query)
if where_data is None:
@@ -110,13 +109,13 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
original_target_values = {}
for col in predicted_columns:
- if type(where_data) == list:
- original_target_values[col + '_original'] = [None] * len(where_data)
- for row in where_data:
- if col in row:
- original_target_values[col + '_original'].append(row[col])
+ if where_data is not None:
+ if col in where_data:
+ original_target_values[col + '_original'] = list(where_data[col])
+ else:
+ original_target_values[col + '_original'] = [None] * len(where_data)
else:
- original_target_values[col + '_original'] = list(where_data[col])
+ original_target_values[col + '_original'] = [None]
res = self.mindsdb_native.predict(name=table, when_data=where_data)
@@ -140,6 +139,7 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
row[key + '_min'] = explanation[key]['confidence_interval'][0]
row[key + '_max'] = explanation[key]['confidence_interval'][-1]
row['select_data_query'] = select_data_query
+ row['external_datasource'] = external_datasource
for k in original_target_values:
row[k] = original_target_values[k][i]
data.append(row)
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -28,11 +28,7 @@
from mindsdb.api.mysql.mysql_proxy.classes.sql_query import (
SQLQuery,
- TableWithoutDatasourceException,
- UndefinedColumnTableException,
- DuplicateTableNameException,
- NotImplementedError,
- SqlError
+ NotImplementedError
)
from mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import (
@@ -75,6 +71,7 @@
default_store = None
mdb = None
datahub = None
+config = None
class MysqlProxy(SocketServer.BaseRequestHandler):
@@ -131,7 +128,7 @@ def isAuthOk(self, user, orig_user, password, orig_password):
log.error(traceback.format_exc())
def handshake(self):
- global HARDCODED_PASSWORD, HARDCODED_USER, CERT_PATH
+ global HARDCODED_PASSWORD, HARDCODED_USER, CERT_PATH, config
def switch_auth(method='mysql_native_password'):
self.packet(SwitchOutPacket, seed=self.salt, method=method).send()
@@ -150,8 +147,8 @@ def get_fast_auth_password():
try:
password = password_answer.password.value.decode()
except Exception:
- log.info(f'error: no password in Fast Auth answer')
- self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'Is not password in connection query.').send()
+ log.info('error: no password in Fast Auth answer')
+ self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg='Is not password in connection query.').send()
return None
return password
@@ -193,12 +190,20 @@ def get_fast_auth_password():
handshake_resp = self.packet(HandshakeResponsePacket)
handshake_resp.get()
client_auth_plugin = handshake_resp.client_auth_plugin.value.decode()
-
+
username = handshake_resp.username.value.decode()
+ # if connect come from 'integration', then username will be like username_dbname
+ integration = ''
+ prefix = orig_username + '_'
+ if username.startswith(prefix):
+ integration = username[len(prefix):]
+ if len(integration) > 0 and integration in config['integrations']:
+ self.session.integration = integration
+ username = orig_username
if client_auth_plugin != DEFAULT_AUTH_METHOD:
if client_auth_plugin == 'mysql_native_password' and \
- orig_password == '' and len(handshake_resp.enc_password.value) == 0:
+ orig_password == '' and len(handshake_resp.enc_password.value) == 0:
switch_auth('mysql_native_password')
password = ''
else:
@@ -206,12 +211,12 @@ def get_fast_auth_password():
if new_method == 'caching_sha2_password' and self.session.is_ssl is False:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'error: cant switch to caching_sha2_password without SSL')
- self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg=f'caching_sha2_password without SSL not supported').send()
+ 'error: cant switch to caching_sha2_password without SSL')
+ self.packet(ErrPacket, err_code=ERR.ER_PASSWORD_NO_MATCH, msg='caching_sha2_password without SSL not supported').send()
return False
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- f'switch auth method to {new_method}')
+ f'switch auth method to {new_method}')
password = switch_auth(new_method)
if new_method == 'caching_sha2_password':
@@ -220,21 +225,21 @@ def get_fast_auth_password():
orig_password = orig_password_hash
elif orig_username == username and HARDCODED_PASSWORD == '':
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'empty password')
+ 'empty password')
password = ''
elif 'caching_sha2_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'check auth using caching_sha2_password')
+ 'check auth using caching_sha2_password')
password = get_fast_auth_password()
orig_password = HARDCODED_PASSWORD
elif 'mysql_native_password' in client_auth_plugin:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'check auth using mysql_native_password')
+ 'check auth using mysql_native_password')
password = handshake_resp.enc_password.value
orig_password = orig_password_hash
else:
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- 'unknown method, possible ERROR. Try to switch to mysql_native_password')
+ 'unknown method, possible ERROR. Try to switch to mysql_native_password')
password = switch_auth('mysql_native_password')
orig_password = orig_password_hash
@@ -243,7 +248,7 @@ def get_fast_auth_password():
except Exception:
self.session.database = None
log.info(f'Check auth, user={username}, ssl={self.session.is_ssl}, auth_method={client_auth_plugin}: '
- f'connecting to database {self.session.database}')
+ f'connecting to database {self.session.database}')
if self.isAuthOk(username, orig_username, password, orig_password):
self.packet(OkPacket).send()
@@ -296,9 +301,27 @@ def answerTableQuery(self, query):
self.sendPackageGroup(packages)
def insert_predictor_answer(self, sql):
- global mdb, default_store
+ global mdb, default_store, config
insert = SQLQuery.parse_insert(sql)
+ is_external_datasource = isinstance(insert.get('external_datasource'), str) and len(insert['external_datasource']) > 0
+ is_select_data_query = isinstance(insert.get('select_data_query'), str) and len(insert['select_data_query']) > 0
+
+ if is_external_datasource and is_select_data_query:
+ self.packet(
+ ErrPacket,
+ err_code=ERR.ER_WRONG_ARGUMENTS,
+ msg="'external_datasource' and 'select_data_query' should not be used in one query"
+ ).send()
+ return
+ elif is_external_datasource is False and is_select_data_query is False:
+ self.packet(
+ ErrPacket,
+ err_code=ERR.ER_WRONG_ARGUMENTS,
+ msg="in query should be 'external_datasource' or 'select_data_query'"
+ ).send()
+ return
+
models = mdb.get_models()
if insert['name'] in [x['name'] for x in models]:
self.packet(
@@ -310,10 +333,10 @@ def insert_predictor_answer(self, sql):
kwargs = {}
if isinstance(insert.get('training_options'), str) \
- and len(insert['training_options']) > 0:
+ and len(insert['training_options']) > 0:
try:
kwargs = json.loads(insert['training_options'])
- except Exception as e:
+ except Exception:
self.packet(
ErrPacket,
err_code=ERR.ER_WRONG_ARGUMENTS,
@@ -321,15 +344,15 @@ def insert_predictor_answer(self, sql):
).send()
return
- # TODO clickhouse with any type of used escaping sends escaped quotes as \'.
- # Need to check other clients, they behaviour can be differ
- insert['select_data_query'] = insert['select_data_query'].replace(r"\'", "'")
+ if is_select_data_query:
+ insert['select_data_query'] = insert['select_data_query'].replace(r"\'", "'")
+ ds_type = config['integrations'][self.session.integration]['type']
+ ds = default_store.save_datasource(insert['name'], ds_type, insert['select_data_query'])
+ elif is_external_datasource:
+ ds = default_store.get_datasource_obj(insert['external_datasource'], raw=True)
- db = sql.lower()[sql.lower().find('predictors_') + len('predictors_'):]
- db = db[:db.find(' ')].strip(' `')
- ds_type = db
- ds = default_store.save_datasource(insert['name'], ds_type, insert['select_data_query'])
insert['predict'] = [x.strip() for x in insert['predict'].split(',')]
+
mdb.learn(insert['name'], ds, insert['predict'], kwargs)
self.packet(OkPacket).send()
@@ -339,7 +362,7 @@ def delete_predictor_answer(self, sql):
fake_sql = sql.strip(' ')
fake_sql = 'select name ' + fake_sql[len('delete '):]
- query = SQLQuery(fake_sql)
+ query = SQLQuery(fake_sql, session=self.session)
result = query.fetch(datahub)
@@ -365,10 +388,10 @@ def handle_custom_command(self, sql):
insert = SQLQuery.parse_insert(sql)
if 'command' not in insert:
- self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"command should be inserted").send()
+ self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="command should be inserted").send()
return
if len(insert) > 1:
- self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"only command should be inserted").send()
+ self.packet(ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="only command should be inserted").send()
return
command = insert['command'].strip(' ;').split()
@@ -449,15 +472,15 @@ def queryAnswer(self, sql):
self.answerShowCollation()
return
elif keyword == 'delete' and \
- ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
+ ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
self.delete_predictor_answer(sql)
return
elif keyword == 'insert' and \
- ('mindsdb.commands' in sql_lower or self.session.database == 'mindsdb' and 'commands' in sql_lower):
+ ('mindsdb.commands' in sql_lower or self.session.database == 'mindsdb' and 'commands' in sql_lower):
self.handle_custom_command(sql)
return
elif keyword == 'insert' and \
- ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
+ ('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
self.insert_predictor_answer(sql)
return
elif keyword in ('update', 'insert'):
@@ -476,7 +499,7 @@ def queryAnswer(self, sql):
if 'database()' in sql_lower:
self.answerSelectDatabase()
return
- query = SQLQuery(sql, self.session.database)
+ query = SQLQuery(sql, session=self.session)
return self.selectAnswer(query)
elif keyword == 'rollback':
self.packet(OkPacket).send()
@@ -863,16 +886,18 @@ def packet(self, packetClass=Packet, **kwargs):
return p
@staticmethod
- def startProxy(config):
+ def startProxy(_config):
global HARDCODED_USER
global HARDCODED_PASSWORD
global CERT_PATH
global default_store
global mdb
global datahub
+ global config
"""
Create a server and wait for incoming connections until Ctrl-C
"""
+ config = _config
init_logger(config)
HARDCODED_USER = config['api']['mysql']['user']
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -1,5 +1,6 @@
import requests
-from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
+from mindsdb_native.libs.constants.mindsdb import DATA_SUBTYPES
+
class Clickhouse():
def __init__(self, config, name):
@@ -41,12 +42,12 @@ def _query(self, query):
params = {'user': 'default'}
try:
params['user'] = self.config['integrations'][self.name]['user']
- except:
+ except Exception:
pass
try:
params['password'] = self.config['integrations'][self.name]['password']
- except:
+ except Exception:
pass
host = self.config['integrations'][self.name]['host']
@@ -59,35 +60,38 @@ def _query(self, query):
return response
+ def _get_mysql_user(self):
+ return f"{self.config['api']['mysql']['user']}_{self.name}"
+
def setup(self):
self._query('DROP DATABASE IF EXISTS mindsdb')
self._query('CREATE DATABASE IF NOT EXISTS mindsdb')
msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
- msqyl_user = self.config['api']['mysql']['user']
msqyl_pass = self.config['api']['mysql']['password']
+ msqyl_user = self._get_mysql_user()
q = f"""
- CREATE TABLE IF NOT EXISTS mindsdb.predictors
- (name String,
+ CREATE TABLE IF NOT EXISTS mindsdb.predictors (
+ name String,
status String,
accuracy String,
predict String,
select_data_query String,
+ external_datasource String,
training_options String
- ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors_clickhouse', '{msqyl_user}', '{msqyl_pass}')
+ ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'predictors', '{msqyl_user}', '{msqyl_pass}')
"""
self._query(q)
q = f"""
CREATE TABLE IF NOT EXISTS mindsdb.commands (
command String
- ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'commands_clickhouse', '{msqyl_user}', '{msqyl_pass}')
+ ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', 'commands', '{msqyl_user}', '{msqyl_pass}')
"""
self._query(q)
-
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
name = model_meta['name']
@@ -96,6 +100,7 @@ def register_predictors(self, model_data_arr):
del stats['columns_to_ignore']
columns_sql = ','.join(self._to_clickhouse_table(stats, model_meta['predict']))
columns_sql += ',`select_data_query` Nullable(String)'
+ columns_sql += ',`external_datasource` Nullable(String)'
for col in model_meta['predict']:
columns_sql += f',`{col}_confidence` Nullable(Float64)'
if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
@@ -104,13 +109,13 @@ def register_predictors(self, model_data_arr):
columns_sql += f',`{col}_explain` Nullable(String)'
msqyl_conn = self.config['api']['mysql']['host'] + ':' + str(self.config['api']['mysql']['port'])
- msqyl_user = self.config['api']['mysql']['user']
msqyl_pass = self.config['api']['mysql']['password']
+ msqyl_user = self._get_mysql_user()
q = f"""
CREATE TABLE mindsdb.{name}
({columns_sql}
- ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', '{name}_clickhouse', '{msqyl_user}', '{msqyl_pass}')
+ ) ENGINE=MySQL('{msqyl_conn}', 'mindsdb', {name}, '{msqyl_user}', '{msqyl_pass}')
"""
self._query(q)
@@ -120,7 +125,6 @@ def unregister_predictor(self, name):
"""
self._query(q)
-
def check_connection(self):
try:
res = self._query('select 1;')
diff --git a/mindsdb/integrations/mariadb/mariadb.py b/mindsdb/integrations/mariadb/mariadb.py
--- a/mindsdb/integrations/mariadb/mariadb.py
+++ b/mindsdb/integrations/mariadb/mariadb.py
@@ -1,8 +1,6 @@
-import requests
-
import mysql.connector
-from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
+from mindsdb_native.libs.constants.mindsdb import DATA_SUBTYPES
class Mariadb():
@@ -35,7 +33,7 @@ def _to_mariadb_table(self, stats, predicted_cols):
column_declaration.append(f' `{name}` {new_type} ')
if name in predicted_cols:
column_declaration.append(f' `{name}_original` {new_type} ')
- except Exception as e:
+ except Exception:
print(f'Error: cant convert type {col_subtype} of column {name} to mariadb tpye')
return column_declaration
@@ -43,12 +41,12 @@ def _to_mariadb_table(self, stats, predicted_cols):
def _query(self, query):
con = mysql.connector.connect(host=self.config['integrations'][self.name]['host'], port=self.config['integrations'][self.name]['port'], user=self.config['integrations'][self.name]['user'], password=self.config['integrations'][self.name]['password'])
- cur = con.cursor(dictionary=True,buffered=True)
+ cur = con.cursor(dictionary=True, buffered=True)
cur.execute(query)
res = True
try:
res = cur.fetchall()
- except:
+ except Exception:
pass
con.commit()
con.close()
@@ -56,7 +54,7 @@ def _query(self, query):
return res
def _get_connect_string(self, table):
- user = self.config['api']['mysql']['user']
+ user = f"{self.config['api']['mysql']['user']}_{self.name}"
password = self.config['api']['mysql']['password']
host = self.config['api']['mysql']['host']
port = self.config['api']['mysql']['port']
@@ -73,7 +71,7 @@ def setup(self):
self._query('CREATE DATABASE IF NOT EXISTS mindsdb')
- connect = self._get_connect_string('predictors_mariadb')
+ connect = self._get_connect_string('predictors')
q = f"""
CREATE TABLE IF NOT EXISTS mindsdb.predictors
@@ -82,12 +80,13 @@ def setup(self):
accuracy VARCHAR(500),
predict VARCHAR(500),
select_data_query VARCHAR(500),
+ external_datasource VARCHAR(500),
training_options VARCHAR(500)
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
self._query(q)
- connect = self._get_connect_string('commands_mariadb')
+ connect = self._get_connect_string('commands')
q = f"""
CREATE TABLE IF NOT EXISTS mindsdb.commands (
@@ -102,6 +101,7 @@ def register_predictors(self, model_data_arr):
stats = model_meta['data_analysis']
columns_sql = ','.join(self._to_mariadb_table(stats, model_meta['predict']))
columns_sql += ',`select_data_query` varchar(500)'
+ columns_sql += ',`external_datasource` varchar(500)'
for col in model_meta['predict']:
columns_sql += f',`{col}_confidence` double'
if model_meta['data_analysis'][col]['typing']['data_type'] == 'Numeric':
@@ -109,7 +109,7 @@ def register_predictors(self, model_data_arr):
columns_sql += f',`{col}_max` double'
columns_sql += f',`{col}_explain` varchar(500)'
- connect = self._get_connect_string(f'{name}_mariadb')
+ connect = self._get_connect_string(name)
q = f"""
CREATE TABLE mindsdb.{name}
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -14,8 +14,6 @@
from mindsdb.interfaces.datastore.sqlite_helpers import *
from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb_native import FileDS, ClickhouseDS, MariaDS
-from mindsdb.interfaces.datastore.sqlite_helpers import create_sqlite_db
-
class DataStore():
def __init__(self, config, storage_dir=None):
@@ -78,7 +76,6 @@ def save_datasource(self, name, source_type, source, file_path=None):
ds_dir = os.path.join(ds_meta_dir, 'datasource')
os.mkdir(ds_dir)
- print(source_type)
if source_type == 'file':
source = os.path.join(ds_dir, source)
os.replace(file_path, source)
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -71,9 +71,9 @@ def rename_model(self, name, new_name):
self.dbw.register_predictors(new_name)
def load_model(self, fpath):
- F.load_model(model_archive_path=fpath)
+ F.import_model(model_archive_path=fpath)
# @TODO How do we figure out the name here ?
#dbw.register_predictor(...)
def export_model(self,name):
- F.export_model(model_name=name)
+ F.export_predictor(model_name=name)
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -19,7 +19,7 @@ def __init__(self, config_path):
def _read(self):
if isinstance(self.config_path, str) and os.path.isfile(self.config_path):
with open(self.config_path, 'r') as fp:
- self._config = config = json.load(fp)
+ self._config = json.load(fp)
else:
raise TypeError('`self.config_path` must be a string representing a local file path to a json config')
@@ -46,12 +46,40 @@ def get(self, key, default=None):
def get_all(self):
return self._config
- def set(self, key_chain, value):
- pass
+ def set(self, key_chain, value, delete=False):
+ with open(self.config_path, 'r') as fp:
+ self._config = json.load(fp)
+
+ c = self._config
+ for i, k in enumerate(key_chain):
+ if k in c and i+1 < len(key_chain):
+ c = c[k]
+ elif k not in c and i+1 < len(key_chain):
+ c[k] = {}
+ c = c[k]
+ else:
+ if delete:
+ del c[k]
+ else:
+ c[k] = value
+
+ with open(self.config_path, 'w') as fp:
+ json.dump(self._config, fp, indent=4, sort_keys=True)
# Higher level interface
def add_db_integration(self, name, dict):
- pass
+ if 'enabled' not in dict:
+ dict['enabled'] = True
+
+ self.set(['integrations', name], dict)
def modify_db_integration(self, name, dict):
- pass
+ old_dict = self._config['integrations'][name]
+ for k in old_dict:
+ if k not in dict:
+ dict[k] = old_dict[k]
+
+ self.add_db_integration(name, dict)
+
+ def remove_db_integration(self, name):
+ self.set(['integrations', name], None, True)
| diff --git a/requirements_test.txt b/requirements_test.txt
--- a/requirements_test.txt
+++ b/requirements_test.txt
@@ -1,3 +1,4 @@
pytest>=3.3.2
pytest-randomly>=3.3.1
-pytest-ordering >= 0.6
+pytest-ordering>=0.6
+docker>=4.2.2
diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -2,6 +2,7 @@
import time
import os
import signal
+import psutil
from random import randint
import unittest
@@ -14,6 +15,7 @@
pred_name = f'hr_predictor_{rand}'
root = 'http://localhost:47334'
+
class HTTPTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -24,7 +26,7 @@ def setUpClass(cls):
res = requests.get(f'{root}/util/ping')
if res.status_code != 200:
raise Exception('')
- except:
+ except Exception:
time.sleep(1)
if i == 19:
raise Exception("Can't connect !")
@@ -32,11 +34,68 @@ def setUpClass(cls):
@classmethod
def tearDownClass(cls):
try:
+ conns = psutil.net_connections()
+ pid = [x.pid for x in conns if x.status == 'LISTEN' and x.laddr[1] == 47334 and x.pid is not None]
+ if len(pid) > 0:
+ os.kill(pid[0], 9)
cls.sp.kill()
- except:
+ except Exception:
pass
- def test_1_put_ds(self):
+ def test_1_config(self):
+ res = requests.get(f'{root}/config/integrations')
+ assert res.status_code == 200
+ integration_names = res.json()
+ assert set(integration_names['integrations']) == set(['default_mariadb', 'default_clickhouse'])
+
+ test_integration_data = {'enabled': False, 'host':'test'}
+ res = requests.put(f'{root}/config/integrations/test_integration', json={'params':test_integration_data})
+ assert res.status_code == 200
+
+ res = requests.get(f'{root}/config/integrations/test_integration')
+ assert res.status_code == 200
+ test_integration = res.json()
+ assert len(test_integration) == 2
+
+ res = requests.delete(f'{root}/config/integrations/test_integration')
+ assert res.status_code == 200
+
+ res = requests.get(f'{root}/config/integrations/test_integration')
+ assert res.status_code != 200
+
+ for k in test_integration_data:
+ assert test_integration[k] == test_integration_data[k]
+
+ for name in ['default_mariadb', 'default_clickhouse']:
+ # Get the original
+ res = requests.get(f'{root}/config/integrations/{name}')
+ assert res.status_code == 200
+
+ integration = res.json()
+ for k in ['enabled','host','port','password','type','user']:
+ assert k in integration
+ assert integration[k] is not None
+
+ # Modify it
+ res = requests.post(f'{root}/config/integrations/{name}', json={'params':{'password':'test'}})
+
+ res = requests.get(f'{root}/config/integrations/{name}')
+ assert res.status_code == 200
+ modified_integration = res.json()
+ assert modified_integration['password'] == 'test'
+ for k in integration:
+ if k != 'password':
+ assert modified_integration[k] == integration[k]
+
+ # Put the original values back in
+ res = requests.post(f'{root}/config/integrations/{name}', json={'params':integration})
+ res = requests.get(f'{root}/config/integrations/{name}')
+ assert res.status_code == 200
+ modified_integration = res.json()
+ for k in integration:
+ assert modified_integration[k] == integration[k]
+
+ def test_2_put_ds(self):
# PUT datasource
params = {
'name': ds_name,
@@ -47,7 +106,7 @@ def test_1_put_ds(self):
res = requests.put(url, json=params)
assert res.status_code == 200
- def test_2_analyze(self):
+ def test_3_analyze(self):
response = requests.get(f'{root}/datasources/{ds_name}/analyze')
assert response.status_code == 200
@@ -74,7 +133,7 @@ def test_3_put_predictor(self):
assert isinstance(res.json()[0]['rental_price']['predicted_value'],float)
assert res.status_code == 200
- def test_3_datasources(self):
+ def test_4_datasources(self):
"""
Call list datasources endpoint
THEN check the response is success
@@ -82,7 +141,7 @@ def test_3_datasources(self):
response = requests.get(f'{root}/datasources/')
assert response.status_code == 200
- def test_4_datasource_not_found(self):
+ def test_5_datasource_not_found(self):
"""
Call unexisting datasource
then check the response is NOT FOUND
@@ -90,7 +149,7 @@ def test_4_datasource_not_found(self):
response = requests.get(f'{root}/datasource/dummy_source')
assert response.status_code == 404
- def test_5_ping(self):
+ def test_6_ping(self):
"""
Call utilities ping endpoint
THEN check the response is success
@@ -98,7 +157,7 @@ def test_5_ping(self):
response = requests.get(f'{root}/util/ping')
assert response.status_code == 200
- def test_6_predictors(self):
+ def test_7_predictors(self):
"""
Call list predictors endpoint
THEN check the response is success
@@ -106,7 +165,7 @@ def test_6_predictors(self):
response = requests.get(f'{root}/predictors/')
assert response.status_code == 200
- def test_6_predictor_not_found(self):
+ def test_8_predictor_not_found(self):
"""
Call unexisting predictor
then check the response is NOT FOUND
@@ -115,4 +174,4 @@ def test_6_predictor_not_found(self):
assert response.status_code == 404
if __name__ == '__main__':
- unittest.main()
+ unittest.main(failfast=True)
diff --git a/tests/integration_tests/common.py b/tests/integration_tests/common.py
deleted file mode 100755
--- a/tests/integration_tests/common.py
+++ /dev/null
@@ -1,46 +0,0 @@
-import psutil
-import time
-import pathlib
-import os
-import json
-
-def is_port_in_use(port_num):
- portsinuse = []
- conns = psutil.net_connections()
- portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
- portsinuse.sort()
- return int(port_num) in portsinuse
-
-def wait_port(port_num, timeout):
- start_time = time.time()
-
- in_use = is_port_in_use(port_num)
- while in_use is False and (time.time() - start_time) < timeout:
- time.sleep(2)
- in_use = is_port_in_use(port_num)
-
- return in_use
-
-def prepare_config(config):
- for key in config._config['integrations'].keys():
- config._config['integrations'][key]['enabled'] = key == 'default_mariadb'
-
- TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../temp/').resolve()
- TEMP_DIR.mkdir(parents=True, exist_ok=True)
-
- config.merge({
- 'interface': {
- 'datastore': {
- 'storage_dir': str(TEMP_DIR.joinpath('datastore/'))
- },
- 'mindsdb_native': {
- 'storage_dir': str(TEMP_DIR.joinpath('predictors/'))
- }
- }
- })
-
- temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
- with open(temp_config_path, 'wt') as f:
- f.write(json.dumps(config._config))
-
- return temp_config_path
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
new file mode 100755
--- /dev/null
+++ b/tests/integration_tests/flows/common.py
@@ -0,0 +1,89 @@
+import psutil
+import shutil
+import time
+import pathlib
+import json
+import docker
+import subprocess
+
+from mindsdb.interfaces.database.database import DatabaseWrapper
+
+TEST_CONFIG = 'tests/integration_tests/flows/config/config.json'
+
+TESTS_ROOT = pathlib.Path(__file__).parent.absolute().joinpath('../../').resolve()
+
+START_TIMEOUT = 15
+
+OUTPUT = None # [None|subprocess.DEVNULL]
+
+TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../../temp/').resolve()
+TEMP_DIR.mkdir(parents=True, exist_ok=True)
+
+
+def is_port_in_use(port_num):
+ portsinuse = []
+ conns = psutil.net_connections()
+ portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
+ portsinuse.sort()
+ return int(port_num) in portsinuse
+
+
+def wait_port(port_num, timeout):
+ start_time = time.time()
+
+ in_use = is_port_in_use(port_num)
+ while in_use is False and (time.time() - start_time) < timeout:
+ time.sleep(2)
+ in_use = is_port_in_use(port_num)
+
+ return in_use
+
+
+def wait_api_ready(config):
+ port_num = config['api']['mysql']['port']
+ api_ready = wait_port(port_num, START_TIMEOUT)
+ return api_ready
+
+
+def wait_db(config, db_name):
+ m = DatabaseWrapper(config)
+
+ start_time = time.time()
+
+ connected = m.check_connections()[db_name]
+
+ while not connected and (time.time() - start_time) < START_TIMEOUT:
+ time.sleep(2)
+ connected = m.check_connections()[db_name]
+
+ return connected
+
+
+def prepare_config(config, db):
+ for key in config._config['integrations'].keys():
+ config._config['integrations'][key]['enabled'] = key == db
+
+ datastore_dir = TEMP_DIR.joinpath('datastore/')
+ if datastore_dir.exists():
+ shutil.rmtree(datastore_dir)
+ datastore_dir.mkdir(parents=True, exist_ok=True)
+ mindsdb_native_dir = TEMP_DIR.joinpath('predictors/')
+ if mindsdb_native_dir.exists():
+ shutil.rmtree(mindsdb_native_dir)
+ mindsdb_native_dir.mkdir(parents=True, exist_ok=True)
+
+ config['interface']['datastore']['storage_dir'] = str(datastore_dir)
+ config['interface']['mindsdb_native']['storage_dir'] = str(mindsdb_native_dir)
+
+ temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
+ with open(temp_config_path, 'wt') as f:
+ f.write(json.dumps(config._config))
+
+ return temp_config_path
+
+
+def is_container_run(name):
+ docker_client = docker.from_env()
+ containers = docker_client.containers.list()
+ containers = [x.name for x in containers if x.status == 'running']
+ return name in containers
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -5,7 +5,7 @@
"port": "47334"
},
"mysql": {
- "certificate_path": "config/cert.pem",
+ "certificate_path": "tests/integration_tests/flows/config/cert.pem",
"datasources": [],
"host": "127.0.0.1",
"log": {
@@ -34,7 +34,7 @@
"default_mariadb": {
"enabled": true,
"host": "localhost",
- "password": "",
+ "password": "root",
"port": 3306,
"type": "mariadb",
"user": "root"
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -1,41 +1,53 @@
-from subprocess import Popen
-import time
-import os
-import signal
-import random
-import string
-import requests
-
import unittest
+import requests
+import os
+import csv
+import inspect
+import subprocess
+import atexit
+from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb.utilities.config import Config
-import mindsdb
-
-rand = ''.join(random.choice(string.ascii_uppercase) for _ in range(11))
-ds_name = f'default.hr_ds_{rand}'
-pred_name = f'hr_predictor_{rand}'
-
-# Can't be a fixture since it's used in setup/teardown
-root = 'http://localhost:47334'
-
-def set_get_config_path():
- os.environ['DEV_CONFIG_PATH'] = 'config'
- return os.environ['DEV_CONFIG_PATH'] + '/config.json'
-
-def query_ch(query, database='default'):
- config = Config(set_get_config_path())
- add = ' FORMAT JSON'
- for ele in ['drop ', 'create ','insert ', 'show ']:
- if ele in query.lower():
- add = ''
- query += add
-
- connect_string = 'http://{}:{}'.format(
- config['integrations']['default_clickhouse']['host'],
- config['integrations']['default_clickhouse']['port']
- )
- params = {'user': config['integrations']['default_clickhouse']['user'], 'password': config['integrations']['default_clickhouse']['password'], 'database': database}
+from common import (
+ wait_api_ready,
+ prepare_config,
+ wait_db,
+ is_container_run,
+ TEST_CONFIG,
+ TESTS_ROOT,
+ OUTPUT
+)
+
+TEST_CSV = {
+ 'name': 'home_rentals.csv',
+ 'url': 'https://s3.eu-west-2.amazonaws.com/mindsdb-example-data/home_rentals.csv'
+}
+TEST_DATA_TABLE = 'home_rentals'
+TEST_PREDICTOR_NAME = 'test_predictor'
+
+config = Config(TEST_CONFIG)
+
+
+def query(query):
+ if 'CREATE ' not in query.upper() and 'INSERT ' not in query.upper():
+ query += ' FORMAT JSON'
+
+ host = config['integrations']['default_clickhouse']['host']
+ port = config['integrations']['default_clickhouse']['port']
+
+ connect_string = f'http://{host}:{port}'
+
+ params = {'user': 'default'}
+ try:
+ params['user'] = config['integrations']['default_clickhouse']['user']
+ except Exception:
+ pass
+
+ try:
+ params['password'] = config['integrations']['default_clickhouse']['password']
+ except Exception:
+ pass
res = requests.post(
connect_string,
@@ -44,92 +56,254 @@ def query_ch(query, database='default'):
)
if res.status_code != 200:
- print(f'Error in query: {query}')
+ print(f'ERROR: code={res.status_code} msg={res.text}')
+ raise Exception()
if ' FORMAT JSON' in query:
- return res.json()['data']
- else:
- return res.text
+ res = res.json()['data']
+
+ return res
+
+
+def stop_clickhouse():
+ ch_sp = subprocess.Popen(
+ ['./cli.sh', 'clickhouse-stop'],
+ cwd=TESTS_ROOT.joinpath('docker/').resolve(),
+ stdout=OUTPUT,
+ stderr=OUTPUT
+ )
+ ch_sp.wait()
+
class ClickhouseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- set_get_config_path()
-
- cls.sp = Popen(['python3', '-m', 'mindsdb'], close_fds=True)
-
- for i in range(20):
- try:
- res = requests.get(f'{root}/util/ping')
- if res.status_code != 200:
- raise Exception('')
- except:
- time.sleep(1)
- if i == 19:
- raise Exception("Can't connect !")
-
- #query_ch('DROP DATABASE mindsdb')
- query_ch(f"""
- CREATE TABLE {ds_name} (number_of_rooms String, number_of_bathrooms String, sqft Int64, location String, days_on_market Int64, initial_price Int64, neighborhood String, rental_price Float64) ENGINE=URL('https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/home_rentals/dataset/train.csv', CSVWithNames)
- """)
+ temp_config_path = prepare_config(config, 'default_clickhouse')
- @classmethod
- def tearDownClass(cls):
- try:
- pgrp = os.getpgid(cls.sp.pid)
- os.killpg(pgrp, signal.SIGINT)
- os.remove(set_get_config_path())
- os.system('fuser -k 47335/tcp ; fuser -k 47334/tcp')
- except:
- pass
-
- def test_1_setup(self):
- result = query_ch(f"show tables", 'mindsdb')
- assert 'predictors' in result
- assert 'commands' in result
-
- def test_2_learn(self):
- q = f"""
- insert into mindsdb.predictors
- (name, predict_cols, select_data_query, training_options)
- values (
- '{pred_name}',
- 'rental_price',
- 'SELECT * FROM {ds_name} LIMIT 400',
- '{{"stop_training_in_x_seconds": 10}}'
+ if is_container_run('clickhouse-test') is False:
+ subprocess.Popen(
+ ['./cli.sh', 'clickhouse'],
+ cwd=TESTS_ROOT.joinpath('docker/').resolve(),
+ stdout=OUTPUT,
+ stderr=OUTPUT
+ )
+ atexit.register(stop_clickhouse)
+ clickhouse_ready = wait_db(config, 'default_clickhouse')
+
+ if clickhouse_ready:
+ sp = subprocess.Popen(
+ ['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', temp_config_path],
+ stdout=OUTPUT,
+ stderr=OUTPUT
)
- """
- result = query_ch(q)
- time.sleep(40)
- for i in range(40):
- try:
- result = query_ch(f"SELECT name FROM mindsdb.predictors where name='{pred_name}'")
- if result[0]['name'] != pred_name:
- raise Exception('not ready yet !')
- except:
- time.sleep(1)
- if i == 39:
- raise Exception("Can't get predictor !")
-
- result = query_ch(f"show tables", 'mindsdb')
- assert pred_name in result
-
- def test_3_predict_from_where(self):
- result = query_ch(f"SELECT rental_price FROM mindsdb.{pred_name} where sqft=1000 and location='good'")
- assert len(result) == 1
- assert 'rental_price' in result[0]
-
- def test_4_predict_from_query(self):
- len_ds = query_ch(f'SELECT COUNT(*) as len from {ds_name}')[0]['len']
- result = query_ch(f""" SELECT rental_price, rental_price_explain, rental_price_confidence, rental_price_max, rental_price_min FROM mindsdb.{pred_name} where select_data_query=='SELECT * FROM {ds_name}' """)
-
- assert int(len(result)) == int(len_ds)
- for res in result:
- assert 'rental_price' in res
- assert 'rental_price_explain' in res
- assert 'rental_price_confidence' in res
- assert 'rental_price_max' in res
- assert 'rental_price_min' in res
-
-if __name__ == '__main__':
- unittest.main()
+ atexit.register(sp.kill)
+
+ api_ready = clickhouse_ready and wait_api_ready(config)
+
+ if api_ready is False:
+ print(f'Failed by timeout. ClickHouse started={clickhouse_ready} MindsDB started={api_ready}')
+ raise Exception()
+
+ cls.mdb = MindsdbNative(config)
+
+ models = cls.mdb.get_models()
+ models = [x['name'] for x in models]
+ if TEST_PREDICTOR_NAME in models:
+ cls.mdb.delete_model(TEST_PREDICTOR_NAME)
+
+ query('create database if not exists test')
+ test_tables = query('show tables from test')
+ test_tables = [x['name'] for x in test_tables]
+ if TEST_DATA_TABLE not in test_tables:
+ print('creating test data table...')
+ query(f'''
+ CREATE TABLE test.{TEST_DATA_TABLE} (
+ id Int16,
+ number_of_rooms Int8,
+ number_of_bathrooms Int8,
+ sqft Int32,
+ location String,
+ days_on_market Int16,
+ initial_price Int32,
+ neighborhood String,
+ rental_price Int32
+ ) ENGINE = MergeTree()
+ ORDER BY id
+ PARTITION BY location
+ ''')
+
+ test_csv_path = str(TESTS_ROOT.joinpath('temp/', TEST_CSV['name']).resolve())
+ if os.path.isfile(test_csv_path) is False:
+ r = requests.get(TEST_CSV['url'])
+ with open(test_csv_path, 'wb') as f:
+ f.write(r.content)
+
+ with open(test_csv_path) as f:
+ csvf = csv.reader(f)
+ i = 0
+ for row in csvf:
+ if i > 0:
+ number_of_rooms = int(row[0])
+ number_of_bathrooms = int(row[1])
+ sqft = int(float(row[2].replace(',', '.')))
+ location = str(row[3])
+ days_on_market = int(row[4])
+ initial_price = int(row[5])
+ neighborhood = str(row[6])
+ rental_price = int(float(row[7]))
+ query(f'''INSERT INTO test.{TEST_DATA_TABLE} VALUES (
+ {i},
+ {number_of_rooms},
+ {number_of_bathrooms},
+ {sqft},
+ '{location}',
+ {days_on_market},
+ {initial_price},
+ '{neighborhood}',
+ {rental_price}
+ )''')
+ i += 1
+ print('done')
+
+ def test_1_initial_state(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ print('Check all testing objects not exists')
+
+ print(f'Predictor {TEST_PREDICTOR_NAME} not exists')
+ models = [x['name'] for x in self.mdb.get_models()]
+ self.assertTrue(TEST_PREDICTOR_NAME not in models)
+
+ print('Test datasource exists')
+ test_tables = query('show tables from test')
+ test_tables = [x['name'] for x in test_tables]
+ self.assertTrue(TEST_DATA_TABLE in test_tables)
+
+ print('Test predictor table not exists')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x['name'] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
+
+ print('mindsdb.predictors table exists')
+ self.assertTrue('predictors' in mindsdb_tables)
+
+ print('mindsdb.commands table exists')
+ self.assertTrue('commands' in mindsdb_tables)
+
+ def test_2_insert_predictor(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ query(f"""
+ insert into mindsdb.predictors (name, predict, select_data_query, training_options) values
+ (
+ '{TEST_PREDICTOR_NAME}',
+ 'rental_price, location',
+ 'select * from test.{TEST_DATA_TABLE} limit 100',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print('predictor record in mindsdb.predictors')
+ res = query(f"select status from mindsdb.predictors where name = '{TEST_PREDICTOR_NAME}'")
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print('predictor table in mindsdb db')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x['name'] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
+
+ def test_3_insert_predictor_with_existing_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_2'
+ models = self.mdb.get_models()
+ models = [x['name'] for x in models]
+ if name in models:
+ self.mdb.delete_model(name)
+
+ query(f"""
+ insert into mindsdb.predictors (name, predict, external_datasource, training_options) values
+ (
+ '{name}',
+ 'rental_price, location',
+ '{TEST_PREDICTOR_NAME}',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print('predictor record in mindsdb.predictors')
+ res = query(f"select status from mindsdb.predictors where name = '{name}'")
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print('predictor table in mindsdb db')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x['name'] for x in mindsdb_tables]
+ self.assertTrue(name in mindsdb_tables)
+
+ def test_4_query_predictor(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+ res = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{TEST_PREDICTOR_NAME} where sqft=1000
+ """)
+
+ print('check result')
+ self.assertTrue(len(res) == 1)
+
+ res = res[0]
+
+ self.assertTrue(res['rental_price'] is not None and res['rental_price'] != 'None')
+ self.assertTrue(res['location'] is not None and res['location'] != 'None')
+ # NOTE in current Clickhouse all int fields returns as strings
+ self.assertTrue(res['sqft'] == '1000')
+ self.assertIsInstance(res['rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_min'], int)
+ self.assertIsInstance(res['rental_price_max'], int)
+ self.assertIsInstance(res['rental_price_explain'], str)
+ self.assertTrue(res['number_of_rooms'] == 'None' or res['number_of_rooms'] is None)
+
+ def test_5_range_query(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+
+ results = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{TEST_PREDICTOR_NAME} where select_data_query='select * from test.{TEST_DATA_TABLE} limit 3'
+ """)
+
+ print('check result')
+ self.assertTrue(len(results) == 3)
+ for res in results:
+ self.assertTrue(res['rental_price'] is not None and res['rental_price'] != 'None')
+ self.assertTrue(res['location'] is not None and res['location'] != 'None')
+ self.assertIsInstance(res['rental_price_confidence'], float)
+ self.assertIsInstance(res['rental_price_min'], int)
+ self.assertIsInstance(res['rental_price_max'], int)
+ self.assertIsInstance(res['rental_price_explain'], str)
+
+ def test_6_delete_predictor_by_command(self):
+ print(f'\nExecuting {inspect.stack()[0].function}')
+
+ query(f"""
+ insert into mindsdb.commands values ('delete predictor {TEST_PREDICTOR_NAME}');
+ """)
+
+ print(f'Predictor {TEST_PREDICTOR_NAME} not exists')
+ models = [x['name'] for x in self.mdb.get_models()]
+ self.assertTrue(TEST_PREDICTOR_NAME not in models)
+
+ print('Test predictor table not exists')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x['name'] for x in mindsdb_tables]
+ self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
+
+
+if __name__ == "__main__":
+ try:
+ unittest.main(failfast=True)
+ print('Tests passed!')
+ except Exception as e:
+ print(f'Tests Failed!\n{e}')
diff --git a/tests/integration_tests/flows/_test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
similarity index 62%
rename from tests/integration_tests/flows/_test_mariadb.py
rename to tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/_test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -2,24 +2,24 @@
import requests
import os
import csv
-import time
import inspect
import subprocess
-import pathlib
import atexit
-import json
import mysql.connector
from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb.utilities.config import Config
-from mindsdb.interfaces.database.database import DatabaseWrapper
-from common import wait_port, prepare_config
-
-TEST_CONFIG = '/path_to/config.json'
-
-START_TIMEOUT = 15
+from common import (
+ wait_api_ready,
+ prepare_config,
+ wait_db,
+ is_container_run,
+ TEST_CONFIG,
+ TESTS_ROOT,
+ OUTPUT
+)
TEST_CSV = {
'name': 'home_rentals.csv',
@@ -30,6 +30,7 @@
config = Config(TEST_CONFIG)
+
def query(q, as_dict=False):
con = mysql.connector.connect(
host=config['integrations']['default_mariadb']['host'],
@@ -44,83 +45,52 @@ def query(q, as_dict=False):
res = True
try:
res = cur.fetchall()
- except:
+ except Exception:
pass
con.commit()
con.close()
return res
-def create_churn_dataset(self):
- for mode in ['train','test']:
- os.system(f'rm {test_csv}')
- cls.mdb = MindsdbNative(config)
-
- if os.path.isfile(test_csv) is False:
- r = requests.get(f"https://raw.githubusercontent.com/mindsdb/mindsdb-examples/master/benchmarks/churn/dataset/{mode}.csv")
- with open(test_csv, 'wb') as f:
- f.write(r.content)
- models = cls.mdb.get_models()
- models = [x['name'] for x in models]
- if TEST_PREDICTOR_NAME in models:
- cls.mdb.delete_model(TEST_PREDICTOR_NAME)
+def stop_mariadb():
+ maria_sp = subprocess.Popen(
+ ['./cli.sh', 'mariadb-stop'],
+ cwd=TESTS_ROOT.joinpath('docker/').resolve(),
+ stdout=OUTPUT,
+ stderr=OUTPUT
+ )
+ maria_sp.wait()
- query('create database if not exists test')
- test_tables = query('show tables from test')
- test_tables = [x[0] for x in test_tables]
- if TEST_DATA_TABLE not in test_tables:
- query(f'DROP TABLE IF EXISTS data.{TEST_DATA_TABLE}_{mode}')
- query(f'''
- CREATE TABLE data.{TEST_DATA_TABLE}_{mode} (
- CreditScore int,
- Geography varchar(300),
- Gender varchar(300),
- Age int,
- Tenure int,
- Balance float,
- NumOfProducts int,
- HasCrCard int,
- IsActiveMember int,
- EstimatedSalary float,
- Exited int
- )
- ''')
- with open(test_csv) as f:
- csvf = csv.reader(f)
- i = 0
- for row in csvf:
- if i > 0:
- CreditScore = int(row[0])
- Geography = str(row[1])
- Gender = str(row[2])
- Age = int(row[3])
- Tenure = int(row[4])
- Balance = float(row[5])
- NumOfProducts = int(row[6])
- HasCrCard = int(row[7])
- IsActiveMember = int(row[8])
- EstimatedSalary = float(row[9])
- Exited = int(row[10])
-
- query(f'''INSERT INTO data.{TEST_DATA_TABLE}_{mode} VALUES (
- {CreditScore},
- '{Geography}',
- '{Gender}',
- {Age},
- {Tenure},
- {Balance},
- {NumOfProducts},
- {HasCrCard},
- {IsActiveMember},
- {EstimatedSalary},
- {Exited}
- )''')
- i += 1
- os.system(f'rm {test_csv}')
class MariaDBTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
+ temp_config_path = prepare_config(config, 'default_mariadb')
+
+ if is_container_run('mariadb-test') is False:
+ subprocess.Popen(
+ ['./cli.sh', 'mariadb'],
+ cwd=TESTS_ROOT.joinpath('docker/').resolve(),
+ stdout=OUTPUT,
+ stderr=OUTPUT
+ )
+ atexit.register(stop_mariadb)
+ maria_ready = wait_db(config, 'default_mariadb')
+
+ if maria_ready:
+ sp = subprocess.Popen(
+ ['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', temp_config_path],
+ stdout=OUTPUT,
+ stderr=OUTPUT
+ )
+ atexit.register(sp.kill)
+
+ api_ready = maria_ready and wait_api_ready(config)
+
+ if maria_ready is False or api_ready is False:
+ print(f'Failed by timeout. MariaDB started={maria_ready}, MindsDB started={api_ready}')
+ raise Exception()
+
cls.mdb = MindsdbNative(config)
models = cls.mdb.get_models()
@@ -146,11 +116,12 @@ def setUpClass(cls):
)
''')
- test_csv_path = str(pathlib.Path(__file__).parent.absolute().joinpath('../temp/', TEST_CSV['name']).resolve())
+ test_csv_path = str(TESTS_ROOT.joinpath('temp/', TEST_CSV['name']).resolve())
if os.path.isfile(test_csv_path) is False:
r = requests.get(TEST_CSV['url'])
with open(test_csv_path, 'wb') as f:
f.write(r.content)
+
with open(test_csv_path) as f:
csvf = csv.reader(f)
i = 0
@@ -158,7 +129,7 @@ def setUpClass(cls):
if i > 0:
number_of_rooms = int(row[0])
number_of_bathrooms = int(row[1])
- sqft = int(float(row[2].replace(',','.')))
+ sqft = int(float(row[2].replace(',', '.')))
location = str(row[3])
days_on_market = int(row[4])
initial_price = int(row[5])
@@ -185,27 +156,26 @@ def test_1_initial_state(self):
models = [x['name'] for x in self.mdb.get_models()]
self.assertTrue(TEST_PREDICTOR_NAME not in models)
- print(f'Test datasource exists')
+ print('Test datasource exists')
test_tables = query('show tables from test')
test_tables = [x[0] for x in test_tables]
self.assertTrue(TEST_DATA_TABLE in test_tables)
- print(f'Test predictor table not exists')
+ print('Test predictor table not exists')
mindsdb_tables = query('show tables from mindsdb')
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
- print(f'mindsdb.predictors table exists')
+ print('mindsdb.predictors table exists')
self.assertTrue('predictors' in mindsdb_tables)
- print(f'mindsdb.commands table exists')
+ print('mindsdb.commands table exists')
self.assertTrue('commands' in mindsdb_tables)
-
def test_2_insert_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
query(f"""
- insert into mindsdb.predictors (name, predict_cols, select_data_query, training_options) values
+ insert into mindsdb.predictors (name, predict, select_data_query, training_options) values
(
'{TEST_PREDICTOR_NAME}',
'rental_price, location',
@@ -214,17 +184,44 @@ def test_2_insert_predictor(self):
);
""")
- print(f'predictor record in mindsdb.predictors')
+ print('predictor record in mindsdb.predictors')
res = query(f"select status from mindsdb.predictors where name = '{TEST_PREDICTOR_NAME}'", as_dict=True)
self.assertTrue(len(res) == 1)
self.assertTrue(res[0]['status'] == 'complete')
- print(f'predictor table in mindsdb db')
+ print('predictor table in mindsdb db')
mindsdb_tables = query('show tables from mindsdb')
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_query_predictor(self):
+ def test_3_insert_predictor_with_existing_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_2'
+ models = self.mdb.get_models()
+ models = [x['name'] for x in models]
+ if name in models:
+ self.mdb.delete_model(name)
+
+ query(f"""
+ insert into mindsdb.predictors (name, predict, external_datasource, training_options) values
+ (
+ '{name}',
+ 'rental_price, location',
+ '{TEST_PREDICTOR_NAME}',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print('predictor record in mindsdb.predictors')
+ res = query(f"select status from mindsdb.predictors where name = '{name}'", as_dict=True)
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print('predictor table in mindsdb db')
+ mindsdb_tables = query('show tables from mindsdb')
+ mindsdb_tables = [x[0] for x in mindsdb_tables]
+ self.assertTrue(name in mindsdb_tables)
+
+ def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
select
@@ -248,7 +245,7 @@ def test_3_query_predictor(self):
self.assertIsInstance(res['rental_price_explain'], str)
self.assertTrue(res['number_of_rooms'] == 'None' or res['number_of_rooms'] is None)
- def test_4_range_query(self):
+ def test_5_range_query(self):
print(f'\nExecuting {inspect.stack()[0].function}')
results = query(f"""
@@ -269,7 +266,7 @@ def test_4_range_query(self):
self.assertIsInstance(res['rental_price_max'], float)
self.assertIsInstance(res['rental_price_explain'], str)
- def test_5_delete_predictor_by_command(self):
+ def test_6_delete_predictor_by_command(self):
print(f'\nExecuting {inspect.stack()[0].function}')
query(f"""
@@ -280,16 +277,16 @@ def test_5_delete_predictor_by_command(self):
models = [x['name'] for x in self.mdb.get_models()]
self.assertTrue(TEST_PREDICTOR_NAME not in models)
- print(f'Test predictor table not exists')
+ print('Test predictor table not exists')
mindsdb_tables = query('show tables from mindsdb')
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
- def test_6_insert_predictor_again(self):
+ def test_7_insert_predictor_again(self):
print(f'\nExecuting {inspect.stack()[0].function}')
self.test_2_insert_predictor()
- def test_7_delete_predictor_by_delete_statement(self):
+ def test_8_delete_predictor_by_delete_statement(self):
print(f'\nExecuting {inspect.stack()[0].function}')
query(f"""
delete from mindsdb.predictors where name='{TEST_PREDICTOR_NAME}';
@@ -299,63 +296,15 @@ def test_7_delete_predictor_by_delete_statement(self):
models = [x['name'] for x in self.mdb.get_models()]
self.assertTrue(TEST_PREDICTOR_NAME not in models)
- print(f'Test predictor table not exists')
+ print('Test predictor table not exists')
mindsdb_tables = query('show tables from mindsdb')
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME not in mindsdb_tables)
-def wait_mysql(timeout):
- global config
- m = DatabaseWrapper(config)
-
- start_time = time.time()
-
- connected = m.check_connections()['default_mariadb']
- while not connected and (time.time() - start_time) < timeout:
- time.sleep(2)
- connected = m.check_connections()['default_mariadb']
-
- return connected
-
-def stop_mariadb():
- maria_sp = subprocess.Popen(
- ['./cli.sh', 'mariadb-stop'],
- cwd=pathlib.Path(__file__).parent.absolute().joinpath('../docker/').resolve(),
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL
- )
- maria_sp.wait()
-
if __name__ == "__main__":
- temp_config_path = prepare_config(config)
-
- maria_sp = subprocess.Popen(
- ['./cli.sh', 'mariadb'],
- cwd=pathlib.Path(__file__).parent.absolute().joinpath('../docker/').resolve(),
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL
- )
- atexit.register(stop_mariadb)
- maria_ready = wait_mysql(START_TIMEOUT)
-
- if maria_ready:
- sp = subprocess.Popen(
- ['python3', '-m', 'mindsdb', '--api', 'mysql', '--config', temp_config_path],
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL
- )
- atexit.register(sp.kill)
- port_num = config['api']['mysql']['port']
- api_ready = maria_ready and wait_port(port_num, START_TIMEOUT)
-
try:
- if maria_ready is False or api_ready is False:
- print(f'Failed by timeout. MariaDB started={maria_ready}, MindsDB started={api_ready}')
- raise Exception()
unittest.main(failfast=True)
- print('Tests passed !')
+ print('Tests passed!')
except Exception as e:
- print('Tests Failed !')
- print(e)
- print('done')
+ print(f'Tests Failed!\n{e}')
| Capture information about the host database on receiving queries
The MySQL proxy should capture information about the sending database when it receives a query (e.g. via the CONNECT engine)... at least if at all possible. Things like:
What's the IP from which the query was sent?
What's the port the database is running on (unlikely we can get this)?
What's the type of the database (again, maybe not, since @StpMax you were mentioning that `clickhouse` sends `mariadb` as the name *and* `mariadb` sends `mysql`...)?
... basically, any identifying information we should try to capture.
Alternatively, since this *might* be hard or impossible, whenever we create the `predictors` and `commands` tables in the databases, we should add some sort of magic value that is a hash which tells us the database's identity.
@StpMax let me know what your thoughts are on this?
Obviously this is easy to do when we only have 2 integrations (clickhouse and mysql), but I'm thinking of the scenario where we could integrate with 3 or 4 clickhouse databases and want to know, e.g., which one of them to query data from when a learn/predict query comes through.
We have three sources to get info about the client:
1. socket: here we can easily get the IP and port. But that information is not very useful (at least the port, it will be new on each connection).
2. mysql-protocol: during connection the client sends connection attributes, and one of them is 'client'. But it is unusable, because clickhouse sends 'libmariadb' and mariadb sends 'libmysql'. So it is just the library used by the client, and it can be anything. And there is no way for the server to ask the client for more.
3. use some dirty hack, as we do now with table names. When we create a table named 'A' in mariadb, it creates a remote connection to mindsdb with table name = 'A_mariadb', so we know the query comes from mariadb. The same way we can add some 'identifier' to the table name. ~~But what to use as the database id?~~ As the id we can use a hash of this database's config string.
@StpMax In that case 3 it is, we add a hash of the database's config as part of... ???
Would we have to add it as part of every table? Seems so :(
In which case I'd add a new column to each table `id_{config_hash}`.
Kind of ugly for the user to see it, but no way around that.
Or do you see a better way? If so, do tell, I find this ugly but acceptable.
I mean add the hash as part of the remote table name. For example, we create predictor xxx and register it in mariadb, then we do:
1. get some hash from the connection config for mariadb (a runnable sketch of this idea is included at the end of this thread):
```
x = config['integrations']['default_mariadb']
hash = sha1(x['host'] + x['port'] + x['user'])[:6]
```
2. on mindsdb start, create the remote table in mariadb:
`CREATE TABLE mindsdb.xxx (...) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION=mysql://{user}:{password}@{host}:{port}/mindsdb/xxx_mariadb_{hash}`
3. when the user makes a query such as
`select * from mindsdb.xxx;`
we get on the mindsdb side a query like:
`select * from mindsdb.xxx_mariadb_zzzzzz;`
so we can split the table name into the real name, the source db and the db connection hash.
In this case there is no need to add a magic column to each table. Everything will be hidden from the user.
cons of this way:
1. we shorten the table (predictor) name limit. I find that mysql has only a 64-char limit for names, so the hash should be short.
2. we can get the hash only from queries to mindsdb-related tables. Other db queries (ping, show databases, set variables etc.) can't be identified this way.
or we can use a similar dirty hack :)
Put all the info into the username. So the table create string will be like:
`CREATE TABLE mindsdb.xxx (...) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION=mysql://{user}_mariadb_{HASH}:{password}@{host}:{port}/mindsdb/xxx`
pros:
- the table name length stays at the native db limit
- we will know which DB is trying to connect at the handshake step.
cons:
- the DB can have a limit on username length
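A minimal runnable sketch of the config-hash idea discussed above (the helper names are hypothetical, not MindsDB's actual implementation; the integration dict layout follows the test config shown earlier):
```python
import hashlib

def integration_hash(integration):
    """Short, stable id derived from the integration's connection settings."""
    # Cast to str: 'port' is usually an int in config.json.
    raw = ''.join(str(integration.get(k, '')) for k in ('host', 'port', 'user'))
    return hashlib.sha1(raw.encode('utf-8')).hexdigest()[:6]

def suffix_table(table, db_type, integration):
    """Name of the remote CONNECT table, e.g. 'xxx_mariadb_1a2b3c'."""
    return f'{table}_{db_type}_{integration_hash(integration)}'

def split_table(name):
    """Recover (real_name, source_db, config_hash) from a suffixed table name."""
    return tuple(name.rsplit('_', 2))

mariadb = {'host': 'localhost', 'port': 3306, 'user': 'root'}
remote = suffix_table('xxx', 'mariadb', mariadb)
print(remote)               # xxx_mariadb_<6-char hash>
print(split_table(remote))  # ('xxx', 'mariadb', '<6-char hash>')
```
Keeping the hash to a few hex characters leaves room under the 64-character identifier limit mentioned in the cons above; the same short hash could equally be appended to the username, as in the second variant.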
mindsdb/mindsdb | 601 | mindsdb__mindsdb-601 | [
"527"
] | 79d9f6a93d124be271bf430a4c79c22adcaa970b | diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -59,12 +59,10 @@ def parse_insert(sql):
return dict(zip(columns, values))
- def __init__(self, sql, session=None):
+ def __init__(self, sql, integration=None, database=None):
# parse
- self.session = session
- self.integration = None
- if session is not None:
- self.integration = session.integration
+ self.integration = integration
+ self.database = database
# 'offset x, y' - specific just for mysql, parser dont understand it
sql = re.sub(r'\n?limit([\n\d\s]*),([\n\d\s]*)', ' limit \g<1> offset \g<1> ', sql)
@@ -121,9 +119,7 @@ def _format_from_statement(self, s):
{'value': 'xxx.zzz_mariadb', 'name': 'a'}
-> {'value': 'xxx.zzz', 'name': 'a', source: 'mariadb'}
"""
- database = None
- if self.session is not None:
- database = self.session.database
+ database = self.database
if isinstance(s, str):
if '.' in s:
s = {
@@ -423,7 +419,7 @@ def _fetchData(self):
result = []
if 'select ' not in external_datasource.lower():
external_datasource = f'select * from {external_datasource}'
- query = SQLQuery(external_datasource, default_dn='datasource')
+ query = SQLQuery(external_datasource, database='datasource', integration=self.integration)
result = query.fetch(self.datahub, view='dict')
if result['success'] is False:
raise Exception(result['msg'])
diff --git a/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py b/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py
--- a/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py
+++ b/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py
@@ -27,6 +27,9 @@ def __init__(self) -> object:
self.auth = False
self.logging = log
+ self.integration = None
+ self.database = None
+
self.current_transaction = None
def newTransaction(self, sql_query):
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -362,7 +362,7 @@ def delete_predictor_answer(self, sql):
fake_sql = sql.strip(' ')
fake_sql = 'select name ' + fake_sql[len('delete '):]
- query = SQLQuery(fake_sql, session=self.session)
+ query = SQLQuery(fake_sql, integration=self.session.integration, database=self.session.database)
result = query.fetch(datahub)
@@ -499,7 +499,7 @@ def queryAnswer(self, sql):
if 'database()' in sql_lower:
self.answerSelectDatabase()
return
- query = SQLQuery(sql, session=self.session)
+ query = SQLQuery(sql, integration=self.session.integration, database=self.session.database)
return self.selectAnswer(query)
elif keyword == 'rollback':
self.packet(OkPacket).send()
| diff --git a/tests/docker/mariadb/connect.cnf b/tests/docker/mariadb/connect.cnf
--- a/tests/docker/mariadb/connect.cnf
+++ b/tests/docker/mariadb/connect.cnf
@@ -2,3 +2,6 @@
plugin-load-add=ha_connect.so
connect_jvm_path="/usr/lib/jvm/java-1.8.0-openjdk-amd64/jre/lib/amd64/server"
connect_class_path="/usr/lib/mysql/plugin:/usr/lib/jvm/java-1.8.0-openjdk-amd64/jre/lib/ext/dremio-jdbc-driver.jar"
+wait_timeout=180
+connect_timeout=180
+interactive_timeout=180
\ No newline at end of file
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -1,22 +1,23 @@
import psutil
import shutil
import time
-import pathlib
+from pathlib import Path
import json
import docker
+import requests
import subprocess
from mindsdb.interfaces.database.database import DatabaseWrapper
TEST_CONFIG = 'tests/integration_tests/flows/config/config.json'
-TESTS_ROOT = pathlib.Path(__file__).parent.absolute().joinpath('../../').resolve()
+TESTS_ROOT = Path(__file__).parent.absolute().joinpath('../../').resolve()
START_TIMEOUT = 15
OUTPUT = None # [None|subprocess.DEVNULL]
-TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../../temp/').resolve()
+TEMP_DIR = Path(__file__).parent.absolute().joinpath('../../temp/').resolve()
TEMP_DIR.mkdir(parents=True, exist_ok=True)
@@ -87,3 +88,22 @@ def is_container_run(name):
containers = docker_client.containers.list()
containers = [x.name for x in containers if x.status == 'running']
return name in containers
+
+
+def get_test_csv(name, url, lines_count=None, rewrite=False):
+ test_csv_path = TESTS_ROOT.joinpath('temp/', name).resolve()
+ if not test_csv_path.is_file() or rewrite:
+ r = requests.get(url)
+ with open(test_csv_path, 'wb') as f:
+ f.write(r.content)
+ if lines_count is not None:
+ fp = str(test_csv_path)
+ p = subprocess.Popen(
+ f"mv {fp} {fp}_2; sed -n '1,{lines_count}p' {fp}_2 >> {fp}; rm {fp}_2",
+ cwd=TESTS_ROOT.resolve(),
+ stdout=OUTPUT,
+ stderr=OUTPUT,
+ shell=True
+ )
+ p.wait()
+ return str(test_csv_path)
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -1,15 +1,16 @@
import unittest
import requests
-import os
import csv
import inspect
import subprocess
import atexit
from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.utilities.config import Config
from common import (
+ get_test_csv,
wait_api_ready,
prepare_config,
wait_db,
@@ -26,6 +27,8 @@
TEST_DATA_TABLE = 'home_rentals'
TEST_PREDICTOR_NAME = 'test_predictor'
+EXTERNAL_DS_NAME = 'test_external'
+
config = Config(TEST_CONFIG)
@@ -105,6 +108,7 @@ def setUpClass(cls):
raise Exception()
cls.mdb = MindsdbNative(config)
+ datastore = DataStore(config)
models = cls.mdb.get_models()
models = [x['name'] for x in models]
@@ -114,6 +118,9 @@ def setUpClass(cls):
query('create database if not exists test')
test_tables = query('show tables from test')
test_tables = [x['name'] for x in test_tables]
+
+ test_csv_path = get_test_csv(TEST_CSV['name'], TEST_CSV['url'])
+
if TEST_DATA_TABLE not in test_tables:
print('creating test data table...')
query(f'''
@@ -132,12 +139,6 @@ def setUpClass(cls):
PARTITION BY location
''')
- test_csv_path = str(TESTS_ROOT.joinpath('temp/', TEST_CSV['name']).resolve())
- if os.path.isfile(test_csv_path) is False:
- r = requests.get(TEST_CSV['url'])
- with open(test_csv_path, 'wb') as f:
- f.write(r.content)
-
with open(test_csv_path) as f:
csvf = csv.reader(f)
i = 0
@@ -165,6 +166,12 @@ def setUpClass(cls):
i += 1
print('done')
+ ds = datastore.get_datasource(EXTERNAL_DS_NAME)
+ if ds is not None:
+ datastore.delete_datasource(EXTERNAL_DS_NAME)
+ short_csv_file_path = get_test_csv(f'{EXTERNAL_DS_NAME}.csv', TEST_CSV['url'], lines_count=300, rewrite=True)
+ datastore.save_datasource(EXTERNAL_DS_NAME, 'file', 'test.csv', short_csv_file_path)
+
def test_1_initial_state(self):
print(f'\nExecuting {inspect.stack()[0].function}')
print('Check all testing objects not exists')
@@ -211,8 +218,8 @@ def test_2_insert_predictor(self):
mindsdb_tables = [x['name'] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_insert_predictor_with_existing_ds(self):
- name = f'{TEST_PREDICTOR_NAME}_2'
+ def test_3_externael_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_external'
models = self.mdb.get_models()
models = [x['name'] for x in models]
if name in models:
@@ -223,7 +230,7 @@ def test_3_insert_predictor_with_existing_ds(self):
(
'{name}',
'rental_price, location',
- '{TEST_PREDICTOR_NAME}',
+ '{EXTERNAL_DS_NAME}',
'{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
@@ -238,6 +245,19 @@ def test_3_insert_predictor_with_existing_ds(self):
mindsdb_tables = [x['name'] for x in mindsdb_tables]
self.assertTrue(name in mindsdb_tables)
+ res = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{name} where external_datasource='{EXTERNAL_DS_NAME}'
+ """)
+
+ print('check result')
+ self.assertTrue(len(res) > 0)
+ self.assertTrue(res[0]['rental_price'] is not None and res[0]['rental_price'] != 'None')
+ self.assertTrue(res[0]['location'] is not None and res[0]['location'] != 'None')
+
def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
diff --git a/tests/integration_tests/flows/test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -1,6 +1,4 @@
import unittest
-import requests
-import os
import csv
import inspect
import subprocess
@@ -9,9 +7,11 @@
import mysql.connector
from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.utilities.config import Config
from common import (
+ get_test_csv,
wait_api_ready,
prepare_config,
wait_db,
@@ -28,6 +28,8 @@
TEST_DATA_TABLE = 'home_rentals'
TEST_PREDICTOR_NAME = 'test_predictor'
+EXTERNAL_DS_NAME = 'test_external'
+
config = Config(TEST_CONFIG)
@@ -92,12 +94,15 @@ def setUpClass(cls):
raise Exception()
cls.mdb = MindsdbNative(config)
+ datastore = DataStore(config)
models = cls.mdb.get_models()
models = [x['name'] for x in models]
if TEST_PREDICTOR_NAME in models:
cls.mdb.delete_model(TEST_PREDICTOR_NAME)
+ test_csv_path = get_test_csv(TEST_CSV['name'], TEST_CSV['url'])
+
query('create database if not exists test')
test_tables = query('show tables from test')
test_tables = [x[0] for x in test_tables]
@@ -116,12 +121,6 @@ def setUpClass(cls):
)
''')
- test_csv_path = str(TESTS_ROOT.joinpath('temp/', TEST_CSV['name']).resolve())
- if os.path.isfile(test_csv_path) is False:
- r = requests.get(TEST_CSV['url'])
- with open(test_csv_path, 'wb') as f:
- f.write(r.content)
-
with open(test_csv_path) as f:
csvf = csv.reader(f)
i = 0
@@ -148,6 +147,12 @@ def setUpClass(cls):
i += 1
print('done')
+ ds = datastore.get_datasource(EXTERNAL_DS_NAME)
+ if ds is not None:
+ datastore.delete_datasource(EXTERNAL_DS_NAME)
+ short_csv_file_path = get_test_csv(f'{EXTERNAL_DS_NAME}.csv', TEST_CSV['url'], lines_count=300, rewrite=True)
+ datastore.save_datasource(EXTERNAL_DS_NAME, 'file', 'test.csv', short_csv_file_path)
+
def test_1_initial_state(self):
print(f'\nExecuting {inspect.stack()[0].function}')
print('Check all testing objects not exists')
@@ -194,8 +199,8 @@ def test_2_insert_predictor(self):
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_insert_predictor_with_existing_ds(self):
- name = f'{TEST_PREDICTOR_NAME}_2'
+ def test_3_externael_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_external'
models = self.mdb.get_models()
models = [x['name'] for x in models]
if name in models:
@@ -206,7 +211,7 @@ def test_3_insert_predictor_with_existing_ds(self):
(
'{name}',
'rental_price, location',
- '{TEST_PREDICTOR_NAME}',
+ '{EXTERNAL_DS_NAME}',
'{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
@@ -221,6 +226,19 @@ def test_3_insert_predictor_with_existing_ds(self):
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(name in mindsdb_tables)
+ res = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{name} where external_datasource='{EXTERNAL_DS_NAME}'
+ """, as_dict=True)
+
+ print('check result')
+ self.assertTrue(len(res) > 0)
+ self.assertTrue(res[0]['rental_price'] is not None and res[0]['rental_price'] != 'None')
+ self.assertTrue(res[0]['location'] is not None and res[0]['location'] != 'None')
+
def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
| Complete clickhouse & mariadb integration testing and make sure it's running as part of the CI
The integration test for clickhouse & mariadb should include:
1. (set up the env) Use docker compose to boot up the databases, upload some data and start MindsDB
2. Test that all mindsdb tables are created (commands, predictors, the mindsdb db)
3. Train a predictor from an internal datasource (using some kwargs to validate they work)
4. Train a predictor from an external datasource (say, a file)
5. With both predictors from steps 3 and 4:
* Predict from a `WHEN` statement
* Predict from a query internal to the databases
* Predict from an external datasource
For point 5. please specify some kwargs to the predictions to, again, validate that those also work.
| 2020-07-20T08:40:50Z | [] | [] |
|
mindsdb/mindsdb | 604 | mindsdb__mindsdb-604 | [
"603"
] | dc6fc18b138e232a3f12bf267968c1366a77c804 | diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -5,7 +5,8 @@
import os
import torch.multiprocessing as mp
-from torch.multiprocessing import Process
+
+from mindsdb_native.config import CONFIG
from mindsdb.utilities.config import Config
from mindsdb.interfaces.native.mindsdb import MindsdbNative
@@ -14,6 +15,7 @@
from mindsdb.utilities.fs import get_or_create_dir_struct
from mindsdb.interfaces.database.database import DatabaseWrapper
+
def close_api_gracefully(p_arr):
for p in p_arr:
sys.stdout.flush()
@@ -21,6 +23,7 @@ def close_api_gracefully(p_arr):
p.join()
sys.stdout.flush()
+
if __name__ == '__main__':
mp.freeze_support()
@@ -33,11 +36,13 @@ def close_api_gracefully(p_arr):
config_path = args.config
if config_path is None:
config_dir, _, _ = get_or_create_dir_struct()
- config_path = os.path.join(config_dir,'config.json')
+ config_path = os.path.join(config_dir, 'config.json')
print(f'Using configuration file: {config_path}')
config = Config(config_path)
+ CONFIG.MINDSDB_STORAGE_PATH = config['interface']['mindsdb_native']['storage_dir']
+
if args.api is None:
api_arr = [api for api in config['api']]
else:
@@ -68,7 +73,7 @@ def close_api_gracefully(p_arr):
for api in api_arr:
print(f'Starting Mindsdb {api} API !')
try:
- p = ctx.Process(target=start_functions[api], args=(config_path,True,))
+ p = ctx.Process(target=start_functions[api], args=(config_path, True,))
p.start()
p_arr.append(p)
print(f'Started Mindsdb {api} API !')
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -2,7 +2,9 @@
import json
import hashlib
+
class Config(object):
+ current_version = '1.1'
_config = {}
def __init__(self, config_path):
@@ -16,13 +18,58 @@ def __init__(self, config_path):
else:
raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>')
+ def _migrate(self):
+ def m1_0(config):
+ if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']:
+ config['integrations']['default_clickhouse']['type'] = 'clickhouse'
+ if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']:
+ config['integrations']['default_mariadb']['type'] = 'mariadb'
+ if 'datasources' in config['api']['mysql']:
+ del config['api']['mysql']['datasources']
+ config['config_version'] = '1.1'
+ return config
+
+ migrations = {
+ '1.0': m1_0
+ }
+
+ current_version = self._parse_version(self._config['config_version'])
+ target_version = self._parse_version(self.current_version)
+ while current_version < target_version:
+ str_version = '.'.join([str(x) for x in current_version])
+ self._config = migrations[str_version](self._config)
+ current_version = self._parse_version(self._config['config_version'])
+
+ def _validate(self):
+ integrations = self._config.get('integrations', {})
+ for key in integrations:
+ if 'type' not in integrations[key]:
+ raise KeyError(f"Config error: for integration '{key}' key 'type' must be specified")
+
+ def _parse_version(self, version):
+ if isinstance(version, str):
+ version = [int(x) for x in version.split('.')]
+ elif isinstance(version, int):
+ version = [version]
+ if len(version) == 1:
+ version.append(0)
+ return version
+
def _read(self):
if isinstance(self.config_path, str) and os.path.isfile(self.config_path):
with open(self.config_path, 'r') as fp:
self._config = json.load(fp)
+ if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version):
+ self._migrate()
+ self._save()
+ self._validate()
else:
raise TypeError('`self.config_path` must be a string representing a local file path to a json config')
+ def _save(self):
+ with open(self.config_path, 'w') as fp:
+ json.dump(self._config, fp, indent=4, sort_keys=True)
+
def _gen_hash(self):
with open(self.config_path, 'rb') as fp:
return hashlib.md5(fp.read()).hexdigest()
@@ -52,9 +99,9 @@ def set(self, key_chain, value, delete=False):
c = self._config
for i, k in enumerate(key_chain):
- if k in c and i+1 < len(key_chain):
+ if k in c and i + 1 < len(key_chain):
c = c[k]
- elif k not in c and i+1 < len(key_chain):
+ elif k not in c and i + 1 < len(key_chain):
c[k] = {}
c = c[k]
else:
@@ -62,9 +109,7 @@ def set(self, key_chain, value, delete=False):
del c[k]
else:
c[k] = value
-
- with open(self.config_path, 'w') as fp:
- json.dump(self._config, fp, indent=4, sort_keys=True)
+ self._save()
# Higher level interface
def add_db_integration(self, name, dict):
diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -21,7 +21,7 @@ def _in(ask, default, use_default):
def auto_config(python_path,pip_path,predictor_dir,datasource_dir):
config = {
"debug": False
- ,"config_version": 1
+ ,"config_version": "1.1"
,"python_interpreter": python_path
,"pip_path": pip_path
,"api": {
@@ -124,7 +124,6 @@ def cli_config(python_path,pip_path,predictor_dir,datasource_dir,config_dir,use_
"file_level": "INFO",
"console_level": "INFO"
}
- ,"datasources": []
}
config['api']['mysql']['host'] = _in('MYSQL interface host','127.0.0.1',use_default)
config['api']['mysql']['port'] = _in('MYSQL interface port','47335',use_default)
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -48,14 +48,14 @@ def test_1_config(self):
integration_names = res.json()
assert set(integration_names['integrations']) == set(['default_mariadb', 'default_clickhouse'])
- test_integration_data = {'enabled': False, 'host':'test'}
- res = requests.put(f'{root}/config/integrations/test_integration', json={'params':test_integration_data})
+ test_integration_data = {'enabled': False, 'host': 'test', 'type': 'clickhouse'}
+ res = requests.put(f'{root}/config/integrations/test_integration', json={'params': test_integration_data})
assert res.status_code == 200
res = requests.get(f'{root}/config/integrations/test_integration')
assert res.status_code == 200
test_integration = res.json()
- assert len(test_integration) == 2
+ assert len(test_integration) == 3
res = requests.delete(f'{root}/config/integrations/test_integration')
assert res.status_code == 200
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -78,7 +78,7 @@ def prepare_config(config, db):
temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
with open(temp_config_path, 'wt') as f:
- f.write(json.dumps(config._config))
+ json.dump(config._config, f, indent=4, sort_keys=True)
return temp_config_path
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -6,7 +6,6 @@
},
"mysql": {
"certificate_path": "tests/integration_tests/flows/config/cert.pem",
- "datasources": [],
"host": "127.0.0.1",
"log": {
"console_level": "INFO",
@@ -20,7 +19,7 @@
"user": "mindsdb"
}
},
- "config_version": 1,
+ "config_version": "1.1",
"debug": true,
"integrations": {
"default_clickhouse": {
| Validity check and migrations for Config
**Is your feature request related to a problem? Please describe.**
After updating from 1.99.11 to 2.0.0 it is not possible to run MindsDB without editing/removing the existing config
**Describe the solution you'd like**
Check config version on start and apply some migrations.
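The patch above implements this; a stripped-down sketch of the same pattern, just to make the idea concrete (migration keys and the target version here are illustrative):
```python
def parse_version(v):
    """'1' -> [1, 0], '1.1' -> [1, 1]; lists of ints compare lexicographically."""
    parts = [int(x) for x in str(v).split('.')]
    return parts + [0] * (2 - len(parts))

def migrate(config, migrations, target='1.1'):
    def norm(v):
        return '.'.join(str(x) for x in parse_version(v))
    current = norm(config.get('config_version', '1.0'))
    while parse_version(current) < parse_version(target):
        config = migrations[current](config)   # each step bumps config_version
        current = norm(config['config_version'])
    return config

def m1_0(config):
    # example migration: give old integrations an explicit 'type'
    for name, integration in config.get('integrations', {}).items():
        integration.setdefault('type', name.replace('default_', ''))
    config['config_version'] = '1.1'
    return config

cfg = {'config_version': 1, 'integrations': {'default_mariadb': {'host': 'localhost'}}}
print(migrate(cfg, {'1.0': m1_0}))
```
A validity check like the patch's `_validate` (requiring a `type` key on every integration) can then run once the migrations have brought the config up to the current version.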
| 2020-07-21T09:32:40Z | [] | [] |
|
mindsdb/mindsdb | 606 | mindsdb__mindsdb-606 | [
"593"
] | f66217f9d3d7e40263b9435701f44d5a0ca26f73 | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '2.0.0'
+__version__ = '2.1.0'
__description__ = "MindsDB server, provides server capabilities to mindsdb native python library"
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -5,7 +5,8 @@
import os
import torch.multiprocessing as mp
-from torch.multiprocessing import Process
+
+from mindsdb_native.config import CONFIG
from mindsdb.utilities.config import Config
from mindsdb.interfaces.native.mindsdb import MindsdbNative
@@ -14,6 +15,7 @@
from mindsdb.utilities.fs import get_or_create_dir_struct
from mindsdb.interfaces.database.database import DatabaseWrapper
+
def close_api_gracefully(p_arr):
for p in p_arr:
sys.stdout.flush()
@@ -21,6 +23,7 @@ def close_api_gracefully(p_arr):
p.join()
sys.stdout.flush()
+
if __name__ == '__main__':
mp.freeze_support()
@@ -33,11 +36,13 @@ def close_api_gracefully(p_arr):
config_path = args.config
if config_path is None:
config_dir, _, _ = get_or_create_dir_struct()
- config_path = os.path.join(config_dir,'config.json')
+ config_path = os.path.join(config_dir, 'config.json')
print(f'Using configuration file: {config_path}')
config = Config(config_path)
+ CONFIG.MINDSDB_STORAGE_PATH = config['interface']['mindsdb_native']['storage_dir']
+
if args.api is None:
api_arr = [api for api in config['api']]
else:
@@ -68,7 +73,7 @@ def close_api_gracefully(p_arr):
for api in api_arr:
print(f'Starting Mindsdb {api} API !')
try:
- p = ctx.Process(target=start_functions[api], args=(config_path,True,))
+ p = ctx.Process(target=start_functions[api], args=(config_path, True,))
p.start()
p_arr.append(p)
print(f'Started Mindsdb {api} API !')
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -50,21 +50,28 @@ class SQLQuery():
@staticmethod
def parse_insert(sql):
- search = re.search(r'(\(.*\)).*(\(.*\))', sql)
- columns = search.groups()[0].split(',')
- columns = [x.strip('(` )') for x in columns]
- p = re.compile('\s*,\s*'.join(["('.*')"] * len(columns)))
- values = re.search(p, search.groups()[1])
- values = [x.strip("( ')") for x in values.groups()]
+ columns = sql[:sql.find(')')]
+ values = sql[len(columns):]
+
+ columns = columns[columns.find('(') + 1:]
+ values = values[values.find('(') + 1:values.rfind(')')]
+
+ p = parse(f'select ({columns})')['select']['value']
+ columns = p['literal'] if isinstance(p, dict) else p
+ if isinstance(columns, list) is False:
+ columns = [columns]
+
+ p = parse(f'select ({values})')['select']['value']
+ values = p['literal'] if isinstance(p, dict) else p
+ if isinstance(values, list) is False:
+ values = [values]
return dict(zip(columns, values))
- def __init__(self, sql, session=None):
+ def __init__(self, sql, integration=None, database=None):
# parse
- self.session = session
- self.integration = None
- if session is not None:
- self.integration = session.integration
+ self.integration = integration
+ self.database = database
# 'offset x, y' - specific just for mysql, parser dont understand it
sql = re.sub(r'\n?limit([\n\d\s]*),([\n\d\s]*)', ' limit \g<1> offset \g<1> ', sql)
@@ -121,9 +128,7 @@ def _format_from_statement(self, s):
{'value': 'xxx.zzz_mariadb', 'name': 'a'}
-> {'value': 'xxx.zzz', 'name': 'a', source: 'mariadb'}
"""
- database = None
- if self.session is not None:
- database = self.session.database
+ database = self.database
if isinstance(s, str):
if '.' in s:
s = {
@@ -170,7 +175,7 @@ def _parseQuery(self, sql):
fromStatements = self.struct.get('from')
if isinstance(fromStatements, list) is False:
fromStatements = [fromStatements]
-
+
self.struct['from'] = [self._format_from_statement(x) for x in fromStatements]
orderby = self.struct.get('orderby')
@@ -423,7 +428,7 @@ def _fetchData(self):
result = []
if 'select ' not in external_datasource.lower():
external_datasource = f'select * from {external_datasource}'
- query = SQLQuery(external_datasource, default_dn='datasource')
+ query = SQLQuery(external_datasource, database='datasource', integration=self.integration)
result = query.fetch(self.datahub, view='dict')
if result['success'] is False:
raise Exception(result['msg'])
diff --git a/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py b/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py
--- a/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py
+++ b/mindsdb/api/mysql/mysql_proxy/controllers/session_controller.py
@@ -27,6 +27,9 @@ def __init__(self) -> object:
self.auth = False
self.logging = log
+ self.integration = None
+ self.database = None
+
self.current_transaction = None
def newTransaction(self, sql_query):
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -10,6 +10,7 @@
"""
+import os
import random
import socketserver as SocketServer
import ssl
@@ -17,9 +18,12 @@
import traceback
import json
import atexit
+import tempfile
from moz_sql_parser import parse
+from mindsdb.utilities.wizards import make_ssl_cert
+
from mindsdb.api.mysql.mysql_proxy.data_types.mysql_packet import Packet
from mindsdb.api.mysql.mysql_proxy.controllers.session_controller import SessionController
from mindsdb.api.mysql.mysql_proxy.controllers.log import init_logger, log
@@ -362,7 +366,7 @@ def delete_predictor_answer(self, sql):
fake_sql = sql.strip(' ')
fake_sql = 'select name ' + fake_sql[len('delete '):]
- query = SQLQuery(fake_sql, session=self.session)
+ query = SQLQuery(fake_sql, integration=self.session.integration, database=self.session.database)
result = query.fetch(datahub)
@@ -499,7 +503,7 @@ def queryAnswer(self, sql):
if 'database()' in sql_lower:
self.answerSelectDatabase()
return
- query = SQLQuery(sql, session=self.session)
+ query = SQLQuery(sql, integration=self.session.integration, database=self.session.database)
return self.selectAnswer(query)
elif keyword == 'rollback':
self.packet(OkPacket).send()
@@ -903,6 +907,11 @@ def startProxy(_config):
HARDCODED_USER = config['api']['mysql']['user']
HARDCODED_PASSWORD = config['api']['mysql']['password']
CERT_PATH = config['api']['mysql'].get('certificate_path')
+ if CERT_PATH is None or CERT_PATH == '':
+ CERT_PATH = tempfile.mkstemp(prefix='mindsdb_cert_', text=True)[1]
+ make_ssl_cert(CERT_PATH)
+ atexit.register(lambda: os.remove(CERT_PATH))
+
default_store = DataStore(config)
mdb = MindsdbNative(config)
datahub = init_datahub(config)
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -63,6 +63,9 @@ def _query(self, query):
def _get_mysql_user(self):
return f"{self.config['api']['mysql']['user']}_{self.name}"
+ def _escape_table_name(self, name):
+ return '`' + name.replace('`', '\\`') + '`'
+
def setup(self):
self._query('DROP DATABASE IF EXISTS mindsdb')
@@ -94,7 +97,7 @@ def setup(self):
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
- name = model_meta['name']
+ name = self._escape_table_name(model_meta['name'])
stats = model_meta['data_analysis']
if 'columns_to_ignore' in stats:
del stats['columns_to_ignore']
@@ -121,7 +124,7 @@ def register_predictors(self, model_data_arr):
def unregister_predictor(self, name):
q = f"""
- drop table if exists mindsdb.{name};
+ drop table if exists mindsdb.{self._escape_table_name(name)};
"""
self._query(q)
diff --git a/mindsdb/integrations/mariadb/mariadb.py b/mindsdb/integrations/mariadb/mariadb.py
--- a/mindsdb/integrations/mariadb/mariadb.py
+++ b/mindsdb/integrations/mariadb/mariadb.py
@@ -38,6 +38,9 @@ def _to_mariadb_table(self, stats, predicted_cols):
return column_declaration
+ def _escape_table_name(self, name):
+ return '`' + name.replace('`', '``') + '`'
+
def _query(self, query):
con = mysql.connector.connect(host=self.config['integrations'][self.name]['host'], port=self.config['integrations'][self.name]['port'], user=self.config['integrations'][self.name]['user'], password=self.config['integrations'][self.name]['password'])
@@ -112,7 +115,7 @@ def register_predictors(self, model_data_arr):
connect = self._get_connect_string(name)
q = f"""
- CREATE TABLE mindsdb.{name}
+ CREATE TABLE mindsdb.{self._escape_table_name(name)}
({columns_sql}
) ENGINE=CONNECT TABLE_TYPE=MYSQL CONNECTION='{connect}';
"""
@@ -120,13 +123,18 @@ def register_predictors(self, model_data_arr):
def unregister_predictor(self, name):
q = f"""
- drop table if exists mindsdb.{name};
+ drop table if exists mindsdb.{self._escape_table_name(name)};
"""
self._query(q)
def check_connection(self):
try:
- con = mysql.connector.connect(host=self.config['integrations'][self.name]['host'], port=self.config['integrations'][self.name]['port'], user=self.config['integrations'][self.name]['user'], password=self.config['integrations'][self.name]['password'])
+ con = mysql.connector.connect(
+ host=self.config['integrations'][self.name]['host'],
+ port=self.config['integrations'][self.name]['port'],
+ user=self.config['integrations'][self.name]['user'],
+ password=self.config['integrations'][self.name]['password']
+ )
connected = con.is_connected()
con.close()
except Exception:
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -89,22 +89,27 @@ def save_datasource(self, name, source_type, source, file_path=None):
user = self.config['integrations']['default_clickhouse']['user']
password = self.config['integrations']['default_clickhouse']['password']
# TODO add host port params
- ds = ClickhouseDS(source, user=user, password=password)
+ ds = ClickhouseDS(query=source, user=user, password=password)
picklable = {
- 'class': 'ClickhouseDS'
- ,'args': [source]
- ,'kwargs': {'user': user,'password': password}
+ 'class': 'ClickhouseDS',
+ 'args': [],
+ 'kwargs': {
+ 'query': source,
+ 'user': user,
+ 'password': password
+ }
}
elif source_type == 'mariadb':
user = self.config['integrations']['default_mariadb']['user']
password = self.config['integrations']['default_mariadb']['password']
host = self.config['integrations']['default_mariadb']['host']
port = self.config['integrations']['default_mariadb']['port']
- ds = MariaDS(source, user=user, password=password, host=host, port=port)
+ ds = MariaDS(query=source, user=user, password=password, host=host, port=port)
picklable = {
- 'class': 'MariaDS'
- ,'args': [source]
- ,'kwargs': {
+ 'class': 'MariaDS',
+ 'args': [],
+ 'kwargs': {
+ 'query': source,
'user': user,
'password': password,
'host': host,
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -2,7 +2,9 @@
import json
import hashlib
+
class Config(object):
+ current_version = '1.1'
_config = {}
def __init__(self, config_path):
@@ -16,13 +18,58 @@ def __init__(self, config_path):
else:
raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>')
+ def _migrate(self):
+ def m1_0(config):
+ if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']:
+ config['integrations']['default_clickhouse']['type'] = 'clickhouse'
+ if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']:
+ config['integrations']['default_mariadb']['type'] = 'mariadb'
+ if 'datasources' in config['api']['mysql']:
+ del config['api']['mysql']['datasources']
+ config['config_version'] = '1.1'
+ return config
+
+ migrations = {
+ '1.0': m1_0
+ }
+
+ current_version = self._parse_version(self._config['config_version'])
+ target_version = self._parse_version(self.current_version)
+ while current_version < target_version:
+ str_version = '.'.join([str(x) for x in current_version])
+ self._config = migrations[str_version](self._config)
+ current_version = self._parse_version(self._config['config_version'])
+
+ def _validate(self):
+ integrations = self._config.get('integrations', {})
+ for key in integrations:
+ if 'type' not in integrations[key]:
+ raise KeyError(f"Config error: for integration '{key}' key 'type' must be specified")
+
+ def _parse_version(self, version):
+ if isinstance(version, str):
+ version = [int(x) for x in version.split('.')]
+ elif isinstance(version, int):
+ version = [version]
+ if len(version) == 1:
+ version.append(0)
+ return version
+
def _read(self):
if isinstance(self.config_path, str) and os.path.isfile(self.config_path):
with open(self.config_path, 'r') as fp:
self._config = json.load(fp)
+ if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version):
+ self._migrate()
+ self._save()
+ self._validate()
else:
raise TypeError('`self.config_path` must be a string representing a local file path to a json config')
+ def _save(self):
+ with open(self.config_path, 'w') as fp:
+ json.dump(self._config, fp, indent=4, sort_keys=True)
+
def _gen_hash(self):
with open(self.config_path, 'rb') as fp:
return hashlib.md5(fp.read()).hexdigest()
@@ -52,9 +99,9 @@ def set(self, key_chain, value, delete=False):
c = self._config
for i, k in enumerate(key_chain):
- if k in c and i+1 < len(key_chain):
+ if k in c and i + 1 < len(key_chain):
c = c[k]
- elif k not in c and i+1 < len(key_chain):
+ elif k not in c and i + 1 < len(key_chain):
c[k] = {}
c = c[k]
else:
@@ -62,9 +109,7 @@ def set(self, key_chain, value, delete=False):
del c[k]
else:
c[k] = value
-
- with open(self.config_path, 'w') as fp:
- json.dump(self._config, fp, indent=4, sort_keys=True)
+ self._save()
# Higher level interface
def add_db_integration(self, name, dict):
diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -21,7 +21,7 @@ def _in(ask, default, use_default):
def auto_config(python_path,pip_path,predictor_dir,datasource_dir):
config = {
"debug": False
- ,"config_version": 1
+ ,"config_version": "1.1"
,"python_interpreter": python_path
,"pip_path": pip_path
,"api": {
@@ -124,7 +124,6 @@ def cli_config(python_path,pip_path,predictor_dir,datasource_dir,config_dir,use_
"file_level": "INFO",
"console_level": "INFO"
}
- ,"datasources": []
}
config['api']['mysql']['host'] = _in('MYSQL interface host','127.0.0.1',use_default)
config['api']['mysql']['port'] = _in('MYSQL interface port','47335',use_default)
| diff --git a/tests/docker/mariadb/connect.cnf b/tests/docker/mariadb/connect.cnf
--- a/tests/docker/mariadb/connect.cnf
+++ b/tests/docker/mariadb/connect.cnf
@@ -2,3 +2,6 @@
plugin-load-add=ha_connect.so
connect_jvm_path="/usr/lib/jvm/java-1.8.0-openjdk-amd64/jre/lib/amd64/server"
connect_class_path="/usr/lib/mysql/plugin:/usr/lib/jvm/java-1.8.0-openjdk-amd64/jre/lib/ext/dremio-jdbc-driver.jar"
+wait_timeout=180
+connect_timeout=180
+interactive_timeout=180
\ No newline at end of file
diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -48,14 +48,14 @@ def test_1_config(self):
integration_names = res.json()
assert set(integration_names['integrations']) == set(['default_mariadb', 'default_clickhouse'])
- test_integration_data = {'enabled': False, 'host':'test'}
- res = requests.put(f'{root}/config/integrations/test_integration', json={'params':test_integration_data})
+ test_integration_data = {'enabled': False, 'host': 'test', 'type': 'clickhouse'}
+ res = requests.put(f'{root}/config/integrations/test_integration', json={'params': test_integration_data})
assert res.status_code == 200
res = requests.get(f'{root}/config/integrations/test_integration')
assert res.status_code == 200
test_integration = res.json()
- assert len(test_integration) == 2
+ assert len(test_integration) == 3
res = requests.delete(f'{root}/config/integrations/test_integration')
assert res.status_code == 200
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -1,22 +1,23 @@
import psutil
import shutil
import time
-import pathlib
+from pathlib import Path
import json
import docker
+import requests
import subprocess
from mindsdb.interfaces.database.database import DatabaseWrapper
TEST_CONFIG = 'tests/integration_tests/flows/config/config.json'
-TESTS_ROOT = pathlib.Path(__file__).parent.absolute().joinpath('../../').resolve()
+TESTS_ROOT = Path(__file__).parent.absolute().joinpath('../../').resolve()
START_TIMEOUT = 15
OUTPUT = None # [None|subprocess.DEVNULL]
-TEMP_DIR = pathlib.Path(__file__).parent.absolute().joinpath('../../temp/').resolve()
+TEMP_DIR = Path(__file__).parent.absolute().joinpath('../../temp/').resolve()
TEMP_DIR.mkdir(parents=True, exist_ok=True)
@@ -77,7 +78,7 @@ def prepare_config(config, db):
temp_config_path = str(TEMP_DIR.joinpath('config.json').resolve())
with open(temp_config_path, 'wt') as f:
- f.write(json.dumps(config._config))
+ json.dump(config._config, f, indent=4, sort_keys=True)
return temp_config_path
@@ -87,3 +88,22 @@ def is_container_run(name):
containers = docker_client.containers.list()
containers = [x.name for x in containers if x.status == 'running']
return name in containers
+
+
+def get_test_csv(name, url, lines_count=None, rewrite=False):
+ test_csv_path = TESTS_ROOT.joinpath('temp/', name).resolve()
+ if not test_csv_path.is_file() or rewrite:
+ r = requests.get(url)
+ with open(test_csv_path, 'wb') as f:
+ f.write(r.content)
+ if lines_count is not None:
+ fp = str(test_csv_path)
+ p = subprocess.Popen(
+ f"mv {fp} {fp}_2; sed -n '1,{lines_count}p' {fp}_2 >> {fp}; rm {fp}_2",
+ cwd=TESTS_ROOT.resolve(),
+ stdout=OUTPUT,
+ stderr=OUTPUT,
+ shell=True
+ )
+ p.wait()
+ return str(test_csv_path)
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -6,7 +6,6 @@
},
"mysql": {
"certificate_path": "tests/integration_tests/flows/config/cert.pem",
- "datasources": [],
"host": "127.0.0.1",
"log": {
"console_level": "INFO",
@@ -20,7 +19,7 @@
"user": "mindsdb"
}
},
- "config_version": 1,
+ "config_version": "1.1",
"debug": true,
"integrations": {
"default_clickhouse": {
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -1,15 +1,16 @@
import unittest
import requests
-import os
import csv
import inspect
import subprocess
import atexit
from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.utilities.config import Config
from common import (
+ get_test_csv,
wait_api_ready,
prepare_config,
wait_db,
@@ -26,6 +27,8 @@
TEST_DATA_TABLE = 'home_rentals'
TEST_PREDICTOR_NAME = 'test_predictor'
+EXTERNAL_DS_NAME = 'test_external'
+
config = Config(TEST_CONFIG)
@@ -105,6 +108,7 @@ def setUpClass(cls):
raise Exception()
cls.mdb = MindsdbNative(config)
+ datastore = DataStore(config)
models = cls.mdb.get_models()
models = [x['name'] for x in models]
@@ -114,6 +118,9 @@ def setUpClass(cls):
query('create database if not exists test')
test_tables = query('show tables from test')
test_tables = [x['name'] for x in test_tables]
+
+ test_csv_path = get_test_csv(TEST_CSV['name'], TEST_CSV['url'])
+
if TEST_DATA_TABLE not in test_tables:
print('creating test data table...')
query(f'''
@@ -132,12 +139,6 @@ def setUpClass(cls):
PARTITION BY location
''')
- test_csv_path = str(TESTS_ROOT.joinpath('temp/', TEST_CSV['name']).resolve())
- if os.path.isfile(test_csv_path) is False:
- r = requests.get(TEST_CSV['url'])
- with open(test_csv_path, 'wb') as f:
- f.write(r.content)
-
with open(test_csv_path) as f:
csvf = csv.reader(f)
i = 0
@@ -165,6 +166,12 @@ def setUpClass(cls):
i += 1
print('done')
+ ds = datastore.get_datasource(EXTERNAL_DS_NAME)
+ if ds is not None:
+ datastore.delete_datasource(EXTERNAL_DS_NAME)
+ short_csv_file_path = get_test_csv(f'{EXTERNAL_DS_NAME}.csv', TEST_CSV['url'], lines_count=300, rewrite=True)
+ datastore.save_datasource(EXTERNAL_DS_NAME, 'file', 'test.csv', short_csv_file_path)
+
def test_1_initial_state(self):
print(f'\nExecuting {inspect.stack()[0].function}')
print('Check all testing objects not exists')
@@ -211,8 +218,8 @@ def test_2_insert_predictor(self):
mindsdb_tables = [x['name'] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_insert_predictor_with_existing_ds(self):
- name = f'{TEST_PREDICTOR_NAME}_2'
+ def test_3_externael_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_external'
models = self.mdb.get_models()
models = [x['name'] for x in models]
if name in models:
@@ -223,7 +230,7 @@ def test_3_insert_predictor_with_existing_ds(self):
(
'{name}',
'rental_price, location',
- '{TEST_PREDICTOR_NAME}',
+ '{EXTERNAL_DS_NAME}',
'{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
@@ -238,6 +245,19 @@ def test_3_insert_predictor_with_existing_ds(self):
mindsdb_tables = [x['name'] for x in mindsdb_tables]
self.assertTrue(name in mindsdb_tables)
+ res = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{name} where external_datasource='{EXTERNAL_DS_NAME}'
+ """)
+
+ print('check result')
+ self.assertTrue(len(res) > 0)
+ self.assertTrue(res[0]['rental_price'] is not None and res[0]['rental_price'] != 'None')
+ self.assertTrue(res[0]['location'] is not None and res[0]['location'] != 'None')
+
def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
diff --git a/tests/integration_tests/flows/test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -1,6 +1,4 @@
import unittest
-import requests
-import os
import csv
import inspect
import subprocess
@@ -9,9 +7,11 @@
import mysql.connector
from mindsdb.interfaces.native.mindsdb import MindsdbNative
+from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.utilities.config import Config
from common import (
+ get_test_csv,
wait_api_ready,
prepare_config,
wait_db,
@@ -28,6 +28,8 @@
TEST_DATA_TABLE = 'home_rentals'
TEST_PREDICTOR_NAME = 'test_predictor'
+EXTERNAL_DS_NAME = 'test_external'
+
config = Config(TEST_CONFIG)
@@ -92,12 +94,15 @@ def setUpClass(cls):
raise Exception()
cls.mdb = MindsdbNative(config)
+ datastore = DataStore(config)
models = cls.mdb.get_models()
models = [x['name'] for x in models]
if TEST_PREDICTOR_NAME in models:
cls.mdb.delete_model(TEST_PREDICTOR_NAME)
+ test_csv_path = get_test_csv(TEST_CSV['name'], TEST_CSV['url'])
+
query('create database if not exists test')
test_tables = query('show tables from test')
test_tables = [x[0] for x in test_tables]
@@ -116,12 +121,6 @@ def setUpClass(cls):
)
''')
- test_csv_path = str(TESTS_ROOT.joinpath('temp/', TEST_CSV['name']).resolve())
- if os.path.isfile(test_csv_path) is False:
- r = requests.get(TEST_CSV['url'])
- with open(test_csv_path, 'wb') as f:
- f.write(r.content)
-
with open(test_csv_path) as f:
csvf = csv.reader(f)
i = 0
@@ -148,6 +147,12 @@ def setUpClass(cls):
i += 1
print('done')
+ ds = datastore.get_datasource(EXTERNAL_DS_NAME)
+ if ds is not None:
+ datastore.delete_datasource(EXTERNAL_DS_NAME)
+ short_csv_file_path = get_test_csv(f'{EXTERNAL_DS_NAME}.csv', TEST_CSV['url'], lines_count=300, rewrite=True)
+ datastore.save_datasource(EXTERNAL_DS_NAME, 'file', 'test.csv', short_csv_file_path)
+
def test_1_initial_state(self):
print(f'\nExecuting {inspect.stack()[0].function}')
print('Check all testing objects not exists')
@@ -194,8 +199,8 @@ def test_2_insert_predictor(self):
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(TEST_PREDICTOR_NAME in mindsdb_tables)
- def test_3_insert_predictor_with_existing_ds(self):
- name = f'{TEST_PREDICTOR_NAME}_2'
+ def test_3_externael_ds(self):
+ name = f'{TEST_PREDICTOR_NAME}_external'
models = self.mdb.get_models()
models = [x['name'] for x in models]
if name in models:
@@ -206,7 +211,7 @@ def test_3_insert_predictor_with_existing_ds(self):
(
'{name}',
'rental_price, location',
- '{TEST_PREDICTOR_NAME}',
+ '{EXTERNAL_DS_NAME}',
'{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
@@ -221,6 +226,19 @@ def test_3_insert_predictor_with_existing_ds(self):
mindsdb_tables = [x[0] for x in mindsdb_tables]
self.assertTrue(name in mindsdb_tables)
+ res = query(f"""
+ select
+ rental_price, location, sqft, number_of_rooms,
+ rental_price_confidence, rental_price_min, rental_price_max, rental_price_explain
+ from
+ mindsdb.{name} where external_datasource='{EXTERNAL_DS_NAME}'
+ """, as_dict=True)
+
+ print('check result')
+ self.assertTrue(len(res) > 0)
+ self.assertTrue(res[0]['rental_price'] is not None and res[0]['rental_price'] != 'None')
+ self.assertTrue(res[0]['location'] is not None and res[0]['location'] != 'None')
+
def test_4_query_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
res = query(f"""
| DB exception when running mysql api
In the latest version on the staging branch, running the MindsDB MySQL API as `python3 -m mindsdb --api mysql --config config.json` throws:
```
Using configuration file: config.json
Traceback (most recent call last):
File "/usr/local/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/local/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/zoran/MyProjects/mindsdb/mindsdb/__main__.py", line 61, in <module>
dbw.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/interfaces/database/database.py", line 41, in register_predictors
for integration in it: integration.register_predictors(model_data_arr)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 115, in register_predictors
self._query(q)
File "/home/zoran/MyProjects/mindsdb/mindsdb/integrations/clickhouse/clickhouse.py", line 58, in _query
raise Exception(f'Error: {response.content}\nQuery:{query}')
Exception: Error: b'Code: 62, e.displayText() = DB::Exception: Syntax error: failed at position 51 (line 2, col 50): -class\n ( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(. Expected one of: storage definition, ENGINE, AS, UUID, OpeningRoundBracket, ON, Dot, token (version 20.5.2.7 (official build))\n'
Query:
CREATE TABLE mindsdb.diabetes-class
( `Number of times pregnant` Nullable(String) , `Plasma glucose concentration` Nullable(Int64) , `Diastolic blood pressure` Nullable(Int64) , `Triceps skin fold thickness` Nullable(Int64) , `2-Hour serum insulin` Nullable(Int64) , `Body mass index` Nullable(Float64) , `Diabetes pedigree function` Nullable(Float64) , `Age` Nullable(Int64) , `Class` Nullable(String) , `Class_original` Nullable(String) ,`select_data_query` Nullable(String),`Class_confidence` Nullable(Float64),`Class_explain` Nullable(String)
) ENGINE=MySQL('127.0.0.1:47335', 'mindsdb', 'diabetes-class_clickhouse', 'root', 'somepass')
```
It looks like it tries to recreate some of the tables that already exist in ClickHouse. The config file used:
```
{
"use_gpu": false,
"api": {
"http": {
"host": "0.0.0.0",
"port": "47334"
},
"mysql": {
"certificate_path": "/home/zoran/MyProjects/mindsdb/tests/integration_tests/flows/config/cert.pem",
"datasources": [],
"host": "127.0.0.1",
"log": {
"console_level": "INFO",
"file": "mysql.log",
"file_level": "INFO",
"folder": "logs/",
"format": "%(asctime)s - %(levelname)s - %(message)s"
},
"password": "somepassword",
"port": "47335",
"user": "root"
}
},
"config_version": 1,
"debug": true,
"integrations": {
"default_clickhouse": {
"enabled": true,
"type": "clickhouse",
"host": "localhost",
"password": "somepassword",
"port": 8123,
"user": "default"
}
},
"interface":{
"datastore": {
"enabled": false,
"storage_dir": "/home/zoran/MyProjects/mindsdb-examples/tescl/datastore"
}
}
}
```
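The ClickHouse parser fails exactly at the `-` in `diabetes-class`: the predictor name is interpolated into the `CREATE TABLE` statement without quoting. The patch in this PR adds `_escape_table_name` helpers to the ClickHouse and MariaDB integrations; a condensed, standalone sketch of that idea (the table body and `ENGINE` clause below are placeholders):
```python
# Quote the predictor name before interpolating it into DDL so hyphenated
# names like 'diabetes-class' are treated as a single identifier.
# ClickHouse escapes a literal backtick as \`, MySQL/MariaDB doubles it.
def escape_table_name_clickhouse(name: str) -> str:
    return '`' + name.replace('`', '\\`') + '`'

def escape_table_name_mysql(name: str) -> str:
    return '`' + name.replace('`', '``') + '`'

name = 'diabetes-class'
query = f'CREATE TABLE mindsdb.{escape_table_name_clickhouse(name)} (x Nullable(String)) ENGINE = Memory'
# -> CREATE TABLE mindsdb.`diabetes-class` (x Nullable(String)) ENGINE = Memory
```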
| 2020-07-22T15:30:04Z | [] | [] |
|
mindsdb/mindsdb | 632 | mindsdb__mindsdb-632 | [
"631"
] | 74f94a58a2f807aad01c87b6fa4ad50fafd9aa5a | diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -8,11 +8,18 @@
@ns_conf.route('/integrations')
@ns_conf.param('name', 'List all database integration')
-class Integration(Resource):
+class ListIntegration(Resource):
@ns_conf.doc('get_integrations')
def get(self):
return {'integrations': [k for k in ca.config_obj['integrations']]}
+@ns_conf.route('/all_integrations')
+@ns_conf.param('name', 'List all database integration')
+class AllIntegration(Resource):
+ @ns_conf.doc('get_all_integrations')
+ def get(self):
+ return ca.config_obj['integrations']
+
@ns_conf.route('/integrations/<name>')
@ns_conf.param('name', 'Database integration')
class Integration(Resource):
@@ -41,3 +48,16 @@ def post(self, name):
ca.config_obj.modify_db_integration(name, params)
DatabaseWrapper(ca.config_obj)
return 'modified'
+
+
+@ns_conf.route('/integrations/<name>/check')
+@ns_conf.param('name', 'Database integration checks')
+class Check(Resource):
+ @ns_conf.doc('check')
+ def get(self, name):
+ '''return datasource metadata'''
+ dbw = DatabaseWrapper(ca.config_obj)
+ for db_name, connected in dbw.check_connections().items():
+ if db_name == name:
+ return connected, 200
+ return f'Can\'t find database integration: {name}', 400
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -67,6 +67,12 @@ def delete(self, name):
@ns_conf.marshal_with(datasource_metadata)
def put(self, name):
'''add new datasource'''
+ if 'query' in request.json:
+ query = request.json['query']
+ source_type = request.json['integration_id']
+ ca.default_store.save_datasource(name, source_type, query)
+ return ca.default_store.get_datasource(name)
+
data = {}
def on_field(field):
print(f'\n\n{field}\n\n')
@@ -79,6 +85,7 @@ def on_file(file):
temp_dir_path = tempfile.mkdtemp(prefix='datasource_file_')
+
if request.headers['Content-Type'].startswith('multipart/form-data'):
parser = multipart.create_form_parser(
headers=request.headers,
@@ -110,7 +117,7 @@ def on_file(file):
file_path = os.path.join(temp_dir_path, data['file'])
else:
file_path = None
-
+
ca.default_store.save_datasource(ds_name, source_type, source, file_path)
os.rmdir(temp_dir_path)
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -135,7 +135,7 @@ def save_datasource(self, name, source_type, source, file_path=None):
pickle.dump(picklable, fp)
with open(os.path.join(ds_dir,'metadata.json'), 'w') as fp:
- json.dump({
+ meta = {
'name': name,
'source_type': source_type,
'source': source,
@@ -143,7 +143,8 @@ def save_datasource(self, name, source_type, source, file_path=None):
'updated_at': str(datetime.datetime.now()).split('.')[0],
'row_count': len(df),
'columns': [dict(name=x) for x in list(df.keys())]
- }, fp)
+ }
+ json.dump(meta, fp)
return self.get_datasource_obj(name, raw=True)
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -1,6 +1,7 @@
import os
import json
import hashlib
+import datetime
class Config(object):
@@ -113,6 +114,9 @@ def set(self, key_chain, value, delete=False):
# Higher level interface
def add_db_integration(self, name, dict):
+ dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]
+ if 'database_name' not in dict:
+ dict['database_name'] = name
if 'enabled' not in dict:
dict['enabled'] = True
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -46,7 +46,8 @@ def test_1_config(self):
res = requests.get(f'{root}/config/integrations')
assert res.status_code == 200
integration_names = res.json()
- assert set(integration_names['integrations']) == set(['default_mariadb', 'default_clickhouse', 'default_mysql'])
+ for integration_name in integration_names['integrations']:
+ assert integration_name in ['default_mariadb', 'default_clickhouse', 'default_mysql', 'test_integration']
test_integration_data = {'enabled': False, 'host': 'test', 'type': 'clickhouse'}
res = requests.put(f'{root}/config/integrations/test_integration', json={'params': test_integration_data})
@@ -55,7 +56,8 @@ def test_1_config(self):
res = requests.get(f'{root}/config/integrations/test_integration')
assert res.status_code == 200
test_integration = res.json()
- assert len(test_integration) == 3
+ print(test_integration, len(test_integration))
+ assert len(test_integration) == 5
res = requests.delete(f'{root}/config/integrations/test_integration')
assert res.status_code == 200
@@ -106,6 +108,20 @@ def test_2_put_ds(self):
res = requests.put(url, json=params)
assert res.status_code == 200
+ db_ds_name = ds_name + '_db'
+ params = {
+ 'name': db_ds_name
+ ,'query': 'SELECT arrayJoin([1,2,3]) as a, arrayJoin([1,2,3,4,5,6,7,8]) as b'
+ ,'integration_id': 'default_clickhouse'
+ }
+
+ url = f'{root}/datasources/{db_ds_name}'
+ res = requests.put(url, json=params)
+ assert res.status_code == 200
+ ds_data = res.json()
+ assert ds_data['source_type'] == 'default_clickhouse'
+ assert ds_data['row_count'] == 3 * 8
+
def test_3_analyze(self):
response = requests.get(f'{root}/datasources/{ds_name}/analyze')
assert response.status_code == 200
As an API consumer I must have all integration details in one single endpoint
Hello,
is it possible to have all integration details in one endpoint? For example:
```
GET http://0.0.0.0:47334/config/integrations
Response:
[
  {
    "connection_name": "connection_name_1",
    "enabled": true,
    "host": "localhost",
    "password": "mypass",
    "port": 3306,
    "type": "mariadb",
    "user": "root"
  },
  {
    "connection_name": "connection_name_2",
    "enabled": true,
    "host": "HTTP://host.com/midb",
    "password": "secret",
    "port": 3306,
    "type": "mariadb",
    "user": "root"
  },
  {
    "connection_name": "connection_name_3",
    "enabled": true,
    "host": "HTTP://host.com/midb",
    "password": "123",
    "port": 3306,
    "type": "mariadb",
    "user": "root"
  }
]
```
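For reference, the patch above adds a `/config/all_integrations` resource that returns the integrations dict from the config. Below is a rough Flask-RESTX sketch of the list-shaped response requested here; the in-memory `config` dict and the password blanking are assumptions for illustration, not part of the request:
```python
# Rough sketch of a "list every integration with its details" endpoint.
from flask import Flask
from flask_restx import Api, Resource

app = Flask(__name__)
api = Api(app)

# Stand-in for MindsDB's config object.
config = {
    'integrations': {
        'connection_name_1': {
            'type': 'mariadb', 'host': 'localhost', 'port': 3306,
            'user': 'root', 'password': 'mypass', 'enabled': True
        }
    }
}

@api.route('/config/integrations')
class AllIntegrations(Resource):
    def get(self):
        result = []
        for name, params in config['integrations'].items():
            item = dict(params, connection_name=name)
            item['password'] = None  # avoid leaking credentials over the API
            result.append(item)
        return result
```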
| 2020-08-05T15:44:04Z | [] | [] |
|
mindsdb/mindsdb | 660 | mindsdb__mindsdb-660 | [
"656"
] | df4bbd1d55df5a46aa10885483449f3790684644 | diff --git a/mindsdb/__init__.py b/mindsdb/__init__.py
--- a/mindsdb/__init__.py
+++ b/mindsdb/__init__.py
@@ -1,4 +1,5 @@
import os
+import sys
from mindsdb.utilities.fs import get_or_create_dir_struct, create_directory
from mindsdb.utilities.wizards import cli_config
@@ -15,7 +16,11 @@
if args.config is not None:
config_path = args.config
-config = Config(config_path)
+try:
+ config = Config(config_path)
+except Exception as e:
+ print(str(e))
+ sys.exit(1)
try:
datasource_dir = config['interface']['datastore']['storage_dir']
@@ -26,7 +31,8 @@
except KeyError:
pass
-map(create_directory, [datasource_dir, predictor_dir])
+create_directory(datasource_dir)
+create_directory(predictor_dir)
os.environ['MINDSDB_STORAGE_PATH'] = datasource_dir
diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -1,53 +1,97 @@
+import copy
+import traceback
+
from flask import request
-from flask_restx import Resource
+from flask_restx import Resource, abort
from flask import current_app as ca
from mindsdb.api.http.namespaces.configs.config import ns_conf
from mindsdb.interfaces.database.database import DatabaseWrapper
+def get_integration(name):
+ integrations = ca.config_obj.get('integrations', {})
+ return integrations.get(name)
+
+
@ns_conf.route('/integrations')
@ns_conf.param('name', 'List all database integration')
class ListIntegration(Resource):
@ns_conf.doc('get_integrations')
def get(self):
- return {'integrations': [k for k in ca.config_obj['integrations']]}
+ return {'integrations': [k for k in ca.config_obj.get('integrations', {})]}
+
@ns_conf.route('/all_integrations')
@ns_conf.param('name', 'List all database integration')
class AllIntegration(Resource):
@ns_conf.doc('get_all_integrations')
def get(self):
- return ca.config_obj['integrations']
+ integrations = copy.deepcopy(
+ ca.config_obj.get('integrations', {})
+ )
+ for integration in integrations.values():
+ if 'password' in integration:
+ integration['password'] = None
+ return integrations
+
@ns_conf.route('/integrations/<name>')
@ns_conf.param('name', 'Database integration')
class Integration(Resource):
@ns_conf.doc('get_integration')
def get(self, name):
- '''return datasource metadata'''
- return ca.config_obj['integrations'][name]
+ integration = get_integration(name)
+ if integration is None:
+ abort(404, f'Can\'t find database integration: {name}')
+ integration = copy.deepcopy(integration)
+ if 'password' in integration:
+ integration['password'] = None
+ return integration
@ns_conf.doc('put_integration')
def put(self, name):
- '''return datasource metadata'''
params = request.json.get('params')
- ca.config_obj.add_db_integration(name, params)
- DatabaseWrapper(ca.config_obj)
- return 'added'
+ if not isinstance(params, dict):
+ abort(400, "type of 'params' must be dict")
+ integration = get_integration(name)
+ if integration is not None:
+ abort(400, f"Integration with name '{name}' already exists")
+ try:
+ ca.config_obj.add_db_integration(name, params)
+ DatabaseWrapper(ca.config_obj)
+ except Exception as e:
+ print(traceback.format_exc())
+ abort(500, f'Error during config update: {str(e)}')
+ return '', 200
@ns_conf.doc('delete_integration')
def delete(self, name):
- ca.config_obj.remove_db_integration(name)
- return 'deleted'
+ integration = get_integration(name)
+ if integration is None:
+ abort(400, f"Nothing to delete. '{name}' not exists.")
+ try:
+ ca.config_obj.remove_db_integration(name)
+ except Exception as e:
+ print(traceback.format_exc())
+ abort(500, f'Error during integration delete: {str(e)}')
+ return '', 200
@ns_conf.doc('modify_integration')
def post(self, name):
- '''return datasource metadata'''
params = request.json.get('params')
- ca.config_obj.modify_db_integration(name, params)
- DatabaseWrapper(ca.config_obj)
- return 'modified'
+ if not isinstance(params, dict):
+ abort(400, "type of 'params' must be dict")
+ integration = get_integration(name)
+ if integration is None:
+ abort(400, f"Nothin to modify. '{name}' not exists.")
+ try:
+ ca.config_obj.modify_db_integration(name, params)
+ DatabaseWrapper(ca.config_obj)
+ except Exception as e:
+ print(traceback.format_exc())
+ abort(500, f'Error during integration modifycation: {str(e)}')
+ return '', 200
@ns_conf.route('/integrations/<name>/check')
@@ -55,9 +99,8 @@ def post(self, name):
class Check(Resource):
@ns_conf.doc('check')
def get(self, name):
- '''return datasource metadata'''
+ if get_integration(name) is None:
+ abort(404, f'Can\'t find database integration: {name}')
dbw = DatabaseWrapper(ca.config_obj)
- for db_name, connected in dbw.check_connections().items():
- if db_name == name:
- return connected, 200
- return f'Can\'t find database integration: {name}', 400
+ connections = dbw.check_connections()
+ return connections.get(name, False), 200
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -8,7 +8,7 @@
import mindsdb
from dateutil.parser import parse
from flask import request, send_file
-from flask_restx import Resource, abort
+from flask_restx import Resource, abort # 'abort' using to return errors as json: {'message': 'error text'}
from flask import current_app as ca
from mindsdb.interfaces.datastore.sqlite_helpers import *
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -43,7 +43,9 @@ def m1_0(config):
def _validate(self):
integrations = self._config.get('integrations', {})
- for key in integrations:
+ for key, value in integrations.items():
+ if not isinstance(value, dict):
+ raise TypeError(f"Config error: integration '{key}' must be a json")
if 'type' not in integrations[key]:
raise KeyError(f"Config error: for integration '{key}' key 'type' must be specified")
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -1,25 +1,23 @@
from subprocess import Popen
import time
import os
-import signal
import psutil
from random import randint
-
import unittest
import requests
-import runpy
+TEST_CONFIG = 'tests/integration_tests/flows/config/config.json'
-rand = randint(0,pow(10,12))
+rand = randint(0, pow(10, 12))
ds_name = f'hr_ds_{rand}'
-pred_name = f'hr_predictor_{rand}'
+pred_name = f'hr_predictor_{rand}'
root = 'http://localhost:47334'
class HTTPTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
- cls.sp = Popen(['python3', '-m', 'mindsdb', '--api', 'http'], close_fds=True)
+ cls.sp = Popen(['python3', '-m', 'mindsdb', '--api', 'http', '--config', TEST_CONFIG], close_fds=True)
for i in range(20):
try:
@@ -74,23 +72,29 @@ def test_1_config(self):
assert res.status_code == 200
integration = res.json()
- for k in ['enabled','host','port','password','type','user']:
+ for k in ['enabled', 'host', 'port', 'type', 'user']:
assert k in integration
assert integration[k] is not None
+ assert integration['password'] is None
# Modify it
- res = requests.post(f'{root}/config/integrations/{name}', json={'params':{'password':'test'}})
+ res = requests.post(
+ f'{root}/config/integrations/{name}',
+ json={'params': {'user': 'dr.Who'}}
+ )
res = requests.get(f'{root}/config/integrations/{name}')
assert res.status_code == 200
modified_integration = res.json()
- assert modified_integration['password'] == 'test'
+ assert modified_integration['password'] is None
+ assert modified_integration['user'] == 'dr.Who'
for k in integration:
- if k not in ['password', 'date_last_update']:
+ if k not in ['password', 'date_last_update', 'user']:
assert modified_integration[k] == integration[k]
- # Put the original values back in
- res = requests.post(f'{root}/config/integrations/{name}', json={'params':integration})
+ # Put the original values back in\
+ del integration['password']
+ res = requests.post(f'{root}/config/integrations/{name}', json={'params': integration})
res = requests.get(f'{root}/config/integrations/{name}')
assert res.status_code == 200
modified_integration = res.json()
@@ -111,9 +115,9 @@ def test_2_put_ds(self):
db_ds_name = ds_name + '_db'
params = {
- 'name': db_ds_name
- ,'query': 'SELECT arrayJoin([1,2,3]) as a, arrayJoin([1,2,3,4,5,6,7,8]) as b'
- ,'integration_id': 'default_clickhouse'
+ 'name': db_ds_name,
+ 'query': 'SELECT arrayJoin([1,2,3]) as a, arrayJoin([1,2,3,4,5,6,7,8]) as b',
+ 'integration_id': 'default_clickhouse'
}
url = f'{root}/datasources/{db_ds_name}'
@@ -143,11 +147,11 @@ def test_3_put_predictor(self):
# POST predictions
params = {
- 'when': {'sqft':500}
+ 'when': {'sqft': 500}
}
url = f'{root}/predictors/{pred_name}/predict'
res = requests.post(url, json=params)
- assert isinstance(res.json()[0]['rental_price']['predicted_value'],float)
+ assert isinstance(res.json()[0]['rental_price']['predicted_value'], float)
assert res.status_code == 200
def test_4_datasources(self):
| As an API consumer, I want to receive more error information in PUT /datasources/:db_id
**Is your feature request related to a problem? Please describe.**
As an API consumer, I want to be able to return to the user what kind of error occurred
**Describe the solution you'd like**
actual behavior
```
PUT http://localhost:47334/datasources/hola2
response: error 500
```
expected behavior
```
PUT http://localhost:47334/datasources/hola2
response: error 403, {message: 'auth issues'} or {message: 'does not find any db'} or {message: 'CERTIFICATE_VERIFY_FAILED'}
```
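The patch in this PR reworks the config namespace to use `flask_restx.abort(code, message)`, which serialises the reason as `{"message": "..."}`. Below is a reduced sketch of applying the same pattern to a datasource PUT handler; the handler and `check_connection` names are placeholders, not MindsDB's actual functions:
```python
# Sketch only: abort(code, message) makes the API answer with a JSON body
# {"message": "..."} and that status code instead of an opaque 500.
from flask_restx import abort

def check_connection(integration):
    # Stand-in for a real connection test (auth, certificate, reachability...).
    if not integration.get('enabled', False):
        raise RuntimeError('integration is disabled')

def put_datasource(name, params, integrations):
    integration = integrations.get(params.get('integration_id'))
    if integration is None:
        abort(400, f"Can't find any database integration for '{name}'")
    try:
        check_connection(integration)
    except Exception as e:
        # Surface the underlying reason (auth issue, CERTIFICATE_VERIFY_FAILED, ...).
        abort(500, f'Error connecting for datasource {name}: {e}')
    return '', 200
```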
| 2020-08-21T12:36:50Z | [] | [] |
|
mindsdb/mindsdb | 915 | mindsdb__mindsdb-915 | [
"911"
] | d22be0dbe9ada8131341b990a0b7cb852d844c12 | diff --git a/mindsdb/interfaces/datastore/sqlite_helpers.py b/mindsdb/interfaces/datastore/sqlite_helpers.py
--- a/mindsdb/interfaces/datastore/sqlite_helpers.py
+++ b/mindsdb/interfaces/datastore/sqlite_helpers.py
@@ -2,11 +2,13 @@
from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
import re
+
def create_sqlite_db(path, data_frame):
con = sqlite3.connect(path)
data_frame.to_sql(name='data', con=con, index=False)
con.close()
+
def cast_df_columns_types(df, stats):
types_map = {
DATA_TYPES.NUMERIC: {
@@ -41,11 +43,14 @@ def cast_df_columns_types(df, stats):
for column in columns:
try:
name = column['name']
- col_type = stats[name]['typing']['data_type']
- col_subtype = stats[name]['typing']['data_subtype']
- new_type = types_map[col_type][col_subtype]
+ if stats[name].get('empty', {}).get('is_empty', False):
+ new_type = types_map[DATA_TYPES.NUMERIC][DATA_SUBTYPES.INT]
+ else:
+ col_type = stats[name]['typing']['data_type']
+ col_subtype = stats[name]['typing']['data_subtype']
+ new_type = types_map[col_type][col_subtype]
if new_type == 'int64' or new_type == 'float64':
- df[name] = df[name].apply(lambda x: x.replace(',','.') if isinstance(x, str) else x)
+ df[name] = df[name].apply(lambda x: x.replace(',', '.') if isinstance(x, str) else x)
if new_type == 'int64':
df = df.astype({name: 'float64'})
df = df.astype({name: new_type})
@@ -55,6 +60,7 @@ def cast_df_columns_types(df, stats):
return df
+
def parse_filter(key, value):
result = re.search(r'filter(_*.*)\[(.*)\]', key)
operator = result.groups()[0].strip('_') or 'like'
@@ -96,12 +102,14 @@ def prepare_sql_where(where):
where = ''
return where, marks
+
def get_sqlite_columns_names(cursor):
cursor.execute('pragma table_info(data);')
column_name_index = [x[0] for x in cursor.description].index('name')
columns = cursor.fetchall()
return [x[column_name_index] for x in columns]
+
def get_sqlite_data(db_path, where, limit, offset):
where = [] if where is None else where
| diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -157,7 +157,7 @@ def test_2_insert_predictor(self):
(
'{TEST_PREDICTOR_NAME}',
'rental_price, location',
- 'select * from test.{TEST_DATA_TABLE} limit 100',
+ 'select * from test.{TEST_DATA_TABLE} limit 800',
'{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
| KeyError: 'typing' when creating new Datasource from MySQL
Using the [airplane_satisfaction dataset](https://www.kaggle.com/teejmahal20/airline-passenger-satisfaction?select=train.csv) to create a data source from Scout through MySQL throws the error below:
```
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 113, in put
ca.default_store.save_datasource(name, source_type, request.json)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py", line 185, in save_datasource
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 54, in cast_df_columns_types
print(f'Error: cant convert type of DS column {name} to {new_type}')
UnboundLocalError: local variable 'new_type' referenced before assignment
ERROR:mindsdb.api.http.initialize:Exception on /api/datasources/AirplaneData [PUT]
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 44, in cast_df_columns_types
col_type = stats[name]['typing']['data_type']
KeyError: 'typing'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/api.py", line 375, in wrapper
resp = resource(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/resource.py", line 44, in dispatch_request
resp = meth(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/flask_restx/marshalling.py", line 248, in wrapper
resp = f(*args, **kwargs)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/api/http/namespaces/datasource.py", line 113, in put
ca.default_store.save_datasource(name, source_type, request.json)
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/datastore.py", line 185, in save_datasource
df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
File "/home/zoran/MyProjects/mindsdb-examples/mdb/lib/python3.7/site-packages/mindsdb/interfaces/datastore/sqlite_helpers.py", line 54, in cast_df_columns_types
print(f'Error: cant convert type of DS column {name} to {new_type}')
UnboundLocalError: local variable 'new_type' referenced before assignment
```
 | @ZoranPandovski That's because the ds has a column without a name (the first column). That was fixed in [this pr](https://github.com/mindsdb/mindsdb/pull/854). Since then, it should raise the error 'Each column in datasource must have unique name'. But at the moment the GUI doesn't show the error, [here is the issue](https://github.com/mindsdb/mindsdb_gui_web/issues/265).
@StpMax I removed that one. There are no id or empty columns in my DB. Check the dump I've sent you. | 2020-11-05T14:57:29Z | [] | [] |
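The failing lookup is `stats[name]['typing']` for a column that the analysis marked as empty (such as an unnamed first column), so there is no `typing` block at all. The patch in this PR guards that case using the column's `empty.is_empty` flag; condensed into a standalone helper, the guard looks roughly like this:
```python
# Condensed sketch of the guard added in cast_df_columns_types(): columns the
# analysis flags as empty get a default dtype instead of reading ['typing'].
def pick_dtype(name, stats, types_map, default='int64'):
    col_stats = stats[name]
    if col_stats.get('empty', {}).get('is_empty', False):
        return default
    col_type = col_stats['typing']['data_type']
    col_subtype = col_stats['typing']['data_subtype']
    return types_map[col_type][col_subtype]
```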
mindsdb/mindsdb | 943 | mindsdb__mindsdb-943 | [
"941"
] | b7f8d68aa7e9b6a6682f70403064dbfc258f4ae6 | diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py
--- a/mindsdb/api/http/namespaces/util.py
+++ b/mindsdb/api/http/namespaces/util.py
@@ -1,5 +1,6 @@
from flask import request
from flask_restx import Resource, abort
+from flask import current_app as ca
from mindsdb.api.http.namespaces.configs.util import ns_conf
from mindsdb import __about__
@@ -11,25 +12,12 @@ def get(self):
'''Checks server avaliable'''
return {'status': 'ok'}
-
-@ns_conf.route('/shutdown')
-class Shutdown(Resource):
- @ns_conf.doc('get_shutdown')
- def get(self):
- '''Shutdown server'''
- if request.host.startswith('127.0.0.1') or request.host.startswith('localhost'):
- func = request.environ.get('werkzeug.server.shutdown')
- if func is None:
- return '', 500
- func()
- return '', 200
- abort(403, "")
-
-
-@ns_conf.route('/util/version')
-class Version(Resource):
- @ns_conf.doc('get_endpoint')
+@ns_conf.route('/report_uuid')
+class ReportUUID(Resource):
+ @ns_conf.doc('get_report_uuid')
def get(self):
- '''Check endpoint'''
- return {'mindsdb': "{__about__.__version__}"}
-
+ metamodel_name = '___monitroing_metamodel___'
+ predictor = ca.mindsdb_native.create(metamodel_name)
+ return {
+ 'report_uuid': predictor.report_uuid
+ }
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -17,16 +17,24 @@ def __init__(self, config):
self.config = config
self.dbw = DatabaseWrapper(self.config)
+ def _setup_for_creation(self, name):
+ predictor_dir = Path(self.config.paths['predictors']).joinpath(name)
+ create_directory(predictor_dir)
+ versions_file_path = predictor_dir.joinpath('versions.json')
+ with open(str(versions_file_path), 'wt') as f:
+ json.dump(self.config.versions, f, indent=4, sort_keys=True)
+
+ def create(self, name):
+ self._setup_for_creation(name)
+ predictor = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})
+ return predictor
+
def learn(self, name, from_data, to_predict, kwargs={}):
join_learn_process = kwargs.get('join_learn_process', False)
if 'join_learn_process' in kwargs:
del kwargs['join_learn_process']
- predictor_dir = Path(self.config.paths['predictors']).joinpath(name)
- create_directory(predictor_dir)
- versions_file_path = predictor_dir.joinpath('versions.json')
- with open(str(versions_file_path), 'wt') as f:
- json.dump(self.config.versions, f, indent=4, sort_keys=True)
+ self._setup_for_creation(name)
p = PredictorProcess(name, from_data, to_predict, kwargs, self.config.get_all(), 'learn')
p.start()
@@ -43,7 +51,7 @@ def predict(self, name, when_data=None, kwargs={}):
p.start()
predictions = p.join()
'''
- mdb = mindsdb_native.Predictor(name=name)
+ mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})
predictions = mdb.predict(
when_data=when_data,
diff --git a/mindsdb/interfaces/native/predictor_process.py b/mindsdb/interfaces/native/predictor_process.py
--- a/mindsdb/interfaces/native/predictor_process.py
+++ b/mindsdb/interfaces/native/predictor_process.py
@@ -22,7 +22,7 @@ def run(self):
name, from_data, to_predict, kwargs, config, trx_type = self._args
- mdb = mindsdb_native.Predictor(name=name)
+ mdb = mindsdb_native.Predictor(name=name, run_env={'trigger': 'mindsdb'})
if trx_type == 'learn':
to_predict = to_predict if isinstance(to_predict, list) else [to_predict]
| diff --git a/tests/docker/postgres/Dockerfile b/tests/docker/postgres/Dockerfile
--- a/tests/docker/postgres/Dockerfile
+++ b/tests/docker/postgres/Dockerfile
@@ -3,5 +3,6 @@ FROM postgres:12.3
RUN apt-get update && apt-get install -y \
postgresql-12-mysql-fdw
+
RUN mkdir -p /docker-entrypoint-initdb.d && \
echo 'CREATE EXTENSION mysql_fdw;' > /docker-entrypoint-initdb.d/init.sql
diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -182,14 +182,29 @@ def test_5_datasource_not_found(self):
response = requests.get(f'{root}/datasource/dummy_source')
assert response.status_code == 404
- def test_6_ping(self):
+ def test_6_utils(self):
"""
Call utilities ping endpoint
THEN check the response is success
+
+ Call utilities report_uuid endpoint
+ THEN check the response is success
+ THEN check if the report_uuid is present in the report json and well fromated
+ THEN Call the endpoint again and check that the two report_uuids returned match
"""
+
response = requests.get(f'{root}/util/ping')
assert response.status_code == 200
+ response = requests.get(f'{root}/util/report_uuid')
+ assert response.status_code == 200
+ report_uuid = response.json()['report_uuid']
+ assert report_uuid == 'no_report'
+
+ # Make sure the uuid doesn't change on subsequent requests
+ response = requests.get(f'{root}/util/report_uuid')
+ assert report_uuid == response.json()['report_uuid']
+
def test_7_predictors(self):
"""
Call list predictors endpoint
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -16,7 +16,7 @@
"user": "mindsdb"
}
},
- "config_version": "1.2",
+ "config_version": "1.3",
"debug": true,
"integrations": {
"default_clickhouse": {
@@ -35,6 +35,23 @@
"type": "mariadb",
"user": "root"
},
+ "default_mongodb": {
+ "enabled": false,
+ "host": "127.0.0.1",
+ "password": "",
+ "port": 27001,
+ "type": "mongodb",
+ "user": ""
+ },
+ "default_mssql": {
+ "enabled": true,
+ "host": "127.0.0.1",
+ "odbc_driver_name": "MySQL ODBC 8.0 Unicode Driver",
+ "password": "password",
+ "port": 1433,
+ "type": "mssql",
+ "user": "sa"
+ },
"default_mysql": {
"enabled": true,
"host": "localhost",
@@ -44,57 +61,24 @@
"user": "root"
},
"default_postgres": {
+ "database": "postgres",
"enabled": true,
"host": "localhost",
- "user": "postgres",
"password": "",
- "database": "postgres",
"port": 5432,
- "type": "postgres"
- },
- "default_mssql": {
- "enabled": true,
- "host": "127.0.0.1",
- "odbc_driver_name": "MySQL ODBC 8.0 Unicode Driver",
- "password": "password",
- "port": 1433,
- "type": "mssql",
- "user": "sa"
- },
- "default_mongodb": {
- "enabled": false,
- "host": "127.0.0.1",
- "password": "",
- "port": 27001,
- "type": "mongodb",
- "user": ""
+ "type": "postgres",
+ "user": "postgres"
},
"default_snowflake": {
"enabled": false,
"type": "snowflake"
}
},
- "interface": {
- "dataskillet": {
- "enabled": false
- },
- "datastore": {
- "enabled": true
- },
- "lightwood": {
- "enabled": true
- },
- "mindsdb_native": {
- "enabled": true
- }
- },
"log": {
"level": {
"console": "WARNING",
"file": "ERROR"
}
},
- "pip_path": null,
- "python_interpreter": null,
"storage_dir": "/tmp/mindsdb_storage"
-}
+}
\ No newline at end of file
| UUID endpoint
We need a UUID endpoint, say `/api/config/reporting_uuid`, that returns the mindsdb_native telemetry uuid to Scout.
This will make less sense when/if mindsdb native starts wrapping over multiple machines running native, but we'll cross that bridge when we get there.
For now the impl is simple:
* When mindsdb starts, create a predictor with a placeholder name
* Make the uuid endpoint call `.get_model_data` on that predictor and get the `report_uuid` key
* Send this key to the frontend in a json: `{"report_uuid": "{{value_of_the_report_uuid_key}}" }` (a client-side sketch follows)
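A rough sketch of the frontend side of this call; the host, port and the `/util/report_uuid` path are assumptions mirroring the HTTP integration tests above, not a fixed contract:

```python
# Hypothetical client helper; the API root is an assumption, adjust to your deployment.
import requests

def fetch_report_uuid(api_root='http://127.0.0.1:47334/api'):
    res = requests.get(f'{api_root}/util/report_uuid')
    res.raise_for_status()
    return res.json()['report_uuid']

if __name__ == '__main__':
    print(fetch_report_uuid())
```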
| 2020-11-17T12:46:31Z | [] | [] |
|
mindsdb/mindsdb | 951 | mindsdb__mindsdb-951 | [
"899"
] | de5222e7a8553417d92869a53ef4779c7b7264f4 | diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -150,12 +150,15 @@ def put(self, name):
retrain = True
else:
retrain = False
- except:
+ except Exception:
retrain = None
ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data')
from_data = ca.default_store.get_datasource_obj(ds_name, raw=True)
+ if from_data is None:
+ return {'message': f'Can not find datasource: {ds_name}'}, 400
+
if retrain is True:
original_name = name
name = name + '_retrained'
diff --git a/mindsdb/utilities/functions.py b/mindsdb/utilities/functions.py
--- a/mindsdb/utilities/functions.py
+++ b/mindsdb/utilities/functions.py
@@ -42,7 +42,7 @@ def get_all_models_meta_data(mindsdb_native, custom_models):
'name': x['name'],
'predict': x['predict'],
'data_analysis': mindsdb_native.get_model_data(x['name'])['data_analysis_v2']
- } for x in mindsdb_native.get_models()
+ } for x in mindsdb_native.get_models() if x['status'] == 'complete'
]
model_data_arr.extend(custom_models.get_models())
| diff --git a/tests/integration_tests/flows/http_test_helpers.py b/tests/integration_tests/flows/http_test_helpers.py
--- a/tests/integration_tests/flows/http_test_helpers.py
+++ b/tests/integration_tests/flows/http_test_helpers.py
@@ -59,7 +59,7 @@ def check_ds_analyzable(ds_name):
def wait_predictor_learn(predictor_name):
start_time = time.time()
learn_done = False
- while learn_done is False and (time.time() - start_time) < 60:
+ while learn_done is False and (time.time() - start_time) < 120:
learn_done = get_predictor_data(predictor_name)['status'] == 'complete'
time.sleep(1)
assert learn_done
diff --git a/tests/integration_tests/flows/test_mistakes.py b/tests/integration_tests/flows/test_mistakes.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/test_mistakes.py
@@ -0,0 +1,221 @@
+import unittest
+import requests
+import asyncio
+import time
+
+from mindsdb.utilities.config import Config
+
+from common import (
+ MINDSDB_DATABASE,
+ HTTP_API_ROOT,
+ TEST_CONFIG,
+ run_environment,
+ stop_mindsdb
+)
+
+from http_test_helpers import (
+ wait_predictor_learn,
+ check_predictor_not_exists,
+ check_ds_not_exists,
+ check_ds_exists
+)
+
+# +++ define test data
+TEST_DATASET = 'us_health_insurance'
+
+TO_PREDICT = {
+ 'smoker': str
+}
+CONDITION = {
+ 'age': 20,
+ 'sex': 'female'
+}
+# ---
+
+TEST_DATA_TABLE = TEST_DATASET
+TEST_PREDICTOR_NAME = f'{TEST_DATASET}_predictor'
+EXTERNAL_DS_NAME = f'{TEST_DATASET}_external'
+
+TEST_INTEGRATION = 'test_integration'
+TEST_DS = 'test_ds'
+TEST_PREDICTOR = 'test_predictor'
+
+config = Config(TEST_CONFIG)
+
+
+class UserFlowTest_1(unittest.TestCase):
+ def test_1_wrong_integration(self):
+ '''
+ start mindsdb with enabled integration with wrong password
+ try create ds
+ change password to correct
+ '''
+ original_db_password = config['integrations']['default_mariadb']['password']
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'enabled': True,
+ 'password': 'broken'
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE
+ )
+
+ check_ds_not_exists(TEST_DS)
+
+ # TODO creating DS from unexists integration raise not critical error in code.
+ # need fix it and return human-readable error
+ # related issue: https://github.com/mindsdb/mindsdb/issues/945
+ # data = {
+ # "integration_id": 'unexists_integration',
+ # "name": TEST_DS,
+ # "query": f"select * from test_data.{TEST_DATASET} limit 50;"
+ # }
+ # res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ # assert res ?
+
+ # check create DS with wrong integration password
+ data = {
+ "integration_id": 'default_mariadb',
+ "name": TEST_DS,
+ "query": f"select * from test_data.{TEST_DATASET} limit 100;"
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ assert 'Access denied for user' in res.json()['message']
+
+ check_ds_not_exists(TEST_DS)
+
+ # restore password
+ res = requests.post(
+ f'{HTTP_API_ROOT}/config/integrations/default_mariadb',
+ json={'params': {'password': original_db_password}}
+ )
+ assert res.status_code == 200
+ config['integrations']['default_mariadb']['password'] = original_db_password
+
+ def test_2_broke_analisys(self):
+ '''
+ stop mindsdb while analyse dataset
+ '''
+ data = {
+ "integration_id": 'default_mariadb',
+ "name": TEST_DS,
+ "query": f"select * from test_data.{TEST_DATASET} limit 100;"
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ assert res.status_code == 200
+
+ res = requests.get(f'{HTTP_API_ROOT}/datasources/{TEST_DS}/analyze')
+ assert res.status_code == 200
+
+ stop_mindsdb()
+
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'enabled': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+
+ check_ds_exists(TEST_DS)
+
+ def test_3_wrong_predictor(self):
+ '''
+ try create predictor with wrong parameters,
+ close mindsdb while model training
+ check mindsdb can start again
+ '''
+ check_predictor_not_exists(TEST_PREDICTOR)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': 'wrong ds'
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}', json=data)
+ assert 'Can not find datasource' in res.json()['message']
+
+ check_predictor_not_exists(TEST_PREDICTOR)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': TEST_DS
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}', json=data)
+ assert res.status_code == 200
+
+ stop_mindsdb()
+
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'enabled': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+
+ # TODO add after this issue will be closed: https://github.com/mindsdb/mindsdb/issues/948
+ # check_predictor_not_exists(TEST_PREDICTOR)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': TEST_DS
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}_2', json=data)
+ assert res.status_code == 200
+
+ wait_predictor_learn(f'{TEST_PREDICTOR}_2')
+
+ def test_4_wrong_prediction(self):
+ '''
+ close mindsdb while make prediction, then try run it again
+ '''
+ ioloop = asyncio.get_event_loop()
+ if ioloop.is_closed():
+ ioloop = asyncio.new_event_loop()
+ ioloop.run_in_executor(
+ None,
+ lambda: requests.post(
+ f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}_2/predict',
+ json={'when': CONDITION}
+ )
+ )
+ time.sleep(0.5)
+ stop_mindsdb()
+ ioloop.close()
+
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'enabled': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+
+ res = requests.post(
+ f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}_2/predict',
+ json={'when': CONDITION}
+ )
+ assert res.status_code == 200
+
+
+if __name__ == "__main__":
+ try:
+ unittest.main(failfast=True)
+ print('Tests passed!')
+ except Exception as e:
+ print(f'Tests Failed!\n{e}')
| Add mistakes to tests
We should add "mistakes" and "crashes" to a bunch of the tests, especially those specified in #898.
## I - Crash during tests
1. Start the tested action (e.g. training a model, uploading a dataset)
2. Before it finishes, interrupt it using a separate process that kills mindsdb
3. Make sure mindsdb can start from the failed state and try the actions again (a condensed sketch of this flow follows)
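A condensed sketch of this crash/restart flow, reusing the helpers that the integration tests above import from `tests/integration_tests/flows/common.py`; the datasource payload and endpoints mirror those tests and are illustrative only:

```python
# Sketch only: assumes it runs next to common.py so these imports resolve,
# and that a 'default_mariadb' integration is configured as in the tests.
import requests
from common import run_environment, stop_mindsdb, HTTP_API_ROOT, MINDSDB_DATABASE

def crash_and_recover(config, ds_name, query):
    # 1. start mindsdb and kick off a slow action (datasource analysis)
    run_environment(config, apis=['http'], mindsdb_database=MINDSDB_DATABASE)
    requests.put(f'{HTTP_API_ROOT}/datasources/{ds_name}', json={
        'integration_id': 'default_mariadb', 'name': ds_name, 'query': query})
    requests.get(f'{HTTP_API_ROOT}/datasources/{ds_name}/analyze')

    # 2. interrupt it before it finishes
    stop_mindsdb()

    # 3. start again from the failed state (keep storage) and check the datasource survived
    run_environment(config, apis=['http'], mindsdb_database=MINDSDB_DATABASE,
                    clear_storage=False)
    assert requests.get(f'{HTTP_API_ROOT}/datasources/{ds_name}').status_code == 200
```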
## II - Mistakes during tests
1. Add a test step that has a mistake (e.g. wrong password for integration, wrong columns for prediction)
2. Make sure a failure is obvious to the user once this step is done
3. Make sure mindsdb still works and can be stopped and started back up again without issues
4. Redo step 1) correctly and make sure everything is fine.
| 2020-11-18T16:12:14Z | [] | [] |
|
mindsdb/mindsdb | 974 | mindsdb__mindsdb-974 | [
"682",
"705",
"948"
] | 68aa120619f687869da8e135b34fbdca69ab749e | diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -5,6 +5,7 @@
import time
import asyncio
import logging
+import datetime
from pkg_resources import get_distribution
import torch.multiprocessing as mp
@@ -28,7 +29,7 @@
def close_api_gracefully(apis):
- try:
+ try:
for api in apis.values():
process = api['process']
sys.stdout.flush()
@@ -84,6 +85,8 @@ def close_api_gracefully(apis):
os.environ['DEFAULT_LOG_LEVEL'] = config['log']['level']['console']
os.environ['LIGHTWOOD_LOG_LEVEL'] = config['log']['level']['console']
+ config.set(['mindsdb_last_started_at'], str(datetime.datetime.now()))
+
initialize_log(config)
log = logging.getLogger('mindsdb.main')
diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -59,6 +59,9 @@ def put(self, name):
if integration is not None:
abort(400, f"Integration with name '{name}' already exists")
try:
+ if 'enabled' in params:
+ params['publish'] = params['enabled']
+ del params['enabled']
ca.config_obj.add_db_integration(name, params)
mdb = ca.mindsdb_native
@@ -92,6 +95,9 @@ def post(self, name):
if integration is None:
abort(400, f"Nothin to modify. '{name}' not exists.")
try:
+ if 'enabled' in params:
+ params['publish'] = params['enabled']
+ del params['enabled']
ca.config_obj.modify_db_integration(name, params)
DatabaseWrapper(ca.config_obj)
except Exception as e:
diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -19,8 +19,6 @@
from mindsdb.api.http.namespaces.entitites.predictor_status import predictor_status
-model_swapping_map = {}
-
def debug_pkey_type(model, keys=None, reset_keyes=True, type_to_check=list, append_key=True):
if type(model) != dict:
return
@@ -122,8 +120,6 @@ def delete(self, name):
@ns_conf.doc('put_predictor', params=put_predictor_params)
def put(self, name):
'''Learning new predictor'''
- global model_swapping_map
-
data = request.json
to_predict = data.get('to_predict')
@@ -174,12 +170,10 @@ def put(self, name):
if retrain is True:
try:
- model_swapping_map[original_name] = True
ca.mindsdb_native.delete_model(original_name)
ca.mindsdb_native.rename_model(name, original_name)
- model_swapping_map[original_name] = False
except:
- model_swapping_map[original_name] = False
+ pass
return '', 200
@@ -234,14 +228,12 @@ def get(self, name):
return columns, 200
-
@ns_conf.route('/<name>/predict')
@ns_conf.param('name', 'The predictor identifier')
class PredictorPredict(Resource):
@ns_conf.doc('post_predictor_predict', params=predictor_query_params)
def post(self, name):
'''Queries predictor'''
- global model_swapping_map
data = request.json
@@ -259,10 +251,6 @@ def post(self, name):
if type(kwargs) != type({}):
kwargs = {}
- # Not the fanciest semaphore, but should work since restplus is multi-threaded and this condition should rarely be reached
- while name in model_swapping_map and model_swapping_map[name] is True:
- time.sleep(1)
-
if is_custom(name):
return ca.custom_models.predict(name, when_data=when, **kwargs)
else:
@@ -276,7 +264,6 @@ def post(self, name):
class PredictorPredictFromDataSource(Resource):
@ns_conf.doc('post_predictor_predict', params=predictor_query_params)
def post(self, name):
- global model_swapping_map
data = request.json
from_data = ca.default_store.get_datasource_obj(data.get('data_source_name'), raw=True)
@@ -296,10 +283,6 @@ def post(self, name):
if type(kwargs) != type({}):
kwargs = {}
- # Not the fanciest semaphore, but should work since restplus is multi-threaded and this condition should rarely be reached
- while name in model_swapping_map and model_swapping_map[name] is True:
- time.sleep(1)
-
if is_custom(name):
return ca.custom_models.predict(name, from_data=from_data, **kwargs)
else:
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
--- a/mindsdb/interfaces/database/database.py
+++ b/mindsdb/interfaces/database/database.py
@@ -24,7 +24,7 @@ def _get_integrations(self):
# @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time
integration_arr = []
for db_alias in self.config['integrations']:
- if self.config['integrations'][db_alias]['enabled']:
+ if self.config['integrations'][db_alias]['publish']:
db_type = self.config['integrations'][db_alias]['type']
if db_type == 'clickhouse':
integration_arr.append(Clickhouse(self.config, db_alias))
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -182,7 +182,8 @@ def save_datasource(self, name, source_type, source, file_path=None):
shutil.rmtree(ds_meta_dir)
raise Exception('Each column in datasource must have unique name')
- df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
+ # limit=200 for analysis might have odd results but it's a compromise that should work most of the time to make the sqlite db creation faster
+ df_with_types = cast_df_columns_types(df, self.get_analysis(ds.filter(limit=200))['data_analysis_v2'])
create_sqlite_db(os.path.join(ds_meta_dir, 'sqlite.db'), df_with_types)
with open(os.path.join(ds_meta_dir, 'ds.pickle'), 'wb') as fp:
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -81,6 +81,7 @@ def get_models(self, status='any'):
models = F.get_models()
if status != 'any':
models = [x for x in models if x['status'] == status]
+ models = [x for x in models if x['status'] != 'training' or parse_datetime(x['created_at']) > parse_datetime(self.config['mindsdb_last_started_at'])]
for i in range(len(models)):
for k in ['train_end_at', 'updated_at', 'created_at']:
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -269,9 +269,7 @@ def get_all(self):
return self._config
def set(self, key_chain, value, delete=False):
- with open(self.config_path, 'r') as fp:
- self._config = json.load(fp)
-
+ self._read()
c = self._config
for i, k in enumerate(key_chain):
if k in c and i + 1 < len(key_chain):
@@ -291,8 +289,8 @@ def add_db_integration(self, name, dict):
dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]
if 'database_name' not in dict:
dict['database_name'] = name
- if 'enabled' not in dict:
- dict['enabled'] = True
+ if 'publish' not in dict:
+ dict['publish'] = True
self.set(['integrations', name], dict)
diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -28,28 +28,28 @@ def auto_config(python_path, pip_path, storage_dir):
},
"integrations": {
"default_clickhouse": {
- "enabled": False,
+ "publish": False,
"type": 'clickhouse'
},
"default_mariadb": {
- "enabled": False,
+ "publish": False,
"type": 'mariadb'
},
"default_mysql": {
- "enabled": False,
+ "publish": False,
"type": 'mysql'
},
"default_postgres": {
- "enabled": False,
+ "publish": False,
"type": 'postgres',
"database": 'postgres'
},
"default_mssql": {
- "enabled": False,
+ "publish": False,
"type": 'mssql'
},
"default_mongodb": {
- "enabled": False,
+ "publish": False,
"type": 'mongodb'
}
},
@@ -138,7 +138,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
clickhouse = _in('Connect to clickhouse ? [Y/N]', 'Y', use_default)
if clickhouse in ['Y', 'y']:
- config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration?: ', False, use_default)
+ config['integrations']['default_clickhouse']['publish'] = _in('Enable Clickhouse integration?: ', False, use_default)
config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ', '127.0.0.1', use_default)
config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ', 8123, use_default)
config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ', 'default', use_default)
@@ -147,7 +147,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mariadb = _in('Connect to Mariadb ? [Y/N]', 'Y', use_default)
if mariadb in ['Y', 'y']:
- config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration?: ', False, use_default)
+ config['integrations']['default_mariadb']['publish'] = _in('Enable Mariadb integration?: ', False, use_default)
config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ', '127.0.0.1', use_default)
config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ', 3306, use_default)
config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ', 'root', use_default)
@@ -156,7 +156,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mysql = _in('Connect to MySQL ? [Y/N]', 'Y', use_default)
if mysql in ['Y', 'y']:
- config['integrations']['default_mysql']['enabled'] = _in('Enable MySQL integration?: ', False, use_default)
+ config['integrations']['default_mysql']['publish'] = _in('Enable MySQL integration?: ', False, use_default)
config['integrations']['default_mysql']['host'] = _in('MySQL host: ', '127.0.0.1', use_default)
config['integrations']['default_mysql']['port'] = _in('MySQL port: ', 3306, use_default)
config['integrations']['default_mysql']['user'] = _in('MySQL user: ', 'root', use_default)
@@ -165,7 +165,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mysql = _in('Connect to PostgreSQL ? [Y/N]', 'Y', use_default)
if mysql in ['Y', 'y']:
- config['integrations']['default_postgres']['enabled'] = _in('Enable PostgreSQL integration?: ', False, use_default)
+ config['integrations']['default_postgres']['publish'] = _in('Enable PostgreSQL integration?: ', False, use_default)
config['integrations']['default_postgres']['host'] = _in('PostgreSQL host: ', '127.0.0.1', use_default)
config['integrations']['default_postgres']['port'] = _in('PostgreSQL port: ', 5432, use_default)
config['integrations']['default_postgres']['user'] = _in('PostgreSQL user: ', 'postgres', use_default)
@@ -175,7 +175,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mssql = _in('Connect to MSSQL ? [Y/N]', 'Y', use_default)
if mssql in ['Y', 'y']:
- config['integrations']['default_mssql']['enabled'] = _in('Enable MSSQL integration?: ', False, use_default)
+ config['integrations']['default_mssql']['publish'] = _in('Enable MSSQL integration?: ', False, use_default)
config['integrations']['default_mssql']['host'] = _in('MSSQL host: ', '127.0.0.1', use_default)
config['integrations']['default_mssql']['port'] = _in('MSSQL port: ', 1433, use_default)
config['integrations']['default_mssql']['user'] = _in('MSSQL user: ', 'sa', use_default)
@@ -185,7 +185,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mongodb = _in('Connect to MongoDB ? [Y/N]', 'Y', use_default)
if mongodb in ['Y', 'y']:
- config['integrations']['default_mongodb']['enabled'] = _in('Enable MongoDB integration?: ', False, use_default)
+ config['integrations']['default_mongodb']['publish'] = _in('Enable MongoDB integration?: ', False, use_default)
config['integrations']['default_mongodb']['host'] = _in('MongoDB host: ', '127.0.0.1', use_default)
config['integrations']['default_mongodb']['port'] = _in('MongoDB port: ', 27017, use_default)
config['integrations']['default_mongodb']['user'] = _in('MongoDB user: ', '', use_default)
@@ -193,7 +193,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
config['integrations']['default_mongodb']['type'] = 'mongodb'
for db_name in list(config['integrations'].keys()):
- if not config['integrations'][db_name]['enabled']:
+ if not config['integrations'][db_name]['publish']:
del config['integrations'][db_name]
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -32,10 +32,10 @@ def setUpClass(cls):
apis=['http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
},
'default_clickhouse': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=common.MINDSDB_DATABASE
@@ -60,7 +60,7 @@ def test_1_config(self):
for integration_name in integration_names['integrations']:
assert integration_name in self.initial_integrations_names
- test_integration_data = {'enabled': False, 'host': 'test', 'type': 'clickhouse'}
+ test_integration_data = {'publish': False, 'host': 'test', 'type': 'clickhouse'}
res = requests.put(f'{root}/config/integrations/test_integration', json={'params': test_integration_data})
assert res.status_code == 200
@@ -85,7 +85,7 @@ def test_1_config(self):
assert res.status_code == 200
integration = res.json()
- for k in ['enabled', 'host', 'port', 'type', 'user']:
+ for k in ['publish', 'host', 'port', 'type', 'user']:
assert k in integration
assert integration[k] is not None
assert integration['password'] is None
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -101,7 +101,7 @@
def prepare_config(config, mindsdb_database='mindsdb', override_integration_config={}, override_api_config={}, clear_storage=True):
for key in config._config['integrations']:
- config._config['integrations'][key]['enabled'] = False
+ config._config['integrations'][key]['publish'] = False
if USE_EXTERNAL_DB_SERVER:
with open(EXTERNAL_DB_CREDENTIALS, 'rt') as f:
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -20,7 +20,7 @@
"debug": true,
"integrations": {
"default_clickhouse": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "",
"port": 8123,
@@ -28,7 +28,7 @@
"user": "default"
},
"default_mariadb": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "root",
"port": 3306,
@@ -36,7 +36,7 @@
"user": "root"
},
"default_mongodb": {
- "enabled": false,
+ "publish": false,
"host": "127.0.0.1",
"password": "",
"port": 27001,
@@ -44,7 +44,7 @@
"user": ""
},
"default_mssql": {
- "enabled": true,
+ "publish": true,
"host": "127.0.0.1",
"odbc_driver_name": "MySQL ODBC 8.0 Unicode Driver",
"password": "password",
@@ -53,7 +53,7 @@
"user": "sa"
},
"default_mysql": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "root",
"port": 3307,
@@ -62,7 +62,7 @@
},
"default_postgres": {
"database": "postgres",
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "",
"port": 5432,
@@ -70,7 +70,7 @@
"user": "postgres"
},
"default_snowflake": {
- "enabled": false,
+ "publish": false,
"type": "snowflake"
}
},
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -98,7 +98,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_clickhouse': {
- 'enabled': True
+ 'publish': True
}
},
override_api_config={
diff --git a/tests/integration_tests/flows/test_custom_model.py b/tests/integration_tests/flows/test_custom_model.py
--- a/tests/integration_tests/flows/test_custom_model.py
+++ b/tests/integration_tests/flows/test_custom_model.py
@@ -80,7 +80,7 @@ def setUpClass(cls):
apis=['http', 'mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -83,7 +83,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mistakes.py b/tests/integration_tests/flows/test_mistakes.py
--- a/tests/integration_tests/flows/test_mistakes.py
+++ b/tests/integration_tests/flows/test_mistakes.py
@@ -46,7 +46,7 @@
class UserFlowTest_1(unittest.TestCase):
def test_1_wrong_integration(self):
'''
- start mindsdb with enabled integration with wrong password
+ start mindsdb with publish integration with wrong password
try create ds
change password to correct
'''
@@ -56,7 +56,7 @@ def test_1_wrong_integration(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True,
+ 'publish': True,
'password': 'broken'
}
},
@@ -117,7 +117,7 @@ def test_2_broke_analisys(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
@@ -157,7 +157,7 @@ def test_3_wrong_predictor(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
@@ -199,7 +199,7 @@ def test_4_wrong_prediction(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
diff --git a/tests/integration_tests/flows/test_mongo.py b/tests/integration_tests/flows/test_mongo.py
--- a/tests/integration_tests/flows/test_mongo.py
+++ b/tests/integration_tests/flows/test_mongo.py
@@ -53,7 +53,7 @@ def setUpClass(cls):
apis=['mongodb'],
override_integration_config={
'default_mongodb': {
- 'enabled': True,
+ 'publish': True,
'port': 27002,
'host': '127.0.0.1',
'type': 'mongodb',
diff --git a/tests/integration_tests/flows/test_mssql.py b/tests/integration_tests/flows/test_mssql.py
--- a/tests/integration_tests/flows/test_mssql.py
+++ b/tests/integration_tests/flows/test_mssql.py
@@ -59,7 +59,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mssql': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mysql.py b/tests/integration_tests/flows/test_mysql.py
--- a/tests/integration_tests/flows/test_mysql.py
+++ b/tests/integration_tests/flows/test_mysql.py
@@ -83,7 +83,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mysql': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_postgres.py b/tests/integration_tests/flows/test_postgres.py
--- a/tests/integration_tests/flows/test_postgres.py
+++ b/tests/integration_tests/flows/test_postgres.py
@@ -89,7 +89,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_postgres': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_user_flow_1.py b/tests/integration_tests/flows/test_user_flow_1.py
--- a/tests/integration_tests/flows/test_user_flow_1.py
+++ b/tests/integration_tests/flows/test_user_flow_1.py
@@ -123,7 +123,7 @@ def test_1_create_integration_via_http(self):
test_integration_data = {}
test_integration_data.update(config['integrations']['default_mariadb'])
- test_integration_data['enabled'] = True
+ test_integration_data['publish'] = True
test_integration_data['database_name'] = TEST_INTEGRATION
res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
assert res.status_code == 200
@@ -132,7 +132,7 @@ def test_1_create_integration_via_http(self):
assert res.status_code == 200
test_integration = res.json()
assert test_integration['password'] is None
- for key in ['user', 'port', 'host', 'enabled']:
+ for key in ['user', 'port', 'host', 'publish']:
assert test_integration[key] == test_integration_data[key]
def test_3_create_ds_from_sql_by_http(self):
diff --git a/tests/integration_tests/flows/test_user_flow_2.py b/tests/integration_tests/flows/test_user_flow_2.py
--- a/tests/integration_tests/flows/test_user_flow_2.py
+++ b/tests/integration_tests/flows/test_user_flow_2.py
@@ -137,7 +137,7 @@ def test_1_upload_ds(self):
def test_2_add_integration(self):
test_integration_data = {}
test_integration_data.update(config['integrations']['default_mariadb'])
- test_integration_data['enabled'] = True
+ test_integration_data['publish'] = True
test_integration_data['database_name'] = TEST_INTEGRATION
res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
assert res.status_code == 200
@@ -150,7 +150,7 @@ def test_3_restart_and_connect(self):
apis=['mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
| Deprecate `model_swapping_map`
I believe `model_swapping_map` could be removed since mindsdb_native should have all the required thread-safety now; make sure this is true and, if so, remove it.
PUT datasource route fails with a timeout
That happens if the datasource is a query against a big table in the database.
Predictor stuck at status `train` if mindsdb is closed
The reason for this is quite obvious: there is no way to tell whether a predictor is actually "training" from the outside just by looking at the model data.
So if mindsdb_native gets force killed, it will look like training never stopped.
There are a few ways to check for this in a single-machine scenario (a sketch of a timestamp-based variant follows the list):
1. Check if any process called `mindsdb_native` is running. If not, then invalidate all "training" predictors.
2. Same thing as 1, but we need to add the specific uuid of the mindsdb_native predictor to the `learn` call process name in native (not sure how easy this is)
3. When mindsdb is started, list all predictors before starting any APIs; if a predictor's status is "training" and mindsdb_native is not running, invalidate it.
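For reference, the patch above uses a timestamp-based variant of option 3: mindsdb records when it was last started, and any model still marked "training" that was created before that moment is treated as a leftover from a killed run. A self-contained sketch of that filter (the model dicts and timestamps below are made up):

```python
# Minimal sketch, not the exact MindsDB code: drop 'training' models that
# predate the current mindsdb start, since their process cannot still be alive.
from datetime import datetime

def drop_stale_training(models, mindsdb_last_started_at):
    started = datetime.fromisoformat(mindsdb_last_started_at)
    return [
        m for m in models
        if m['status'] != 'training'
        or datetime.fromisoformat(m['created_at']) > started
    ]

models = [
    {'name': 'old_run', 'status': 'training', 'created_at': '2020-11-20 10:00:00'},
    {'name': 'fresh_run', 'status': 'training', 'created_at': '2020-11-25 18:30:00'},
    {'name': 'done', 'status': 'complete', 'created_at': '2020-11-19 09:00:00'},
]
print(drop_stale_training(models, '2020-11-25 18:00:00'))  # drops only 'old_run'
```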
Retrain is just not working now; I will remove it together with the model swapping map.
Most of the time is taken by the 'analyse' process, and there is no way to make it significantly faster. To make this route async we need to add statuses for datasources.
*If we merge the remote SQL datasource this should be fixed*
depends on https://github.com/mindsdb/mindsdb/issues/908
We can run analysis on `ds.filter(limit=200)`... or something like that
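A tiny illustrative helper for that idea; `run_analysis` is a stand-in for whatever produces the analysis, and the only assumption is a datasource object exposing `filter(limit=N)` as in the patch above:

```python
# Hypothetical helper: analyse only a small sample so the upload route returns
# quickly; the result may differ slightly from a full analysis.
def quick_analysis(ds, run_analysis, limit=200):
    sample = ds.filter(limit=limit)
    return run_analysis(sample)
```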
We need to add a test for this.
Or maybe we should just wait until we figure out how to remove SQLite entirely.
| 2020-11-25T15:57:06Z | [] | [] |
mindsdb/mindsdb | 975 | mindsdb__mindsdb-975 | [
"718"
] | 86bcb0c030834362f47bf1a93d538b4eea0aaac6 | diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -59,6 +59,8 @@ def put(self, name):
if integration is not None:
abort(400, f"Integration with name '{name}' already exists")
try:
+ if 'enabled' in params:
+ params['publish'] = params['enabled']
ca.config_obj.add_db_integration(name, params)
mdb = ca.mindsdb_native
@@ -92,6 +94,8 @@ def post(self, name):
if integration is None:
abort(400, f"Nothin to modify. '{name}' not exists.")
try:
+ if 'enabled' in params:
+ params['publish'] = params['enabled']
ca.config_obj.modify_db_integration(name, params)
DatabaseWrapper(ca.config_obj)
except Exception as e:
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
--- a/mindsdb/interfaces/database/database.py
+++ b/mindsdb/interfaces/database/database.py
@@ -24,7 +24,7 @@ def _get_integrations(self):
# @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time
integration_arr = []
for db_alias in self.config['integrations']:
- if self.config['integrations'][db_alias]['enabled']:
+ if self.config['integrations'][db_alias]['publish']:
db_type = self.config['integrations'][db_alias]['type']
if db_type == 'clickhouse':
integration_arr.append(Clickhouse(self.config, db_alias))
diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -28,28 +28,28 @@ def auto_config(python_path, pip_path, storage_dir):
},
"integrations": {
"default_clickhouse": {
- "enabled": False,
+ "publish": False,
"type": 'clickhouse'
},
"default_mariadb": {
- "enabled": False,
+ "publish": False,
"type": 'mariadb'
},
"default_mysql": {
- "enabled": False,
+ "publish": False,
"type": 'mysql'
},
"default_postgres": {
- "enabled": False,
+ "publish": False,
"type": 'postgres',
"database": 'postgres'
},
"default_mssql": {
- "enabled": False,
+ "publish": False,
"type": 'mssql'
},
"default_mongodb": {
- "enabled": False,
+ "publish": False,
"type": 'mongodb'
}
},
@@ -138,7 +138,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
clickhouse = _in('Connect to clickhouse ? [Y/N]', 'Y', use_default)
if clickhouse in ['Y', 'y']:
- config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration?: ', False, use_default)
+ config['integrations']['default_clickhouse']['publish'] = _in('Enable Clickhouse integration?: ', False, use_default)
config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ', '127.0.0.1', use_default)
config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ', 8123, use_default)
config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ', 'default', use_default)
@@ -147,7 +147,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mariadb = _in('Connect to Mariadb ? [Y/N]', 'Y', use_default)
if mariadb in ['Y', 'y']:
- config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration?: ', False, use_default)
+ config['integrations']['default_mariadb']['publish'] = _in('Enable Mariadb integration?: ', False, use_default)
config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ', '127.0.0.1', use_default)
config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ', 3306, use_default)
config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ', 'root', use_default)
@@ -156,7 +156,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mysql = _in('Connect to MySQL ? [Y/N]', 'Y', use_default)
if mysql in ['Y', 'y']:
- config['integrations']['default_mysql']['enabled'] = _in('Enable MySQL integration?: ', False, use_default)
+ config['integrations']['default_mysql']['publish'] = _in('Enable MySQL integration?: ', False, use_default)
config['integrations']['default_mysql']['host'] = _in('MySQL host: ', '127.0.0.1', use_default)
config['integrations']['default_mysql']['port'] = _in('MySQL port: ', 3306, use_default)
config['integrations']['default_mysql']['user'] = _in('MySQL user: ', 'root', use_default)
@@ -165,7 +165,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mysql = _in('Connect to PostgreSQL ? [Y/N]', 'Y', use_default)
if mysql in ['Y', 'y']:
- config['integrations']['default_postgres']['enabled'] = _in('Enable PostgreSQL integration?: ', False, use_default)
+ config['integrations']['default_postgres']['publish'] = _in('Enable PostgreSQL integration?: ', False, use_default)
config['integrations']['default_postgres']['host'] = _in('PostgreSQL host: ', '127.0.0.1', use_default)
config['integrations']['default_postgres']['port'] = _in('PostgreSQL port: ', 5432, use_default)
config['integrations']['default_postgres']['user'] = _in('PostgreSQL user: ', 'postgres', use_default)
@@ -175,7 +175,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mssql = _in('Connect to MSSQL ? [Y/N]', 'Y', use_default)
if mssql in ['Y', 'y']:
- config['integrations']['default_mssql']['enabled'] = _in('Enable MSSQL integration?: ', False, use_default)
+ config['integrations']['default_mssql']['publish'] = _in('Enable MSSQL integration?: ', False, use_default)
config['integrations']['default_mssql']['host'] = _in('MSSQL host: ', '127.0.0.1', use_default)
config['integrations']['default_mssql']['port'] = _in('MSSQL port: ', 1433, use_default)
config['integrations']['default_mssql']['user'] = _in('MSSQL user: ', 'sa', use_default)
@@ -185,7 +185,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mongodb = _in('Connect to MongoDB ? [Y/N]', 'Y', use_default)
if mongodb in ['Y', 'y']:
- config['integrations']['default_mongodb']['enabled'] = _in('Enable MongoDB integration?: ', False, use_default)
+ config['integrations']['default_mongodb']['publish'] = _in('Enable MongoDB integration?: ', False, use_default)
config['integrations']['default_mongodb']['host'] = _in('MongoDB host: ', '127.0.0.1', use_default)
config['integrations']['default_mongodb']['port'] = _in('MongoDB port: ', 27017, use_default)
config['integrations']['default_mongodb']['user'] = _in('MongoDB user: ', '', use_default)
@@ -193,7 +193,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
config['integrations']['default_mongodb']['type'] = 'mongodb'
for db_name in list(config['integrations'].keys()):
- if not config['integrations'][db_name]['enabled']:
+ if not config['integrations'][db_name]['publish']:
del config['integrations'][db_name]
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -32,10 +32,10 @@ def setUpClass(cls):
apis=['http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
},
'default_clickhouse': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=common.MINDSDB_DATABASE
@@ -60,7 +60,7 @@ def test_1_config(self):
for integration_name in integration_names['integrations']:
assert integration_name in self.initial_integrations_names
- test_integration_data = {'enabled': False, 'host': 'test', 'type': 'clickhouse'}
+ test_integration_data = {'publish': False, 'host': 'test', 'type': 'clickhouse'}
res = requests.put(f'{root}/config/integrations/test_integration', json={'params': test_integration_data})
assert res.status_code == 200
@@ -85,7 +85,7 @@ def test_1_config(self):
assert res.status_code == 200
integration = res.json()
- for k in ['enabled', 'host', 'port', 'type', 'user']:
+ for k in ['publish', 'host', 'port', 'type', 'user']:
assert k in integration
assert integration[k] is not None
assert integration['password'] is None
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -101,7 +101,7 @@
def prepare_config(config, mindsdb_database='mindsdb', override_integration_config={}, override_api_config={}, clear_storage=True):
for key in config._config['integrations']:
- config._config['integrations'][key]['enabled'] = False
+ config._config['integrations'][key]['publish'] = False
if USE_EXTERNAL_DB_SERVER:
with open(EXTERNAL_DB_CREDENTIALS, 'rt') as f:
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -20,7 +20,7 @@
"debug": true,
"integrations": {
"default_clickhouse": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "",
"port": 8123,
@@ -28,7 +28,7 @@
"user": "default"
},
"default_mariadb": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "root",
"port": 3306,
@@ -36,7 +36,7 @@
"user": "root"
},
"default_mongodb": {
- "enabled": false,
+ "publish": false,
"host": "127.0.0.1",
"password": "",
"port": 27001,
@@ -44,7 +44,7 @@
"user": ""
},
"default_mssql": {
- "enabled": true,
+ "publish": true,
"host": "127.0.0.1",
"odbc_driver_name": "MySQL ODBC 8.0 Unicode Driver",
"password": "password",
@@ -53,7 +53,7 @@
"user": "sa"
},
"default_mysql": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "root",
"port": 3307,
@@ -62,7 +62,7 @@
},
"default_postgres": {
"database": "postgres",
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "",
"port": 5432,
@@ -70,7 +70,7 @@
"user": "postgres"
},
"default_snowflake": {
- "enabled": false,
+ "publish": false,
"type": "snowflake"
}
},
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -98,7 +98,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_clickhouse': {
- 'enabled': True
+ 'publish': True
}
},
override_api_config={
diff --git a/tests/integration_tests/flows/test_custom_model.py b/tests/integration_tests/flows/test_custom_model.py
--- a/tests/integration_tests/flows/test_custom_model.py
+++ b/tests/integration_tests/flows/test_custom_model.py
@@ -80,7 +80,7 @@ def setUpClass(cls):
apis=['http', 'mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -83,7 +83,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mistakes.py b/tests/integration_tests/flows/test_mistakes.py
--- a/tests/integration_tests/flows/test_mistakes.py
+++ b/tests/integration_tests/flows/test_mistakes.py
@@ -46,7 +46,7 @@
class UserFlowTest_1(unittest.TestCase):
def test_1_wrong_integration(self):
'''
- start mindsdb with enabled integration with wrong password
+ start mindsdb with publish integration with wrong password
try create ds
change password to correct
'''
@@ -56,7 +56,7 @@ def test_1_wrong_integration(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True,
+ 'publish': True,
'password': 'broken'
}
},
@@ -117,7 +117,7 @@ def test_2_broke_analisys(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
@@ -157,7 +157,7 @@ def test_3_wrong_predictor(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
@@ -199,7 +199,7 @@ def test_4_wrong_prediction(self):
apis=['mysql', 'http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
diff --git a/tests/integration_tests/flows/test_mongo.py b/tests/integration_tests/flows/test_mongo.py
--- a/tests/integration_tests/flows/test_mongo.py
+++ b/tests/integration_tests/flows/test_mongo.py
@@ -53,7 +53,7 @@ def setUpClass(cls):
apis=['mongodb'],
override_integration_config={
'default_mongodb': {
- 'enabled': True,
+ 'publish': True,
'port': 27002,
'host': '127.0.0.1',
'type': 'mongodb',
diff --git a/tests/integration_tests/flows/test_mssql.py b/tests/integration_tests/flows/test_mssql.py
--- a/tests/integration_tests/flows/test_mssql.py
+++ b/tests/integration_tests/flows/test_mssql.py
@@ -59,7 +59,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mssql': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mysql.py b/tests/integration_tests/flows/test_mysql.py
--- a/tests/integration_tests/flows/test_mysql.py
+++ b/tests/integration_tests/flows/test_mysql.py
@@ -83,7 +83,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mysql': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_postgres.py b/tests/integration_tests/flows/test_postgres.py
--- a/tests/integration_tests/flows/test_postgres.py
+++ b/tests/integration_tests/flows/test_postgres.py
@@ -89,7 +89,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_postgres': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_user_flow_1.py b/tests/integration_tests/flows/test_user_flow_1.py
--- a/tests/integration_tests/flows/test_user_flow_1.py
+++ b/tests/integration_tests/flows/test_user_flow_1.py
@@ -123,7 +123,7 @@ def test_1_create_integration_via_http(self):
test_integration_data = {}
test_integration_data.update(config['integrations']['default_mariadb'])
- test_integration_data['enabled'] = True
+ test_integration_data['publish'] = True
test_integration_data['database_name'] = TEST_INTEGRATION
res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
assert res.status_code == 200
@@ -132,7 +132,7 @@ def test_1_create_integration_via_http(self):
assert res.status_code == 200
test_integration = res.json()
assert test_integration['password'] is None
- for key in ['user', 'port', 'host', 'enabled']:
+ for key in ['user', 'port', 'host', 'publish']:
assert test_integration[key] == test_integration_data[key]
def test_3_create_ds_from_sql_by_http(self):
diff --git a/tests/integration_tests/flows/test_user_flow_2.py b/tests/integration_tests/flows/test_user_flow_2.py
--- a/tests/integration_tests/flows/test_user_flow_2.py
+++ b/tests/integration_tests/flows/test_user_flow_2.py
@@ -137,7 +137,7 @@ def test_1_upload_ds(self):
def test_2_add_integration(self):
test_integration_data = {}
test_integration_data.update(config['integrations']['default_mariadb'])
- test_integration_data['enabled'] = True
+ test_integration_data['publish'] = True
test_integration_data['database_name'] = TEST_INTEGRATION
res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
assert res.status_code == 200
@@ -150,7 +150,7 @@ def test_3_restart_and_connect(self):
apis=['mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE,
| Datasource analysing refactoring
A couple of thoughts about analysing.
History of analysing in the 'upload ds' route:
1. Let's go back to the time when we only worked with 'file' datasources. We did not run analysis right after a file was uploaded; we ran it when the user wanted to see the analysis or to train a predictor.
2. We wanted to add a new feature: filtering/sorting of datasource data in Scout's 'data preview' screen. Doing that on a pandas.DataFrame is slow, so we decided to use an SQLite database. But how can we create that database if we don't know the file's column types? If we just copy the CSV into the SQLite db, all columns will be text, and so all filters in Scout will be text. So we need to analyse a datasource right after it is uploaded.
3. We added database-based datasources without any changes to the datasource upload flow. This is how the flow looks now: the route gets a query string and an 'integration' name from the user, the analysis process runs, and after it is done the SQLite db is created.
Problems with this flow:
1. the analysis can take a long time
2. we store a copy of the data from the real db in the SQLite db.
3. if the user trains a predictor from that datasource some time later, the predictor will be trained on the current database data, not on the SQLite data. But the 'datasource preview' will still show the data from SQLite.
4. every time (not quite every time, there is a timeout) the user opens 'datasource analysis', the analysis is re-run.
How I see the solution:
1. add statuses for datasources. Then we can set status='analysing' for a datasource being uploaded and run the analysis in the background. All interactions in Scout should be disabled for datasources with this status.
2. save the datasource analysis to a file in the datasource folder. The user should see the date/time of the last analysis and a 're-analyse' button (probably only for db-based datasources; for file-based ones it makes no sense). Then the 'datasource analysis' screen can be opened without waiting.
3. don't create an SQLite db for db-based datasources. The user already has the data in their database; we don't want to make a full copy of it.
4. the point above means we should disable the 'datasource preview' screen for db-based datasources. Why: it would be difficult to implement filtering/sorting/pagination for that data, at least at this moment.
| 1. On upload, run an analysis on very little data, and don't cache.
2. We split analysis into two endpoints `analyze_refresh` and `analyze`
3. `analyze_refresh` will run an analysis on the whole datasource, and then dump that together with the rest of the datasource info.
4. `analyze` will check if the datasource has an analysis associated with it and, if so, return that analysis; otherwise it will call `analyze_refresh` (a minimal sketch of this split follows)
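A minimal sketch of that split (not MindsDB's actual code): the analysis is dumped next to the datasource together with a timestamp, so `analyze` serves the cached result and only `analyze_refresh` recomputes it, which also covers the 'store the time' suggestion below:

```python
# Illustrative cache layout: analysis.json lives in the datasource folder;
# run_analysis is a stand-in for the real analysis call.
import json
import os
import time

def analyze_refresh(ds_dir, run_analysis):
    analysis = {'data': run_analysis(), 'analyzed_at': time.time()}
    with open(os.path.join(ds_dir, 'analysis.json'), 'w') as f:
        json.dump(analysis, f)
    return analysis

def analyze(ds_dir, run_analysis):
    path = os.path.join(ds_dir, 'analysis.json')
    if os.path.exists(path):
        with open(path) as f:
            return json.load(f)  # cached analysis plus the time it was produced
    return analyze_refresh(ds_dir, run_analysis)
```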
Store time when analysis was done (and return it together with the analysis) | 2020-11-25T17:18:42Z | [] | [] |
mindsdb/mindsdb | 996 | mindsdb__mindsdb-996 | [
"682",
"705",
"718"
] | 0f616504fc3bbda94f81f0161f2f69ad7121f178 | diff --git a/distributions/osx/build_installers.py b/distributions/osx/build_installers.py
new file mode 100644
--- /dev/null
+++ b/distributions/osx/build_installers.py
@@ -0,0 +1,25 @@
+import os
+import sys
+
+about = {}
+with open("mindsdb/__about__.py") as fp:
+ exec(fp.read(), about)
+
+with open('distributions/osx/install.py', 'r') as f:
+ install_py = f.read()
+
+if sys.argv[1] == 'beta':
+ LATEST_NAME = 'MindsDB_Latest_Beta'
+ FIXED_NAME = 'MindsDB_{}_Beta'.format(about['__version__'])
+elif sys.argv[1] == 'release':
+ LATEST_NAME = 'MindsDB_Latest'
+ FIXED_NAME = 'MindsDB-{}'.format(about['__version__'])
+
+with open('distributions/osx/latest.py', 'w+') as f:
+ f.write(install_py.replace('$name', LATEST_NAME).replace('$version', ''))
+
+with open('distributions/osx/fixed.py', 'w+') as f:
+ f.write(install_py.replace('$name', FIXED_NAME).replace('$version', about['__version__']))
+
+os.system('cd distributions/osx && pyinstaller latest.py -F --onefile -n {}-Setup'.format(LATEST_NAME))
+os.system('cd distributions/osx && pyinstaller fixed.py -F --onefile -n {}-Setup'.format(FIXED_NAME))
diff --git a/distributions/osx/install.py b/distributions/osx/install.py
new file mode 100644
--- /dev/null
+++ b/distributions/osx/install.py
@@ -0,0 +1,151 @@
+import os
+import sys
+import atexit
+import tarfile
+import subprocess
+import shutil
+import argparse
+
+import requests
+
+PIP_VERSION = "20.2.4"
+REQUIRED_UTILS = {"make": "brew install make",
+ "openssl": "brew install openssl",
+ "zlib": "brew install zlib",
+ "sqlite": "brew install sqlite",
+ "bzip2": "brew install bzip2",
+ "libiconv": "brew install libiconv",
+ "libzip": "brew install libzip"}
+
+def at_exit():
+ os.system('echo Press Enter to exit ...;read foo')
+
+def check_system_requirements():
+ res = subprocess.run(['which', 'brew'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if res.returncode:
+ print("brew is not installed. How to install: /usr/local/opt/[email protected]/bin/openssl")
+ sys.exit("please install required utils and try again")
+ is_good = True
+
+ for util in REQUIRED_UTILS:
+ res = subprocess.run(['brew', "--prefix", util], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if res.returncode:
+ print("{} is required but not installed. Installation instruction: {}".format(util, REQUIRED_UTILS[util]))
+ is_good = False
+ if not is_good:
+ sys.exit("please install required utils and try again")
+
+
+def make_dir(d):
+ if not os.path.isdir(d):
+ os.makedirs(d)
+
+def download_file(url):
+ filename = url.split('/')[-1]
+ with requests.get(url, stream=True) as r:
+ r.raise_for_status()
+ with open(filename, 'wb') as f:
+ for chunk in r.iter_content(chunk_size=8192):
+ f.write(chunk)
+ return filename
+
+def get_util_dir(util):
+ res = subprocess.check_output(["brew", "--prefix", util])
+ return res.decode('utf-8').rstrip()
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='MindsDB Installer')
+ parser.add_argument("--install_dir", type=str, default=os.getenv("HOME"))
+ parser.add_argument("--python_version", type=str, default="3.8.5")
+ args = parser.parse_args()
+
+ assert os.name == 'posix'
+ check_system_requirements()
+ atexit.register(at_exit)
+
+
+ # must be replaced in another script
+ NAME = '$name'
+ VERSION = '$version'
+
+ assert NAME != '$' + 'name'
+ assert VERSION != '$' + 'version'
+
+
+ PY_EMBED_URL = f'https://www.python.org/ftp/python/{args.python_version}/Python-{args.python_version}.tgz'
+ GET_PIP_URL = 'https://bootstrap.pypa.io/get-pip.py'
+ VC_REDIST_URL = 'https://aka.ms/vs/16/release/vc_redist.x64.exe'
+
+ INSTALL_DIR = os.path.join(os.path.abspath(args.install_dir), NAME)
+
+ PYTHON_SRC_DIR = os.path.join(INSTALL_DIR, f"Python-{args.python_version}")
+ PYTHON_DIR = os.path.join(INSTALL_DIR, 'python')
+
+
+ try:
+ if os.path.exists(INSTALL_DIR):
+ print(f"{INSTALL_DIR} is exist. it will be re-created")
+ shutil.rmtree(INSTALL_DIR)
+ make_dir(INSTALL_DIR)
+ except PermissionError as e:
+ print('Please, run the installer as administrator (use "sudo")')
+ os.system('pause')
+ sys.exit(1)
+
+
+ print('extracting python source files')
+ py_embed_filename = download_file(PY_EMBED_URL)
+ with tarfile.open(py_embed_filename, 'r:gz') as z:
+ z.extractall(INSTALL_DIR)
+ os.remove(py_embed_filename)
+
+ for util in ("openssl", "zlib", "sqlite"):
+ util_path = get_util_dir(util)
+ if util_path not in os.getenv("CPPFLAGS", ""):
+ os.environ["CPPFLAGS"] = "{} -I{}/include".format(os.getenv("CPPFLAGS", ""), util_path)
+ if util_path not in os.getenv("LDFLAGS", ""):
+ os.environ["LDFLAGS"] = "{} -L{}/lib".format(os.getenv("LDFLAGS", ""), util_path)
+ if util in ("zlib", "sqlite"):
+ os.environ["PKG_CONFIG_PATH"] = "{} {}/lib/pkgconfig".format(os.getenv("PKG_CONFIG_PATH", ""), util_path)
+ config_cmd = "cd {};./configure --prefix={} --with-openssl={} --with-ssl-default-suites=openssl --silent".format(PYTHON_SRC_DIR, PYTHON_DIR, get_util_dir("openssl"))
+
+
+ print("configuring python: {}".format(config_cmd))
+ print("this may take a while...")
+ print("CPPFLAGS: {}".format(os.getenv("CPPFLAGS", "")))
+ print("LDFLAGS: {}".format(os.getenv("LDFLAGS", "")))
+ print("PKG_CONFIG_PATH: {}".format(os.getenv("PKG_CONFIG_PATH", "")))
+ status = os.system(config_cmd)
+ if status:
+ sys.exit("python configuring process finished with error(s)")
+ print("installing python to %s" % PYTHON_DIR)
+ print("this may take a while...")
+
+ status = os.system("cd {}; make install --silent".format(PYTHON_SRC_DIR))
+ shutil.rmtree(PYTHON_SRC_DIR)
+ if status:
+ sys.exit("installation process finished with error(s)")
+
+
+ PYTHON_EXEC = os.path.join(PYTHON_DIR, 'bin/python3')
+ print("python location: {}".format(PYTHON_EXEC))
+ os.system("{} -V".format(PYTHON_EXEC))
+
+ # This is a workaround to avoid conflicts, since old mindsdb_native versions also install torch.
+ # Ideally we would just install mindsdb, and torch with the other dependencies would be installed properly as mindsdb dependencies.
+ if os.system('{} -m pip install "torch >= 1.4.0, <= 1.6.0" "torchvision >= 0.5.0, <= 0.7.0" -f https://download.pytorch.org/whl/torch_stable.html --no-warn-script-location'.format(PYTHON_EXEC)):
+ sys.exit("python packages installation finished with error(s)")
+ os.system('{} -m pip install --upgrade pip=={} --no-cache-dir'.format(PYTHON_EXEC, PIP_VERSION))
+ if VERSION == '':
+ os.system('{} -m pip install mindsdb --no-warn-script-location'.format(PYTHON_EXEC))
+ else:
+ os.system('{} -m pip install mindsdb=={} --no-warn-script-location'.format(PYTHON_EXEC, VERSION))
+
+ print('generating run_server.sh')
+ with open(os.path.join(INSTALL_DIR, 'run_server.sh'), 'w') as f:
+ lines = []
+ if VERSION == '':
+ lines.append('{} -m pip install mindsdb --upgrade --no-warn-script-location'.format(PYTHON_EXEC))
+ lines.append('{} -m mindsdb'.format(PYTHON_EXEC))
+ f.write('\n'.join(lines))
diff --git a/distributions/osx/sync_s3.py b/distributions/osx/sync_s3.py
new file mode 100644
--- /dev/null
+++ b/distributions/osx/sync_s3.py
@@ -0,0 +1,53 @@
+import os
+import boto3
+from pathlib import Path
+from bisect import bisect_left
+
+required_env_variables = [
+ 'AWS_ACCESS_KEY_ID',
+ 'AWS_SECRET_ACCESS_KEY',
+ 'AWS_REGION',
+ 'SOURCE_DIR',
+ 'DEST_DIR',
+ 'AWS_S3_BUCKET'
+]
+
+for var in required_env_variables:
+ if os.getenv(var) is None:
+ exit('environment variable {} is not set'.format(var))
+
+s3 = boto3.client(
+ 's3',
+ aws_access_key_id=os.getenv('AWS_ACCESS_KEY_ID'),
+ aws_secret_access_key=os.getenv('AWS_SECRET_ACCESS_KEY'),
+ region_name=os.getenv('AWS_REGION')
+)
+
+
+def list_source_objects(source_folder):
+ path = Path(source_folder)
+ paths = []
+ for file_path in path.rglob('*'):
+ if file_path.is_dir():
+ continue
+ paths.append(str(file_path))
+ return paths
+
+
+def sync(source, dest, bucket):
+ for path in list_source_objects(source_folder=source):
+ src_filename = Path(path).name
+ print('Uploading {} ({})'.format(path, src_filename))
+ s3.upload_file(
+ Filename=path,
+ Bucket=bucket,
+ Key=str(Path(dest).joinpath(src_filename).as_posix())
+ )
+
+
+if __name__ == '__main__':
+ sync(
+ source=os.getenv('SOURCE_DIR'),
+ dest=os.getenv('DEST_DIR'),
+ bucket=os.getenv('AWS_S3_BUCKET')
+ )
diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '2.13.8'
+__version__ = '2.14.0'
__description__ = "MindsDB server, provides server capabilities to mindsdb native python library"
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -5,6 +5,7 @@
import time
import asyncio
import logging
+import datetime
from pkg_resources import get_distribution
import torch.multiprocessing as mp
@@ -28,7 +29,7 @@
def close_api_gracefully(apis):
- try:
+ try:
for api in apis.values():
process = api['process']
sys.stdout.flush()
@@ -84,6 +85,8 @@ def close_api_gracefully(apis):
os.environ['DEFAULT_LOG_LEVEL'] = config['log']['level']['console']
os.environ['LIGHTWOOD_LOG_LEVEL'] = config['log']['level']['console']
+ config.set(['mindsdb_last_started_at'], str(datetime.datetime.now()))
+
initialize_log(config)
log = logging.getLogger('mindsdb.main')
diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -55,10 +55,18 @@ def put(self, name):
params = request.json.get('params')
if not isinstance(params, dict):
abort(400, "type of 'params' must be dict")
+
+ is_test = params.get('test', False)
+ if is_test:
+ del params['test']
+
integration = get_integration(name)
if integration is not None:
abort(400, f"Integration with name '{name}' already exists")
try:
+ if 'enabled' in params:
+ params['publish'] = params['enabled']
+ del params['enabled']
ca.config_obj.add_db_integration(name, params)
mdb = ca.mindsdb_native
@@ -69,6 +77,12 @@ def put(self, name):
except Exception as e:
print(traceback.format_exc())
abort(500, f'Error during config update: {str(e)}')
+
+ if is_test:
+ cons = dbw.check_connections()
+ ca.config_obj.remove_db_integration(name)
+ return {'success': cons[name]}, 200
+
return '', 200
@ns_conf.doc('delete_integration')
@@ -92,6 +106,9 @@ def post(self, name):
if integration is None:
abort(400, f"Nothin to modify. '{name}' not exists.")
try:
+ if 'enabled' in params:
+ params['publish'] = params['enabled']
+ del params['enabled']
ca.config_obj.modify_db_integration(name, params)
DatabaseWrapper(ca.config_obj)
except Exception as e:
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -2,7 +2,7 @@
import os
import threading
import tempfile
-
+import re
import multipart
import mindsdb
@@ -11,7 +11,6 @@
from flask_restx import Resource, abort # 'abort' using to return errors as json: {'message': 'error text'}
from flask import current_app as ca
-from mindsdb.interfaces.datastore.sqlite_helpers import *
from mindsdb.api.http.namespaces.configs.datasources import ns_conf
from mindsdb.api.http.namespaces.entitites.datasources.datasource import (
datasource_metadata,
@@ -30,6 +29,27 @@
)
+def parse_filter(key, value):
+ result = re.search(r'filter(_*.*)\[(.*)\]', key)
+ operator = result.groups()[0].strip('_') or 'like'
+ field = result.groups()[1]
+ operators_map = {
+ 'like': 'like',
+ 'in': 'in',
+ 'nin': 'not in',
+ 'gt': '>',
+ 'lt': '<',
+ 'gte': '>=',
+ 'lte': '<=',
+ 'eq': '=',
+ 'neq': '!='
+ }
+ if operator not in operators_map:
+ return None
+ operator = operators_map[operator]
+ return [field, operator, value]
+
+
@ns_conf.route('/')
class DatasourcesList(Resource):
@ns_conf.doc('get_datasources_list')
@@ -68,7 +88,6 @@ def put(self, name):
data = {}
def on_field(field):
- print(f'\n\n{field}\n\n')
name = field.field_name.decode()
value = field.value.decode()
data[name] = value
@@ -152,8 +171,6 @@ def get(self, name):
if name in ds_analysis:
if ds_analysis[name] is None:
return {'status': 'analyzing'}, 200
- elif (datetime.datetime.utcnow() - ds_analysis[name]['created_at']) > datetime.timedelta(seconds=3600):
- del ds_analysis[name]
else:
analysis = ds_analysis[name]['data']
return analysis, 200
@@ -167,6 +184,27 @@ def get(self, name):
x.start()
return {'status': 'analyzing'}, 200
+@ns_conf.route('/<name>/analyze_refresh')
+@ns_conf.param('name', 'Datasource name')
+class Analyze(Resource):
+ @ns_conf.doc('analyze_refresh_dataset')
+ def get(self, name):
+ global ds_analysis
+ if name in ds_analysis:
+ if ds_analysis[name] is None:
+ return {'status': 'analyzing'}, 200
+ else:
+ del ds_analysis[name]
+
+ ds = ca.default_store.get_datasource(name)
+ if ds is None:
+ print('No valid datasource given')
+ abort(400, 'No valid datasource given')
+
+ x = threading.Thread(target=analyzing_thread, args=(name, ca.default_store))
+ x.start()
+ return {'status': 'analyzing'}, 200
+
@ns_conf.route('/<name>/analyze_subset')
@ns_conf.param('name', 'Datasource name')
diff --git a/mindsdb/api/http/namespaces/entitites/predictor_metadata.py b/mindsdb/api/http/namespaces/entitites/predictor_metadata.py
--- a/mindsdb/api/http/namespaces/entitites/predictor_metadata.py
+++ b/mindsdb/api/http/namespaces/entitites/predictor_metadata.py
@@ -21,6 +21,7 @@
'model_analysis': fields.List(fields.Nested(target_column_metadata), required=False, description='The model analysis stage, in which we extract statistical information from the input data for each target variable, thus, this is a list; one item per target column')
,'data_analysis_v2': fields.Raw(default={})
,'is_custom': fields.Boolean(default=False)
+ ,'data_source': fields.String(required=False, description='The data source it\'s learning from')
})
predictor_query_params = OrderedDict([
diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -19,8 +19,6 @@
from mindsdb.api.http.namespaces.entitites.predictor_status import predictor_status
-model_swapping_map = {}
-
def debug_pkey_type(model, keys=None, reset_keyes=True, type_to_check=list, append_key=True):
if type(model) != dict:
return
@@ -122,8 +120,6 @@ def delete(self, name):
@ns_conf.doc('put_predictor', params=put_predictor_params)
def put(self, name):
'''Learning new predictor'''
- global model_swapping_map
-
data = request.json
to_predict = data.get('to_predict')
@@ -150,12 +146,15 @@ def put(self, name):
retrain = True
else:
retrain = False
- except:
+ except Exception:
retrain = None
ds_name = data.get('data_source_name') if data.get('data_source_name') is not None else data.get('from_data')
from_data = ca.default_store.get_datasource_obj(ds_name, raw=True)
+ if from_data is None:
+ return {'message': f'Can not find datasource: {ds_name}'}, 400
+
if retrain is True:
original_name = name
name = name + '_retrained'
@@ -171,12 +170,10 @@ def put(self, name):
if retrain is True:
try:
- model_swapping_map[original_name] = True
ca.mindsdb_native.delete_model(original_name)
ca.mindsdb_native.rename_model(name, original_name)
- model_swapping_map[original_name] = False
except:
- model_swapping_map[original_name] = False
+ pass
return '', 200
@@ -231,14 +228,12 @@ def get(self, name):
return columns, 200
-
@ns_conf.route('/<name>/predict')
@ns_conf.param('name', 'The predictor identifier')
class PredictorPredict(Resource):
@ns_conf.doc('post_predictor_predict', params=predictor_query_params)
def post(self, name):
'''Queries predictor'''
- global model_swapping_map
data = request.json
@@ -256,10 +251,6 @@ def post(self, name):
if type(kwargs) != type({}):
kwargs = {}
- # Not the fanciest semaphore, but should work since restplus is multi-threaded and this condition should rarely be reached
- while name in model_swapping_map and model_swapping_map[name] is True:
- time.sleep(1)
-
if is_custom(name):
return ca.custom_models.predict(name, when_data=when, **kwargs)
else:
@@ -273,7 +264,6 @@ def post(self, name):
class PredictorPredictFromDataSource(Resource):
@ns_conf.doc('post_predictor_predict', params=predictor_query_params)
def post(self, name):
- global model_swapping_map
data = request.json
from_data = ca.default_store.get_datasource_obj(data.get('data_source_name'), raw=True)
@@ -293,10 +283,6 @@ def post(self, name):
if type(kwargs) != type({}):
kwargs = {}
- # Not the fanciest semaphore, but should work since restplus is multi-threaded and this condition should rarely be reached
- while name in model_swapping_map and model_swapping_map[name] is True:
- time.sleep(1)
-
if is_custom(name):
return ca.custom_models.predict(name, from_data=from_data, **kwargs)
else:
diff --git a/mindsdb/interfaces/database/database.py b/mindsdb/interfaces/database/database.py
--- a/mindsdb/interfaces/database/database.py
+++ b/mindsdb/interfaces/database/database.py
@@ -24,7 +24,7 @@ def _get_integrations(self):
# @TODO Once we have a presistent state sorted out this should be simplified as to not refresh the existing integrations every single time
integration_arr = []
for db_alias in self.config['integrations']:
- if self.config['integrations'][db_alias]['enabled']:
+ if self.config['integrations'][db_alias]['publish']:
db_type = self.config['integrations'][db_alias]['type']
if db_type == 'clickhouse':
integration_arr.append(Clickhouse(self.config, db_alias))
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -5,12 +5,10 @@
import os
import pickle
-from mindsdb.interfaces.datastore.sqlite_helpers import get_sqlite_data, cast_df_columns_types, create_sqlite_db
from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb_native import FileDS, ClickhouseDS, MariaDS, MySqlDS, PostgresDS, MSSQLDS, MongoDS, SnowflakeDS
-
class DataStore():
def __init__(self, config):
self.config = config
@@ -40,8 +38,25 @@ def get_datasources(self):
return datasource_arr
def get_data(self, name, where=None, limit=None, offset=None):
- # @TODO Apply filter directly to postgres/mysql/clickhouse/etc... when the datasource is of that type
- return get_sqlite_data(os.path.join(self.dir, name, 'sqlite.db'), where=where, limit=limit, offset=offset)
+ if offset is None:
+ offset = 0
+
+ ds = self.get_datasource_obj(name)
+
+ # @TODO Remove and add `offset` to the `filter` method of the datasource
+ if limit is not None:
+ filtered_ds = ds.filter(where=where, limit=limit+offset)
+ else:
+ filtered_ds = ds.filter(where=where)
+
+ filtered_ds = filtered_ds.iloc[offset:]
+
+ data = filtered_ds.to_dict(orient='records')
+ return {
+ 'data': data,
+ 'rowcount': len(ds),
+ 'columns_names': filtered_ds.columns
+ }
def get_datasource(self, name):
for ds in self.get_datasources():
@@ -168,7 +183,6 @@ def save_datasource(self, name, source_type, source, file_path=None):
ds = dsClass(**picklable['kwargs'])
else:
# This probably only happens for urls
- print('Create URL data source !')
ds = FileDS(source)
picklable = {
'class': 'FileDS',
@@ -182,8 +196,8 @@ def save_datasource(self, name, source_type, source, file_path=None):
shutil.rmtree(ds_meta_dir)
raise Exception('Each column in datasource must have unique name')
- df_with_types = cast_df_columns_types(df, self.get_analysis(df)['data_analysis_v2'])
- create_sqlite_db(os.path.join(ds_meta_dir, 'sqlite.db'), df_with_types)
+ # Not sure if needed
+ #summary_analysis = self.get_analysis(ds.filter(limit=200))['data_analysis_v2']
with open(os.path.join(ds_meta_dir, 'ds.pickle'), 'wb') as fp:
pickle.dump(picklable, fp)
diff --git a/mindsdb/interfaces/datastore/sqlite_helpers.py b/mindsdb/interfaces/datastore/sqlite_helpers.py
deleted file mode 100644
--- a/mindsdb/interfaces/datastore/sqlite_helpers.py
+++ /dev/null
@@ -1,139 +0,0 @@
-import sqlite3
-from mindsdb_native.libs.constants.mindsdb import DATA_TYPES, DATA_SUBTYPES
-import re
-
-
-def create_sqlite_db(path, data_frame):
- con = sqlite3.connect(path)
- data_frame.to_sql(name='data', con=con, index=False)
- con.close()
-
-
-def cast_df_columns_types(df, stats):
- types_map = {
- DATA_TYPES.NUMERIC: {
- DATA_SUBTYPES.INT: 'int64',
- DATA_SUBTYPES.FLOAT: 'float64',
- DATA_SUBTYPES.BINARY: 'bool'
- },
- DATA_TYPES.DATE: {
- DATA_SUBTYPES.DATE: 'datetime64', # YYYY-MM-DD
- DATA_SUBTYPES.TIMESTAMP: 'datetime64' # YYYY-MM-DD hh:mm:ss or 1852362464
- },
- DATA_TYPES.CATEGORICAL: {
- DATA_SUBTYPES.SINGLE: 'category',
- DATA_SUBTYPES.MULTIPLE: 'category'
- },
- DATA_TYPES.FILE_PATH: {
- DATA_SUBTYPES.IMAGE: 'object',
- DATA_SUBTYPES.VIDEO: 'object',
- DATA_SUBTYPES.AUDIO: 'object'
- },
- DATA_TYPES.SEQUENTIAL: {
- DATA_SUBTYPES.ARRAY: 'object'
- },
- DATA_TYPES.TEXT: {
- DATA_SUBTYPES.SHORT: 'object',
- DATA_SUBTYPES.RICH: 'object'
- }
- }
-
- columns = [dict(name=x) for x in list(df.keys())]
-
- for column in columns:
- try:
- name = column['name']
- col_type = stats[name]['typing']['data_type']
- col_subtype = stats[name]['typing']['data_subtype']
- new_type = types_map[col_type][col_subtype]
- if new_type == 'int64' or new_type == 'float64':
- df[name] = df[name].apply(lambda x: x.replace(',', '.') if isinstance(x, str) else x)
- if new_type == 'int64':
- df = df.astype({name: 'float64'})
- df = df.astype({name: new_type})
- except Exception as e:
- print(e)
- print(f'Error: cant convert type of DS column {name} to {new_type}')
-
- return df
-
-
-def parse_filter(key, value):
- result = re.search(r'filter(_*.*)\[(.*)\]', key)
- operator = result.groups()[0].strip('_') or 'like'
- field = result.groups()[1]
- operators_map = {
- 'like': 'like',
- 'in': 'in',
- 'nin': 'not in',
- 'gt': '>',
- 'lt': '<',
- 'gte': '>=',
- 'lte': '<=',
- 'eq': '=',
- 'neq': '!='
- }
- if operator not in operators_map:
- return None
- operator = operators_map[operator]
- return {'field': field, 'value': value, 'operator': operator}
-
-
-def prepare_sql_where(where):
- marks = {}
- if len(where) > 0:
- for i in range(len(where)):
- field = where[i]['field'].replace('"', '""')
- operator = where[i]['operator']
- value = where[i]['value']
- var_name = f'var{i}'
- if ' ' in field:
- field = f'"{field}"'
- if operator == 'like':
- marks[var_name] = '%' + value + '%'
- else:
- marks[var_name] = value
- where[i] = f'{field} {operator} :var{i}'
- where = 'where ' + ' and '.join(where)
- else:
- where = ''
- return where, marks
-
-
-def get_sqlite_columns_names(cursor):
- cursor.execute('pragma table_info(data);')
- column_name_index = [x[0] for x in cursor.description].index('name')
- columns = cursor.fetchall()
- return [x[column_name_index] for x in columns]
-
-
-def get_sqlite_data(db_path, where, limit, offset):
- where = [] if where is None else where
-
- con = sqlite3.connect(db_path)
- cur = con.cursor()
-
- offset = '' if limit is None or offset is None else f'offset {offset}'
- limit = '' if limit is None else f'limit {limit}'
-
- columns_names = get_sqlite_columns_names(cur)
- where = [x for x in where if x['field'] in columns_names]
- where, marks = prepare_sql_where(where)
-
- count_query = ' '.join(['select count(1) from data', where])
- cur.execute(count_query, marks)
- rowcount = cur.fetchone()[0]
-
- query = ' '.join(['select * from data', where, limit, offset])
- cur.execute(query, marks)
- data = cur.fetchall()
- data = [dict(zip(columns_names, x)) for x in data]
-
- cur.close()
- con.close()
-
- return {
- 'data': data,
- 'rowcount': rowcount,
- 'columns_names': columns_names
- }
diff --git a/mindsdb/interfaces/native/mindsdb.py b/mindsdb/interfaces/native/mindsdb.py
--- a/mindsdb/interfaces/native/mindsdb.py
+++ b/mindsdb/interfaces/native/mindsdb.py
@@ -81,6 +81,7 @@ def get_models(self, status='any'):
models = F.get_models()
if status != 'any':
models = [x for x in models if x['status'] == status]
+ models = [x for x in models if x['status'] != 'training' or parse_datetime(x['created_at']) > parse_datetime(self.config['mindsdb_last_started_at'])]
for i in range(len(models)):
for k in ['train_end_at', 'updated_at', 'created_at']:
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py
--- a/mindsdb/utilities/config.py
+++ b/mindsdb/utilities/config.py
@@ -35,8 +35,9 @@
}
}
+
class Config(object):
- current_version = '1.3'
+ current_version = '1.4'
_config = {}
paths = {
'root': '',
@@ -57,7 +58,7 @@ def __init__(self, config_path):
self._config_hash = None
self._config = None
if isinstance(config_path, str):
- self.config_path = config_path
+ self.config_path = os.path.abspath(config_path)
self._read()
self._config_hash = self._gen_hash()
@@ -65,7 +66,7 @@ def __init__(self, config_path):
if os.path.isabs(storage_dir) is False:
storage_dir = os.path.normpath(
os.path.join(
- os.path.dirname(config_path),
+ os.path.dirname(self.config_path),
storage_dir
)
)
@@ -171,10 +172,23 @@ def m1_2(config):
config['config_version'] = '1.3'
return config
+ def m1_3(config):
+ ''' rename integration['enabled'] to integration['publish']
+ '''
+ for integration in config.get('integrations', []).values():
+ if 'enabled' in integration:
+ enabled = integration['enabled']
+ del integration['enabled']
+ integration['publish'] = enabled
+
+ config['config_version'] = '1.4'
+ return config
+
migrations = {
'1.0': m1_0,
'1.1': m1_1,
- '1.2': m1_2
+ '1.2': m1_2,
+ '1.3': m1_3
}
current_version = self._parse_version(self._config['config_version'])
@@ -269,9 +283,7 @@ def get_all(self):
return self._config
def set(self, key_chain, value, delete=False):
- with open(self.config_path, 'r') as fp:
- self._config = json.load(fp)
-
+ self._read()
c = self._config
for i, k in enumerate(key_chain):
if k in c and i + 1 < len(key_chain):
@@ -291,8 +303,8 @@ def add_db_integration(self, name, dict):
dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]
if 'database_name' not in dict:
dict['database_name'] = name
- if 'enabled' not in dict:
- dict['enabled'] = True
+ if 'publish' not in dict:
+ dict['publish'] = True
self.set(['integrations', name], dict)
diff --git a/mindsdb/utilities/fs.py b/mindsdb/utilities/fs.py
--- a/mindsdb/utilities/fs.py
+++ b/mindsdb/utilities/fs.py
@@ -40,8 +40,8 @@ def get_paths():
'/var/lib/mindsdb'
),
(
- '~/.local/etc/mindsdb',
- '~/.local/var/lib/mindsdb'
+ '{}/.local/etc/mindsdb'.format(Path.home()),
+ '{}/.local/var/lib/mindsdb'.format(Path.home())
)
])
@@ -51,9 +51,9 @@ def get_paths():
def get_or_create_dir_struct():
for tup in get_paths():
try:
- for dir in tup:
- assert(os.path.exists(dir))
- assert(os.access(dir, os.W_OK) is True)
+ for _dir in tup:
+ assert os.path.exists(_dir)
+ assert os.access(_dir, os.W_OK) is True
config_dir = tup[0]
if 'DEV_CONFIG_PATH' in os.environ:
@@ -65,9 +65,9 @@ def get_or_create_dir_struct():
for tup in get_paths():
try:
- for dir in tup:
- create_directory(dir)
- assert(os.access(dir, os.W_OK) is True)
+ for _dir in tup:
+ create_directory(_dir)
+ assert os.access(_dir, os.W_OK) is True
config_dir = tup[0]
if 'DEV_CONFIG_PATH' in os.environ:
diff --git a/mindsdb/utilities/functions.py b/mindsdb/utilities/functions.py
--- a/mindsdb/utilities/functions.py
+++ b/mindsdb/utilities/functions.py
@@ -42,7 +42,7 @@ def get_all_models_meta_data(mindsdb_native, custom_models):
'name': x['name'],
'predict': x['predict'],
'data_analysis': mindsdb_native.get_model_data(x['name'])['data_analysis_v2']
- } for x in mindsdb_native.get_models()
+ } for x in mindsdb_native.get_models() if x['status'] == 'complete'
]
model_data_arr.extend(custom_models.get_models())
diff --git a/mindsdb/utilities/wizards.py b/mindsdb/utilities/wizards.py
--- a/mindsdb/utilities/wizards.py
+++ b/mindsdb/utilities/wizards.py
@@ -23,33 +23,33 @@ def _in(ask, default, use_default):
def auto_config(python_path, pip_path, storage_dir):
config = {
"debug": False,
- "config_version": "1.3",
+ "config_version": "1.4",
"api": {
},
"integrations": {
"default_clickhouse": {
- "enabled": False,
+ "publish": False,
"type": 'clickhouse'
},
"default_mariadb": {
- "enabled": False,
+ "publish": False,
"type": 'mariadb'
},
"default_mysql": {
- "enabled": False,
+ "publish": False,
"type": 'mysql'
},
"default_postgres": {
- "enabled": False,
+ "publish": False,
"type": 'postgres',
"database": 'postgres'
},
"default_mssql": {
- "enabled": False,
+ "publish": False,
"type": 'mssql'
},
"default_mongodb": {
- "enabled": False,
+ "publish": False,
"type": 'mongodb'
}
},
@@ -138,7 +138,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
clickhouse = _in('Connect to clickhouse ? [Y/N]', 'Y', use_default)
if clickhouse in ['Y', 'y']:
- config['integrations']['default_clickhouse']['enabled'] = _in('Enable Clickhouse integration?: ', False, use_default)
+ config['integrations']['default_clickhouse']['publish'] = _in('Enable Clickhouse integration?: ', False, use_default)
config['integrations']['default_clickhouse']['host'] = _in('Clickhouse host: ', '127.0.0.1', use_default)
config['integrations']['default_clickhouse']['port'] = _in('Clickhouse port: ', 8123, use_default)
config['integrations']['default_clickhouse']['user'] = _in('Clickhouse user: ', 'default', use_default)
@@ -147,7 +147,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mariadb = _in('Connect to Mariadb ? [Y/N]', 'Y', use_default)
if mariadb in ['Y', 'y']:
- config['integrations']['default_mariadb']['enabled'] = _in('Enable Mariadb integration?: ', False, use_default)
+ config['integrations']['default_mariadb']['publish'] = _in('Enable Mariadb integration?: ', False, use_default)
config['integrations']['default_mariadb']['host'] = _in('Mariadb host: ', '127.0.0.1', use_default)
config['integrations']['default_mariadb']['port'] = _in('Mariadb port: ', 3306, use_default)
config['integrations']['default_mariadb']['user'] = _in('Mariadb user: ', 'root', use_default)
@@ -156,7 +156,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mysql = _in('Connect to MySQL ? [Y/N]', 'Y', use_default)
if mysql in ['Y', 'y']:
- config['integrations']['default_mysql']['enabled'] = _in('Enable MySQL integration?: ', False, use_default)
+ config['integrations']['default_mysql']['publish'] = _in('Enable MySQL integration?: ', False, use_default)
config['integrations']['default_mysql']['host'] = _in('MySQL host: ', '127.0.0.1', use_default)
config['integrations']['default_mysql']['port'] = _in('MySQL port: ', 3306, use_default)
config['integrations']['default_mysql']['user'] = _in('MySQL user: ', 'root', use_default)
@@ -165,7 +165,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mysql = _in('Connect to PostgreSQL ? [Y/N]', 'Y', use_default)
if mysql in ['Y', 'y']:
- config['integrations']['default_postgres']['enabled'] = _in('Enable PostgreSQL integration?: ', False, use_default)
+ config['integrations']['default_postgres']['publish'] = _in('Enable PostgreSQL integration?: ', False, use_default)
config['integrations']['default_postgres']['host'] = _in('PostgreSQL host: ', '127.0.0.1', use_default)
config['integrations']['default_postgres']['port'] = _in('PostgreSQL port: ', 5432, use_default)
config['integrations']['default_postgres']['user'] = _in('PostgreSQL user: ', 'postgres', use_default)
@@ -175,7 +175,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mssql = _in('Connect to MSSQL ? [Y/N]', 'Y', use_default)
if mssql in ['Y', 'y']:
- config['integrations']['default_mssql']['enabled'] = _in('Enable MSSQL integration?: ', False, use_default)
+ config['integrations']['default_mssql']['publish'] = _in('Enable MSSQL integration?: ', False, use_default)
config['integrations']['default_mssql']['host'] = _in('MSSQL host: ', '127.0.0.1', use_default)
config['integrations']['default_mssql']['port'] = _in('MSSQL port: ', 1433, use_default)
config['integrations']['default_mssql']['user'] = _in('MSSQL user: ', 'sa', use_default)
@@ -185,7 +185,7 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
mongodb = _in('Connect to MongoDB ? [Y/N]', 'Y', use_default)
if mongodb in ['Y', 'y']:
- config['integrations']['default_mongodb']['enabled'] = _in('Enable MongoDB integration?: ', False, use_default)
+ config['integrations']['default_mongodb']['publish'] = _in('Enable MongoDB integration?: ', False, use_default)
config['integrations']['default_mongodb']['host'] = _in('MongoDB host: ', '127.0.0.1', use_default)
config['integrations']['default_mongodb']['port'] = _in('MongoDB port: ', 27017, use_default)
config['integrations']['default_mongodb']['user'] = _in('MongoDB user: ', '', use_default)
@@ -193,10 +193,9 @@ def cli_config(python_path, pip_path, storage_dir, config_dir, use_default=False
config['integrations']['default_mongodb']['type'] = 'mongodb'
for db_name in list(config['integrations'].keys()):
- if not config['integrations'][db_name]['enabled']:
+ if not config['integrations'][db_name]['publish']:
del config['integrations'][db_name]
-
config_path = os.path.join(config_dir, 'config.json')
with open(config_path, 'w') as fp:
json.dump(config, fp, indent=4, sort_keys=True)
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -32,10 +32,10 @@ def setUpClass(cls):
apis=['http'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
},
'default_clickhouse': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=common.MINDSDB_DATABASE
@@ -60,7 +60,7 @@ def test_1_config(self):
for integration_name in integration_names['integrations']:
assert integration_name in self.initial_integrations_names
- test_integration_data = {'enabled': False, 'host': 'test', 'type': 'clickhouse'}
+ test_integration_data = {'publish': False, 'host': 'test', 'type': 'clickhouse'}
res = requests.put(f'{root}/config/integrations/test_integration', json={'params': test_integration_data})
assert res.status_code == 200
@@ -85,7 +85,7 @@ def test_1_config(self):
assert res.status_code == 200
integration = res.json()
- for k in ['enabled', 'host', 'port', 'type', 'user']:
+ for k in ['publish', 'host', 'port', 'type', 'user']:
assert k in integration
assert integration[k] is not None
assert integration['password'] is None
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -18,13 +18,16 @@
from mindsdb.utilities.ps import wait_port, is_port_in_use
from mindsdb_native import CONFIG
+
+HTTP_API_ROOT = 'http://localhost:47334/api'
+
DATASETS_PATH = os.getenv('DATASETS_PATH')
USE_EXTERNAL_DB_SERVER = bool(int(os.getenv('USE_EXTERNAL_DB_SERVER') or "1"))
EXTERNAL_DB_CREDENTIALS = str(Path.home().joinpath('.mindsdb_credentials.json'))
-MINDSDB_DATABASE = f'mindsdb_{int(time.time()*1000)}' if USE_EXTERNAL_DB_SERVER else 'mindsdb'
+MINDSDB_DATABASE = 'mindsdb'
dir_path = os.path.dirname(os.path.realpath(__file__))
@@ -96,9 +99,9 @@
}
-def prepare_config(config, mindsdb_database='mindsdb', override_integration_config={}, override_api_config={}):
+def prepare_config(config, mindsdb_database='mindsdb', override_integration_config={}, override_api_config={}, clear_storage=True):
for key in config._config['integrations']:
- config._config['integrations'][key]['enabled'] = False
+ config._config['integrations'][key]['publish'] = False
if USE_EXTERNAL_DB_SERVER:
with open(EXTERNAL_DB_CREDENTIALS, 'rt') as f:
@@ -120,7 +123,7 @@ def prepare_config(config, mindsdb_database='mindsdb', override_integration_conf
config['api']['mongodb']['database'] = mindsdb_database
storage_dir = TEMP_DIR.joinpath('storage')
- if storage_dir.is_dir():
+ if storage_dir.is_dir() and clear_storage:
shutil.rmtree(str(storage_dir))
config._config['storage_dir'] = str(storage_dir)
@@ -164,6 +167,8 @@ def open_ssh_tunnel(port, direction='R'):
config._config['api']['mysql']['port'] = mindsdb_port
config._config['api']['mongodb']['port'] = mindsdb_port
+ MINDSDB_DATABASE = f'mindsdb_{mindsdb_port}'
+
with open(EXTERNAL_DB_CREDENTIALS, 'rt') as f:
credentials = json.loads(f.read())
override = {}
@@ -179,8 +184,9 @@ def make_test_csv(name, data):
return str(test_csv_path)
-def stop_mindsdb(sp):
- sp.kill()
+def stop_mindsdb(sp=None):
+ if sp:
+ sp.kill()
sp = subprocess.Popen('kill -9 $(lsof -t -i:47334)', shell=True)
sp.wait()
sp = subprocess.Popen('kill -9 $(lsof -t -i:47335)', shell=True)
@@ -189,8 +195,8 @@ def stop_mindsdb(sp):
sp.wait()
-def run_environment(config, apis=['mysql'], override_integration_config={}, override_api_config={}, mindsdb_database='mindsdb'):
- temp_config_path = prepare_config(config, mindsdb_database, override_integration_config, override_api_config)
+def run_environment(config, apis=['mysql'], override_integration_config={}, override_api_config={}, mindsdb_database='mindsdb', clear_storage=True):
+ temp_config_path = prepare_config(config, mindsdb_database, override_integration_config, override_api_config, clear_storage)
config = Config(temp_config_path)
api_str = ','.join(apis)
@@ -220,6 +226,8 @@ async def wait_apis_start(ports):
ports_to_wait = [config['api'][api]['port'] for api in apis]
ioloop = asyncio.get_event_loop()
+ if ioloop.is_closed():
+ ioloop = asyncio.new_event_loop()
success = ioloop.run_until_complete(wait_apis_start(ports_to_wait))
ioloop.close()
if not success:
diff --git a/tests/integration_tests/flows/config/config.json b/tests/integration_tests/flows/config/config.json
--- a/tests/integration_tests/flows/config/config.json
+++ b/tests/integration_tests/flows/config/config.json
@@ -20,7 +20,7 @@
"debug": true,
"integrations": {
"default_clickhouse": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "",
"port": 8123,
@@ -28,7 +28,7 @@
"user": "default"
},
"default_mariadb": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "root",
"port": 3306,
@@ -36,7 +36,7 @@
"user": "root"
},
"default_mongodb": {
- "enabled": false,
+ "publish": false,
"host": "127.0.0.1",
"password": "",
"port": 27001,
@@ -44,7 +44,7 @@
"user": ""
},
"default_mssql": {
- "enabled": true,
+ "publish": true,
"host": "127.0.0.1",
"odbc_driver_name": "MySQL ODBC 8.0 Unicode Driver",
"password": "password",
@@ -53,7 +53,7 @@
"user": "sa"
},
"default_mysql": {
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "root",
"port": 3307,
@@ -62,7 +62,7 @@
},
"default_postgres": {
"database": "postgres",
- "enabled": true,
+ "publish": true,
"host": "localhost",
"password": "",
"port": 5432,
@@ -70,7 +70,7 @@
"user": "postgres"
},
"default_snowflake": {
- "enabled": false,
+ "publish": false,
"type": "snowflake"
}
},
diff --git a/tests/integration_tests/flows/http_test_helpers.py b/tests/integration_tests/flows/http_test_helpers.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/http_test_helpers.py
@@ -0,0 +1,65 @@
+import requests
+import time
+
+from common import HTTP_API_ROOT
+
+
+def get_predictors_list():
+ res = requests.get(f'{HTTP_API_ROOT}/predictors/')
+ assert res.status_code == 200
+ return res.json()
+
+
+def get_predictors_names_list():
+ predictors = get_predictors_list()
+ return [x['name'] for x in predictors]
+
+
+def check_predictor_exists(name):
+ assert name in get_predictors_names_list()
+
+
+def check_predictor_not_exists(name):
+ assert name not in get_predictors_names_list()
+
+
+def get_predictor_data(name):
+ predictors = get_predictors_list()
+ for p in predictors:
+ if p['name'] == name:
+ return p
+ return None
+
+
+def check_ds_not_exists(ds_name):
+ res = requests.get(f'{HTTP_API_ROOT}/datasources')
+ assert res.status_code == 200
+ ds_names = [x['name'] for x in res.json()]
+ assert ds_name not in ds_names
+
+
+def check_ds_exists(ds_name):
+ res = requests.get(f'{HTTP_API_ROOT}/datasources')
+ assert res.status_code == 200
+ ds_names = [x['name'] for x in res.json()]
+ assert ds_name in ds_names
+
+
+def check_ds_analyzable(ds_name):
+ start_time = time.time()
+ analyze_done = False
+ while analyze_done is False and (time.time() - start_time) < 30:
+ res = requests.get(f'{HTTP_API_ROOT}/datasources/{ds_name}/analyze')
+ assert res.status_code == 200
+ analyze_done = res.json().get('status', '') != 'analyzing'
+ time.sleep(1)
+ assert analyze_done
+
+
+def wait_predictor_learn(predictor_name):
+ start_time = time.time()
+ learn_done = False
+ while learn_done is False and (time.time() - start_time) < 120:
+ learn_done = get_predictor_data(predictor_name)['status'] == 'complete'
+ time.sleep(1)
+ assert learn_done
diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -98,7 +98,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_clickhouse': {
- 'enabled': True
+ 'publish': True
}
},
override_api_config={
diff --git a/tests/integration_tests/flows/test_custom_model.py b/tests/integration_tests/flows/test_custom_model.py
--- a/tests/integration_tests/flows/test_custom_model.py
+++ b/tests/integration_tests/flows/test_custom_model.py
@@ -80,7 +80,7 @@ def setUpClass(cls):
apis=['http', 'mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mariadb.py b/tests/integration_tests/flows/test_mariadb.py
--- a/tests/integration_tests/flows/test_mariadb.py
+++ b/tests/integration_tests/flows/test_mariadb.py
@@ -83,7 +83,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mariadb': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mistakes.py b/tests/integration_tests/flows/test_mistakes.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/test_mistakes.py
@@ -0,0 +1,221 @@
+import unittest
+import requests
+import asyncio
+import time
+
+from mindsdb.utilities.config import Config
+
+from common import (
+ MINDSDB_DATABASE,
+ HTTP_API_ROOT,
+ TEST_CONFIG,
+ run_environment,
+ stop_mindsdb
+)
+
+from http_test_helpers import (
+ wait_predictor_learn,
+ check_predictor_not_exists,
+ check_ds_not_exists,
+ check_ds_exists
+)
+
+# +++ define test data
+TEST_DATASET = 'us_health_insurance'
+
+TO_PREDICT = {
+ 'smoker': str
+}
+CONDITION = {
+ 'age': 20,
+ 'sex': 'female'
+}
+# ---
+
+TEST_DATA_TABLE = TEST_DATASET
+TEST_PREDICTOR_NAME = f'{TEST_DATASET}_predictor'
+EXTERNAL_DS_NAME = f'{TEST_DATASET}_external'
+
+TEST_INTEGRATION = 'test_integration'
+TEST_DS = 'test_ds'
+TEST_PREDICTOR = 'test_predictor'
+
+config = Config(TEST_CONFIG)
+
+
+class UserFlowTest_1(unittest.TestCase):
+ def test_1_wrong_integration(self):
+ '''
+ start mindsdb with a published integration that has a wrong password,
+ try to create a DS,
+ then change the password back to the correct one
+ '''
+ original_db_password = config['integrations']['default_mariadb']['password']
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'publish': True,
+ 'password': 'broken'
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE
+ )
+
+ check_ds_not_exists(TEST_DS)
+
+ # TODO: creating a DS from a non-existent integration raises a non-critical error in the code.
+ # This needs to be fixed to return a human-readable error.
+ # related issue: https://github.com/mindsdb/mindsdb/issues/945
+ # data = {
+ # "integration_id": 'unexists_integration',
+ # "name": TEST_DS,
+ # "query": f"select * from test_data.{TEST_DATASET} limit 50;"
+ # }
+ # res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ # assert res ?
+
+ # check create DS with wrong integration password
+ data = {
+ "integration_id": 'default_mariadb',
+ "name": TEST_DS,
+ "query": f"select * from test_data.{TEST_DATASET} limit 100;"
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ assert 'Access denied for user' in res.json()['message']
+
+ check_ds_not_exists(TEST_DS)
+
+ # restore password
+ res = requests.post(
+ f'{HTTP_API_ROOT}/config/integrations/default_mariadb',
+ json={'params': {'password': original_db_password}}
+ )
+ assert res.status_code == 200
+ config['integrations']['default_mariadb']['password'] = original_db_password
+
+ def test_2_broke_analisys(self):
+ '''
+ stop mindsdb while the dataset is being analysed
+ '''
+ data = {
+ "integration_id": 'default_mariadb',
+ "name": TEST_DS,
+ "query": f"select * from test_data.{TEST_DATASET} limit 100;"
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ assert res.status_code == 200
+
+ res = requests.get(f'{HTTP_API_ROOT}/datasources/{TEST_DS}/analyze')
+ assert res.status_code == 200
+
+ stop_mindsdb()
+
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'publish': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+
+ check_ds_exists(TEST_DS)
+
+ def test_3_wrong_predictor(self):
+ '''
+ try to create a predictor with wrong parameters,
+ close mindsdb while the model is training,
+ check that mindsdb can start again
+ '''
+ check_predictor_not_exists(TEST_PREDICTOR)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': 'wrong ds'
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}', json=data)
+ assert 'Can not find datasource' in res.json()['message']
+
+ check_predictor_not_exists(TEST_PREDICTOR)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': TEST_DS
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}', json=data)
+ assert res.status_code == 200
+
+ stop_mindsdb()
+
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'publish': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+
+ # TODO add after this issue will be closed: https://github.com/mindsdb/mindsdb/issues/948
+ # check_predictor_not_exists(TEST_PREDICTOR)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': TEST_DS
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}_2', json=data)
+ assert res.status_code == 200
+
+ wait_predictor_learn(f'{TEST_PREDICTOR}_2')
+
+ def test_4_wrong_prediction(self):
+ '''
+ close mindsdb while making a prediction, then try to run it again
+ '''
+ ioloop = asyncio.get_event_loop()
+ if ioloop.is_closed():
+ ioloop = asyncio.new_event_loop()
+ ioloop.run_in_executor(
+ None,
+ lambda: requests.post(
+ f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}_2/predict',
+ json={'when': CONDITION}
+ )
+ )
+ time.sleep(0.5)
+ stop_mindsdb()
+ ioloop.close()
+
+ self.mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'publish': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+
+ res = requests.post(
+ f'{HTTP_API_ROOT}/predictors/{TEST_PREDICTOR}_2/predict',
+ json={'when': CONDITION}
+ )
+ assert res.status_code == 200
+
+
+if __name__ == "__main__":
+ try:
+ unittest.main(failfast=True)
+ print('Tests passed!')
+ except Exception as e:
+ print(f'Tests Failed!\n{e}')
diff --git a/tests/integration_tests/flows/test_mongo.py b/tests/integration_tests/flows/test_mongo.py
--- a/tests/integration_tests/flows/test_mongo.py
+++ b/tests/integration_tests/flows/test_mongo.py
@@ -53,7 +53,7 @@ def setUpClass(cls):
apis=['mongodb'],
override_integration_config={
'default_mongodb': {
- 'enabled': True,
+ 'publish': True,
'port': 27002,
'host': '127.0.0.1',
'type': 'mongodb',
diff --git a/tests/integration_tests/flows/test_mssql.py b/tests/integration_tests/flows/test_mssql.py
--- a/tests/integration_tests/flows/test_mssql.py
+++ b/tests/integration_tests/flows/test_mssql.py
@@ -59,7 +59,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mssql': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_mysql.py b/tests/integration_tests/flows/test_mysql.py
--- a/tests/integration_tests/flows/test_mysql.py
+++ b/tests/integration_tests/flows/test_mysql.py
@@ -83,7 +83,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_mysql': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
@@ -137,14 +137,13 @@ def test_1_initial_state(self):
def test_2_insert_predictor(self):
print(f'\nExecuting {inspect.stack()[0].function}')
- # FIXME add {"stop_training_in_x_seconds": 3} after https://github.com/mindsdb/mindsdb_native/issues/291
query(f"""
insert into {MINDSDB_DATABASE}.predictors (name, predict, select_data_query, training_options) values
(
'{TEST_PREDICTOR_NAME}',
'{','.join(to_predict_column_names)}',
'select * from test_data.{TEST_DATA_TABLE} limit 50',
- '{{"join_learn_process": true}}'
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
@@ -169,7 +168,7 @@ def test_3_externael_ds(self):
'{name}',
'{','.join(to_predict_column_names)}',
'{EXTERNAL_DS_NAME}',
- '{{"join_learn_process": true}}'
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
);
""")
diff --git a/tests/integration_tests/flows/test_postgres.py b/tests/integration_tests/flows/test_postgres.py
--- a/tests/integration_tests/flows/test_postgres.py
+++ b/tests/integration_tests/flows/test_postgres.py
@@ -89,7 +89,7 @@ def setUpClass(cls):
apis=['mysql'],
override_integration_config={
'default_postgres': {
- 'enabled': True
+ 'publish': True
}
},
mindsdb_database=MINDSDB_DATABASE
diff --git a/tests/integration_tests/flows/test_user_flow_1.py b/tests/integration_tests/flows/test_user_flow_1.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/test_user_flow_1.py
@@ -0,0 +1,232 @@
+import unittest
+import requests
+from pathlib import Path
+
+import mysql.connector
+
+from mindsdb.utilities.config import Config
+
+from common import (
+ USE_EXTERNAL_DB_SERVER,
+ DATASETS_COLUMN_TYPES,
+ MINDSDB_DATABASE,
+ DATASETS_PATH,
+ HTTP_API_ROOT,
+ TEST_CONFIG,
+ run_environment,
+ make_test_csv,
+ upload_csv
+)
+
+from http_test_helpers import (
+ wait_predictor_learn,
+ check_predictor_exists,
+ check_predictor_not_exists,
+ check_ds_not_exists,
+ check_ds_exists,
+ check_ds_analyzable
+)
+
+# +++ define test data
+TEST_DATASET = 'us_health_insurance'
+
+DB_TYPES_MAP = {
+ int: 'int',
+ float: 'float',
+ str: 'varchar(255)'
+}
+
+TO_PREDICT = {
+ # 'charges': float,
+ 'smoker': str
+}
+CONDITION = {
+ 'age': 20,
+ 'sex': 'female'
+}
+# ---
+
+TEST_DATA_TABLE = TEST_DATASET
+TEST_PREDICTOR_NAME = f'{TEST_DATASET}_predictor'
+EXTERNAL_DS_NAME = f'{TEST_DATASET}_external'
+
+TEST_INTEGRATION = 'test_integration'
+TEST_DS = 'test_ds'
+TEST_DS_CSV = 'test_ds_csv'
+TEST_PREDICTOR = 'test_predictor'
+TEST_PREDICTOR_CSV = 'test_predictor_csv'
+
+config = Config(TEST_CONFIG)
+
+
+def query(q, as_dict=False, fetch=False):
+ con = mysql.connector.connect(
+ host=config['integrations']['default_mariadb']['host'],
+ port=config['integrations']['default_mariadb']['port'],
+ user=config['integrations']['default_mariadb']['user'],
+ passwd=config['integrations']['default_mariadb']['password']
+ )
+
+ cur = con.cursor(dictionary=as_dict)
+ cur.execute(q)
+ res = True
+ if fetch:
+ res = cur.fetchall()
+ con.commit()
+ con.close()
+ return res
+
+
+def fetch(q, as_dict=True):
+ return query(q, as_dict, fetch=True)
+
+
+class UserFlowTest_1(unittest.TestCase):
+ def get_tables_in(self, schema):
+ test_tables = fetch(f'show tables from {schema}', as_dict=False)
+ return [x[0] for x in test_tables]
+
+ @classmethod
+ def setUpClass(cls):
+ mdb, datastore = run_environment(
+ config,
+ apis=['mysql', 'http'],
+ mindsdb_database=MINDSDB_DATABASE
+ )
+ cls.mdb = mdb
+
+ query('create database if not exists test_data')
+
+ if not USE_EXTERNAL_DB_SERVER:
+ test_csv_path = Path(DATASETS_PATH).joinpath(TEST_DATASET).joinpath('data.csv')
+ if TEST_DATA_TABLE not in cls.get_tables_in(cls, 'test_data'):
+ print('creating test data table...')
+ upload_csv(
+ query=query,
+ columns_map=DATASETS_COLUMN_TYPES[TEST_DATASET],
+ db_types_map=DB_TYPES_MAP,
+ table_name=TEST_DATA_TABLE,
+ csv_path=test_csv_path
+ )
+
+ data = fetch(f'select * from test_data.{TEST_DATA_TABLE} limit 50', as_dict=True)
+ cls.external_datasource_csv_path = make_test_csv(EXTERNAL_DS_NAME, data)
+
+ def test_1_create_integration_via_http(self):
+ '''
+ check the integration does not exist,
+ create the integration,
+ check the new integration's values
+ '''
+ res = requests.get(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}')
+ assert res.status_code == 404
+
+ test_integration_data = {}
+ test_integration_data.update(config['integrations']['default_mariadb'])
+ test_integration_data['publish'] = True
+ test_integration_data['database_name'] = TEST_INTEGRATION
+ res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
+ assert res.status_code == 200
+
+ res = requests.get(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}')
+ assert res.status_code == 200
+ test_integration = res.json()
+ assert test_integration['password'] is None
+ for key in ['user', 'port', 'host', 'publish']:
+ assert test_integration[key] == test_integration_data[key]
+
+ def test_3_create_ds_from_sql_by_http(self):
+ '''
+ check there is no DS with this name,
+ create the DS,
+ analyse it
+ '''
+ check_ds_not_exists(TEST_DS)
+
+ data = {
+ "integration_id": TEST_INTEGRATION,
+ "name": TEST_DS,
+ "query": f"select * from test_data.{TEST_DATASET} limit 100;"
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
+ assert res.status_code == 200
+
+ check_ds_exists(TEST_DS)
+ check_ds_analyzable(TEST_DS)
+
+ def test_4_create_ds_from_csv_by_http(self):
+ '''
+ same for csv-ds
+ '''
+ check_ds_not_exists(TEST_DS_CSV)
+
+ with open(self.external_datasource_csv_path, 'rb') as f:
+ d = f.read()
+ res = requests.put(
+ f'{HTTP_API_ROOT}/datasources/{TEST_DS_CSV}',
+ files={
+ 'file': ('data.csv', d, 'text/csv'),
+ 'name': (None, TEST_DS_CSV),
+ 'source_type': (None, 'file'),
+ 'source': (None, 'data.csv')
+ }
+ )
+ assert res.status_code == 200
+
+ check_ds_exists(TEST_DS_CSV)
+ check_ds_analyzable(TEST_DS_CSV)
+
+ def test_5_create_and_query_predictors(self):
+ '''
+ check the predictor does not exist,
+ learn the predictor,
+ query it
+ '''
+ def test_predictor(predictior_name, datasource_name):
+ check_predictor_not_exists(predictior_name)
+
+ data = {
+ 'to_predict': list(TO_PREDICT.keys()),
+ 'data_source_name': datasource_name
+ }
+ res = requests.put(f'{HTTP_API_ROOT}/predictors/{predictior_name}', json=data)
+ assert res.status_code == 200
+
+ check_predictor_exists(predictior_name)
+
+ wait_predictor_learn(predictior_name)
+
+ res = requests.post(
+ f'{HTTP_API_ROOT}/predictors/{predictior_name}/predict',
+ json={'when': CONDITION}
+ )
+ assert res.status_code == 200
+ res = res.json()
+ assert len(res) == 1
+ res = res[0]
+ for field in TO_PREDICT:
+ assert field in res
+ assert res[field]['predicted_value'] is not None
+ assert res[field]['confidence'] > 0
+
+ test_predictor(TEST_PREDICTOR, TEST_DS)
+ test_predictor(TEST_PREDICTOR_CSV, TEST_DS_CSV)
+
+ def test_6_delete(self):
+ for predictor_name in [TEST_PREDICTOR, TEST_PREDICTOR_CSV]:
+ res = requests.delete(f'{HTTP_API_ROOT}/predictors/{predictor_name}')
+ assert res.status_code == 200
+ check_predictor_not_exists(predictor_name)
+
+ for ds_name in [TEST_DS_CSV, TEST_DS]:
+ res = requests.delete(f'{HTTP_API_ROOT}/datasources/{ds_name}')
+ assert res.status_code == 200
+ check_ds_not_exists(ds_name)
+
+
+if __name__ == "__main__":
+ try:
+ unittest.main(failfast=True)
+ print('Tests passed!')
+ except Exception as e:
+ print(f'Tests Failed!\n{e}')
diff --git a/tests/integration_tests/flows/test_user_flow_2.py b/tests/integration_tests/flows/test_user_flow_2.py
new file mode 100644
--- /dev/null
+++ b/tests/integration_tests/flows/test_user_flow_2.py
@@ -0,0 +1,199 @@
+import unittest
+import requests
+from pathlib import Path
+
+import mysql.connector
+
+from mindsdb.utilities.config import Config
+
+from common import (
+ USE_EXTERNAL_DB_SERVER,
+ DATASETS_COLUMN_TYPES,
+ MINDSDB_DATABASE,
+ DATASETS_PATH,
+ HTTP_API_ROOT,
+ TEST_CONFIG,
+ run_environment,
+ make_test_csv,
+ upload_csv,
+ stop_mindsdb,
+ condition_dict_to_str,
+ check_prediction_values
+)
+
+from http_test_helpers import (
+ check_ds_not_exists,
+ check_ds_exists,
+ check_ds_analyzable
+)
+
+
+# +++ define test data
+TEST_DATASET = 'us_health_insurance'
+
+DB_TYPES_MAP = {
+ int: 'int',
+ float: 'float',
+ str: 'varchar(255)'
+}
+
+TO_PREDICT = {
+ # 'charges': float,
+ 'smoker': str
+}
+CONDITION = {
+ 'age': 20,
+ 'sex': 'female'
+}
+# ---
+
+TEST_DATA_TABLE = TEST_DATASET
+TEST_PREDICTOR_NAME = f'{TEST_DATASET}_predictor'
+EXTERNAL_DS_NAME = f'{TEST_DATASET}_external'
+
+TEST_INTEGRATION = 'test_integration'
+TEST_DS = 'test_ds'
+TEST_DS_CSV = 'test_ds_csv'
+TEST_PREDICTOR = 'test_predictor'
+TEST_PREDICTOR_CSV = 'test_predictor_csv'
+
+config = Config(TEST_CONFIG)
+
+to_predict_column_names = list(TO_PREDICT.keys())
+
+
+def query(q, as_dict=False, fetch=False):
+ con = mysql.connector.connect(
+ host=config['integrations']['default_mariadb']['host'],
+ port=config['integrations']['default_mariadb']['port'],
+ user=config['integrations']['default_mariadb']['user'],
+ passwd=config['integrations']['default_mariadb']['password']
+ )
+
+ cur = con.cursor(dictionary=as_dict)
+ cur.execute(q)
+ res = True
+ if fetch:
+ res = cur.fetchall()
+ con.commit()
+ con.close()
+ return res
+
+
+def fetch(q, as_dict=True):
+ return query(q, as_dict, fetch=True)
+
+
+class UserFlowTest_2(unittest.TestCase):
+ def get_tables_in(self, schema):
+ test_tables = fetch(f'show tables from {schema}', as_dict=False)
+ return [x[0] for x in test_tables]
+
+ @classmethod
+ def setUpClass(cls):
+ mdb, datastore = run_environment(
+ config,
+ apis=['http'],
+ mindsdb_database=MINDSDB_DATABASE
+ )
+ cls.mdb = mdb
+
+ query('create database if not exists test_data')
+
+ if not USE_EXTERNAL_DB_SERVER:
+ test_csv_path = Path(DATASETS_PATH).joinpath(TEST_DATASET).joinpath('data.csv')
+ if TEST_DATA_TABLE not in cls.get_tables_in(cls, 'test_data'):
+ print('creating test data table...')
+ upload_csv(
+ query=query,
+ columns_map=DATASETS_COLUMN_TYPES[TEST_DATASET],
+ db_types_map=DB_TYPES_MAP,
+ table_name=TEST_DATA_TABLE,
+ csv_path=test_csv_path
+ )
+
+ data = fetch(f'select * from test_data.{TEST_DATA_TABLE} limit 50', as_dict=True)
+ cls.external_datasource_csv_path = make_test_csv(EXTERNAL_DS_NAME, data)
+
+ def test_1_upload_ds(self):
+ check_ds_not_exists(TEST_DS_CSV)
+
+ with open(self.external_datasource_csv_path, 'rb') as f:
+ d = f.read()
+ res = requests.put(
+ f'{HTTP_API_ROOT}/datasources/{TEST_DS_CSV}',
+ files={
+ 'file': ('data.csv', d, 'text/csv'),
+ 'name': (None, TEST_DS_CSV),
+ 'source_type': (None, 'file'),
+ 'source': (None, 'data.csv')
+ }
+ )
+ assert res.status_code == 200
+
+ check_ds_exists(TEST_DS_CSV)
+ check_ds_analyzable(TEST_DS_CSV)
+
+ def test_2_add_integration(self):
+ test_integration_data = {}
+ test_integration_data.update(config['integrations']['default_mariadb'])
+ test_integration_data['publish'] = True
+ test_integration_data['database_name'] = TEST_INTEGRATION
+ res = requests.put(f'{HTTP_API_ROOT}/config/integrations/{TEST_INTEGRATION}', json={'params': test_integration_data})
+ assert res.status_code == 200
+
+ def test_3_restart_and_connect(self):
+ stop_mindsdb()
+
+ mdb, datastore = run_environment(
+ config,
+ apis=['mysql'],
+ override_integration_config={
+ 'default_mariadb': {
+ 'publish': True
+ }
+ },
+ mindsdb_database=MINDSDB_DATABASE,
+ clear_storage=False
+ )
+ self.mdb = mdb
+
+ def test_4_learn_predictor(self):
+ query(f"""
+ insert into {MINDSDB_DATABASE}.predictors (name, predict, external_datasource, training_options) values
+ (
+ '{TEST_PREDICTOR}',
+ '{','.join(to_predict_column_names)}',
+ '{TEST_DS_CSV}',
+ '{{"join_learn_process": true, "stop_training_in_x_seconds": 3}}'
+ );
+ """)
+
+ print('predictor record in mindsdb.predictors')
+ res = fetch(f"select status from {MINDSDB_DATABASE}.predictors where name = '{TEST_PREDICTOR}'")
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(res[0]['status'] == 'complete')
+
+ print('predictor table in mindsdb db')
+ self.assertTrue(TEST_PREDICTOR in self.get_tables_in(MINDSDB_DATABASE))
+
+ def test_5_make_query(self):
+ res = fetch(f"""
+ select
+ *
+ from
+ {MINDSDB_DATABASE}.{TEST_PREDICTOR}
+ where
+ {condition_dict_to_str(CONDITION)};
+ """)
+
+ self.assertTrue(len(res) == 1)
+ self.assertTrue(check_prediction_values(res[0], TO_PREDICT))
+
+
+if __name__ == "__main__":
+ try:
+ unittest.main(failfast=True)
+ print('Tests passed!')
+ except Exception as e:
+ print(f'Tests Failed!\n{e}')
| Deprecate `model_swapping_map`
I believe `model_swapping_map` could be removed, since mindsdb_native should have all the required thread-safety now; make sure this is true and, if so, remove it.
PUT datasource route fails with a timeout
This happens when the datasource is a query against a big table in the database.
Datasource analysing refactoring
A couple of thoughts about analysing.
History of analysing in the 'upload ds' route:
1. Go back to the time when we only worked with 'file' datasources. We did not run analysis right after a file was uploaded; we ran it when the user wanted to see the analysis or to train a predictor.
2. We wanted to add a new feature: filter/sort for datasource data on the Scout 'data preview' screen. Doing this in a pandas.DataFrame is slow, so we decided to use an sqlite database. But how can we create that database if we don't know the column types of the file? If we just copy the csv into the sqlite db, all columns end up as text, so all filters in Scout are text filters. Therefore we need to analyse the datasource right after it is uploaded.
3. We added database-based datasources without any changes to the datasource upload flow. The flow now looks like this: the route receives a query string and an 'integration' name from the user, the analysis process runs, and once it is done the sqlite db is created.
Problems in this flow:
1. the analysis can take a long time
2. we keep a copy of the data from the real db in the sqlite db.
3. if the user trains a predictor from that datasource some time later, the predictor is trained on the current database data, not on the sqlite data, while 'datasource preview' still shows the data from sqlite.
4. every time (not literally every time, there is a timeout) the user opens 'datasource analysis', the analysis is re-run.
How I see the solution (a rough sketch of point 1 follows this list):
1. add statuses for datasources. Then we can set status='analysing' for a datasource that is being uploaded and run the analysis in the background. All interactions in Scout should be disabled for datasources with this status.
2. save the datasource analysis to a file in the datasource folder. The user should see the date/time of the last analysis and a 're-analyse' button (probably only for db-based datasources; for file-based ones it makes no sense). Then the 'datasource analysis' screen can open without waiting.
3. don't create an sqlite db for db-based datasources. The user already has the data in a database; we don't want to keep a full copy of it.
4. the point above means we should disable the 'datasource preview' screen for db-based datasources. Why: it would be difficult to implement filtering/sorting/pagination for that data, at least for now.
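A rough sketch of point 1. The names `datasource_record`, `session` and `analyse_fn` are placeholders for the ORM row, the DB session and the analysis routine, so this only illustrates the intended flow, not the actual implementation:
```
import json
import threading


def start_analysis(datasource_record, session, analyse_fn):
    # Mark the datasource as 'analysing' and return immediately;
    # the long-running analysis happens in a background thread.
    datasource_record.status = 'analysing'
    session.commit()

    def _run():
        try:
            datasource_record.analysis = json.dumps(analyse_fn())  # the slow part
            datasource_record.status = 'ready'
        except Exception:
            datasource_record.status = 'error'
        finally:
            session.commit()

    threading.Thread(target=_run, daemon=True).start()
```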
|
Most of the time is taken by the 'analyse' process, and there is no way to make it significantly faster; to make this route async we need to add statuses for datasources.
*If we merge the remote SQL datasource this should be fixed*
Depends on https://github.com/mindsdb/mindsdb/issues/908
| 2020-12-03T10:41:21Z | [] | [] |
mindsdb/mindsdb | 1,019 | mindsdb__mindsdb-1019 | [
"945"
] | 7aa41930d2ce6a55b342e63bdb1705107245f2bf | diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -129,6 +129,10 @@ def on_file(file):
if 'query' in data:
source_type = request.json['integration_id']
+ if source_type not in ca.default_store.config['integrations']:
+ # integration doens't exist
+ abort(400, f"{source_type} integration doesn't exist")
+
ca.default_store.save_datasource(name, source_type, request.json)
os.rmdir(temp_dir_path)
return ca.default_store.get_datasource(name)
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -1,15 +1,16 @@
import os
+import time
+import unittest
+import importlib.util
from random import randint
from pathlib import Path
-import unittest
-import requests
-import time
+from uuid import uuid1
+import requests
import psutil
from mindsdb.utilities.config import Config
-import importlib.util
common_path = Path(__file__).parent.parent.absolute().joinpath('flows/common.py').resolve()
spec = importlib.util.spec_from_file_location("common", str(common_path))
common = importlib.util.module_from_spec(spec)
@@ -235,13 +236,17 @@ def test_9_gui_is_served(self):
assert response.status_code == 200
assert response.content.decode().find('<head>') > 0
- def test_10_telemetry_enabled(self):
+ def test_10_ds_from_unexist_integration(self):
"""
Call telemetry enabled
then check the response is status 200
"""
- response = requests.get(f'{root}/config/telemetry/true')
- assert response.status_code == 200
+ ds_name = f"ds_{uuid1()}"
+ data = {"integration_id": f'unexists_integration_{uuid1()}',
+ "name": ds_name,
+ "query": "select * from test_data.any_data limit 100;"}
+ response = requests.put(f'{root}/datasources/{ds_name}', json=data)
+ assert response.status_code == 400, f"expected 400 but got {response.status_code}, {response.text}"
if __name__ == '__main__':
unittest.main(failfast=True)
| Error when trying to create a datasource from a non-existent integration
If you send the request `PUT api/datasources/ds_name` with this data:
```
{
"integration_id": 'unexists_integration',
"name": 'ds_name',
"query": f"select * from test_data.any_data limit 100;"
}
```
then the response contains the error:
```
{
"message": 'TypeError: expected str, bytes or os.PathLike object, not dict'
}
```
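The confusing `TypeError` appears because the route keeps going with an unknown integration name and later treats the request body as something it is not; validating the integration before saving (which is what the patch above adds) turns this into a readable 400. A minimal sketch of that check, with `default_store` standing in for the datasource store the route uses:
```
from flask_restx import abort


def save_query_datasource(name, data, default_store):
    # Reject an unknown integration up front, instead of failing later
    # with an unrelated TypeError.
    source_type = data['integration_id']
    if source_type not in default_store.config['integrations']:
        abort(400, f"{source_type} integration doesn't exist")
    default_store.save_datasource(name, source_type, data)
    return default_store.get_datasource(name)
```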
| I wouldn't call this a bug. Yes, we need better logs for these types of things, but in principle this issue shouldn't happen to begin with if people are using this via Scout or the database.
If they are using it via the API directly, I assume they are "expert" users. Still, something we want better logging for, but not a bug. | 2020-12-22T09:12:43Z | [] | [] |
mindsdb/mindsdb | 1,020 | mindsdb__mindsdb-1020 | [
"967"
] | b7b97231a780826f33473cc865eb8e8a42aea053 | diff --git a/mindsdb/utilities/ps.py b/mindsdb/utilities/ps.py
--- a/mindsdb/utilities/ps.py
+++ b/mindsdb/utilities/ps.py
@@ -1,11 +1,44 @@
-import psutil
+import sys
import time
+from collections import namedtuple
+import psutil
+
+
+def net_connections():
+ """Cross-platform psutil.net_connections like interface"""
+ if sys.platform.lower().startswith('linux'):
+ return psutil.net_connections()
+
+ all_connections = []
+ Pconn = None
+ for p in psutil.process_iter(['pid']):
+ try:
+ process = psutil.Process(p.pid)
+ connections = process.connections()
+ if connections:
+ for conn in connections:
+ # Adding pid to the returned instance
+ # for consistency with psutil.net_connections()
+ if Pconn is None:
+ fields = list(conn._fields)
+ fields.append('pid')
+ _conn = namedtuple('Pconn', fields)
+ for attr in conn._fields:
+ setattr(_conn, attr, getattr(conn, attr))
+ _conn.pid = p.pid
+ all_connections.append(_conn)
+
+ except (psutil.AccessDenied, psutil.ZombieProcess, psutil.NoSuchProcess):
+ pass
+ return all_connections
def is_port_in_use(port_num):
- portsinuse = []
- conns = psutil.net_connections()
- portsinuse = [x.laddr[1] for x in conns if x.status == 'LISTEN']
+ """Check does any of child process uses specified port."""
+ parent_process = psutil.Process()
+ child_pids = [x.pid for x in parent_process.children(recursive=True)]
+ conns = net_connections()
+ portsinuse = [x.laddr[1] for x in conns if x.pid in child_pids and x.status == 'LISTEN']
portsinuse.sort()
return int(port_num) in portsinuse
| diff --git a/tests/integration_tests/api/test_http.py b/tests/integration_tests/api/test_http.py
--- a/tests/integration_tests/api/test_http.py
+++ b/tests/integration_tests/api/test_http.py
@@ -7,9 +7,9 @@
from uuid import uuid1
import requests
-import psutil
from mindsdb.utilities.config import Config
+from mindsdb.utilities.ps import net_connections
common_path = Path(__file__).parent.parent.absolute().joinpath('flows/common.py').resolve()
spec = importlib.util.spec_from_file_location("common", str(common_path))
@@ -46,11 +46,10 @@ def setUpClass(cls):
@classmethod
def tearDownClass(cls):
try:
- conns = psutil.net_connections()
+ conns = net_connections()
pid = [x.pid for x in conns if x.status == 'LISTEN' and x.laddr[1] == 47334 and x.pid is not None]
if len(pid) > 0:
os.kill(pid[0], 9)
- cls.sp.kill()
except Exception:
pass
diff --git a/tests/integration_tests/flows/common.py b/tests/integration_tests/flows/common.py
--- a/tests/integration_tests/flows/common.py
+++ b/tests/integration_tests/flows/common.py
@@ -1,21 +1,21 @@
import time
-from pathlib import Path
+import os
import json
-import requests
import subprocess
import atexit
-import os
import asyncio
import shutil
import csv
+from pathlib import Path
+import requests
from pandas import DataFrame
from mindsdb.utilities.fs import create_dirs_recursive
from mindsdb.utilities.config import Config
from mindsdb.interfaces.native.mindsdb import MindsdbNative
from mindsdb.interfaces.datastore.datastore import DataStore
-from mindsdb.utilities.ps import wait_port, is_port_in_use
+from mindsdb.utilities.ps import wait_port, is_port_in_use, net_connections
from mindsdb_native import CONFIG
@@ -187,12 +187,17 @@ def make_test_csv(name, data):
def stop_mindsdb(sp=None):
if sp:
sp.kill()
- sp = subprocess.Popen('kill -9 $(lsof -t -i:47334)', shell=True)
- sp.wait()
- sp = subprocess.Popen('kill -9 $(lsof -t -i:47335)', shell=True)
- sp.wait()
- sp = subprocess.Popen('kill -9 $(lsof -t -i:47336)', shell=True)
- sp.wait()
+ conns = net_connections()
+ pids = [x.pid for x in conns
+ if x.pid is not None and x.status in ['LISTEN', 'CLOSE_WAIT']
+ and x.laddr[1] in (47334, 47335, 47336)]
+
+ for pid in pids:
+ try:
+ os.kill(pid, 9)
+ # process may be killed by OS due to some reasons in that moment
+ except ProcessLookupError:
+ pass
def run_environment(config, apis=['mysql'], override_integration_config={}, override_api_config={}, mindsdb_database='mindsdb', clear_storage=True):
diff --git a/tests/integration_tests/flows/test_mistakes.py b/tests/integration_tests/flows/test_mistakes.py
--- a/tests/integration_tests/flows/test_mistakes.py
+++ b/tests/integration_tests/flows/test_mistakes.py
@@ -65,17 +65,6 @@ def test_1_wrong_integration(self):
check_ds_not_exists(TEST_DS)
- # TODO creating DS from unexists integration raise not critical error in code.
- # need fix it and return human-readable error
- # related issue: https://github.com/mindsdb/mindsdb/issues/945
- # data = {
- # "integration_id": 'unexists_integration',
- # "name": TEST_DS,
- # "query": f"select * from test_data.{TEST_DATASET} limit 50;"
- # }
- # res = requests.put(f'{HTTP_API_ROOT}/datasources/{TEST_DS}', json=data)
- # assert res ?
-
# check create DS with wrong integration password
data = {
"integration_id": 'default_mariadb',
| Run tests on windows and OSX
Part of the reason we wanted tests with remote databases was to be able to test mindsdb on windows and OSX.
This is currently being done for native but not for mindsdb. Current issues that stop us from testing on OSX and Windows:
* `psutil.net_connections` requires root privileges on OSX/Windows (a root-free check is sketched after this list)
* `ssh` command will fail on windows
* ???
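One way around the `psutil.net_connections` limitation is to enumerate connections per process, which is what the fix above does. When all you need to know is whether *some* process is listening on a port, an even simpler alternative is to try connecting to it; this needs no elevated privileges on any platform. A small sketch of that alternative (not what the patch itself uses):
```
import socket


def is_port_listening(port_num: int, host: str = '127.0.0.1') -> bool:
    # connect_ex returns 0 when the connection succeeds, i.e. something
    # is already listening on the port; no root/admin rights are needed.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(0.5)
        return sock.connect_ex((host, int(port_num))) == 0
```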
| 2020-12-22T16:27:12Z | [] | [] |
|
mindsdb/mindsdb | 1,458 | mindsdb__mindsdb-1458 | [
"1457"
] | 5e3da52edfd6e29eec16a2fdae5a6239994ce67b | diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py
--- a/mindsdb/api/http/namespaces/util.py
+++ b/mindsdb/api/http/namespaces/util.py
@@ -43,15 +43,16 @@ def get(self):
}
for process_type in response:
- p = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{process_type}/')
- if not p.is_dir():
+ processes_dir = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{process_type}/')
+ if not processes_dir.is_dir():
continue
- pids = [int(x.name) for x in p.iterdir()]
- for pid in pids:
+ process_marks = [x.name for x in processes_dir.iterdir()]
+ for p_mark in process_marks:
+ pid = int(p_mark.split('-')[0])
try:
psutil.Process(pid)
except Exception:
- p.joinpath(str(pid)).unlink()
+ processes_dir.joinpath(p_mark).unlink()
else:
response[process_type] = True
diff --git a/mindsdb/interfaces/model/learn_process.py b/mindsdb/interfaces/model/learn_process.py
--- a/mindsdb/interfaces/model/learn_process.py
+++ b/mindsdb/interfaces/model/learn_process.py
@@ -2,9 +2,15 @@
import traceback
import tempfile
from pathlib import Path
+
+import pandas as pd
from pandas.core.frame import DataFrame
import torch.multiprocessing as mp
+import lightwood
from lightwood.api.types import ProblemDefinition
+from lightwood import __version__ as lightwood_version
+
+import mindsdb.interfaces.storage.db as db
from mindsdb.interfaces.database.database import DatabaseWrapper
from mindsdb.interfaces.model.model_interface import ModelInterface, ModelInterfaceWrapper
from mindsdb.interfaces.storage.db import session, Predictor
@@ -12,11 +18,7 @@
from mindsdb.interfaces.datastore.datastore import DataStore, DataStoreWrapper
from mindsdb.interfaces.storage.fs import FsStore
from mindsdb.utilities.config import Config
-from mindsdb.utilities.fs import create_process_mark, delete_process_mark
-import mindsdb.interfaces.storage.db as db
-import pandas as pd
-import lightwood
-from lightwood import __version__ as lightwood_version
+from mindsdb.utilities.functions import mark_process
from mindsdb.utilities.log import log
@@ -37,33 +39,30 @@ def delete_learn_mark():
p.unlink()
+@mark_process(name='learn')
def run_generate(df: DataFrame, problem_definition: ProblemDefinition, name: str, company_id: int, datasource_id: int) -> int:
- create_process_mark('learn')
- try:
- json_ai = lightwood.json_ai_from_problem(df, problem_definition)
- code = lightwood.code_from_json_ai(json_ai)
-
- predictor_record = db.Predictor(
- company_id=company_id,
- name=name,
- json_ai=json_ai.to_dict(),
- code=code,
- datasource_id=datasource_id,
- mindsdb_version=mindsdb_version,
- lightwood_version=lightwood_version,
- to_predict=[problem_definition.target],
- learn_args=problem_definition.to_dict(),
- data={'name': name}
- )
-
- db.session.add(predictor_record)
- db.session.commit()
- finally:
- delete_process_mark('learn')
-
-
+ json_ai = lightwood.json_ai_from_problem(df, problem_definition)
+ code = lightwood.code_from_json_ai(json_ai)
+
+ predictor_record = db.Predictor(
+ company_id=company_id,
+ name=name,
+ json_ai=json_ai.to_dict(),
+ code=code,
+ datasource_id=datasource_id,
+ mindsdb_version=mindsdb_version,
+ lightwood_version=lightwood_version,
+ to_predict=[problem_definition.target],
+ learn_args=problem_definition.to_dict(),
+ data={'name': name}
+ )
+
+ db.session.add(predictor_record)
+ db.session.commit()
+
+
+@mark_process(name='learn')
def run_fit(predictor_id: int, df: pd.DataFrame) -> None:
- create_process_mark('learn')
try:
predictor_record = session.query(db.Predictor).filter_by(id=predictor_id).first()
assert predictor_record is not None
@@ -96,8 +95,6 @@ def run_fit(predictor_id: int, df: pd.DataFrame) -> None:
predictor_record.data = {'error': f'{traceback.format_exc()}\nMain error: {e}'}
session.commit()
raise e
- finally:
- delete_process_mark('learn')
def run_learn(df: DataFrame, problem_definition: ProblemDefinition, name: str, company_id: int, datasource_id: int) -> None:
@@ -163,7 +160,7 @@ def run_update(name: str, company_id: int):
predictor_record.mindsdb_version = mindsdb_version
predictor_record.update_status = 'up_to_date'
session.commit()
-
+
except Exception as e:
log.error(e)
predictor_record.update_status = 'update_failed' # type: ignore
diff --git a/mindsdb/interfaces/model/model_controller.py b/mindsdb/interfaces/model/model_controller.py
--- a/mindsdb/interfaces/model/model_controller.py
+++ b/mindsdb/interfaces/model/model_controller.py
@@ -1,23 +1,23 @@
+import os
import sys
+import time
import psutil
import datetime
-import time
-import os
from copy import deepcopy
-from typing import Optional, Tuple, Union, Dict, Any
+from contextlib import contextmanager
from dateutil.parser import parse as parse_datetime
+from typing import Optional, Tuple, Union, Dict, Any
+import lightwood
from lightwood.api.types import ProblemDefinition
-import numpy as np
-from contextlib import contextmanager
from packaging import version
+import numpy as np
import pandas as pd
-import lightwood
import mindsdb_datasources
from mindsdb import __version__ as mindsdb_version
import mindsdb.interfaces.storage.db as db
-from mindsdb.utilities.fs import create_process_mark, delete_process_mark
+from mindsdb.utilities.functions import mark_process
from mindsdb.interfaces.database.database import DatabaseWrapper
from mindsdb.utilities.config import Config
from mindsdb.interfaces.storage.fs import FsStore
@@ -100,9 +100,8 @@ def _unpack_old_args(self, from_data: dict, kwargs: dict, to_predict: Optional[U
return df, problem_definition, join_learn_process
+ @mark_process(name='learn')
def learn(self, name: str, from_data: dict, to_predict: str, datasource_id: int, kwargs: dict, company_id: int) -> None:
- create_process_mark('learn')
-
df, problem_definition, join_learn_process = self._unpack_old_args(from_data, kwargs, to_predict)
p = LearnProcess(df, ProblemDefinition.from_dict(problem_definition), name, company_id, datasource_id)
p.start()
@@ -111,8 +110,8 @@ def learn(self, name: str, from_data: dict, to_predict: str, datasource_id: int,
if not IS_PY36:
p.close()
+ @mark_process(name='predict')
def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_format: str, company_id: int):
- create_process_mark('predict')
original_name = name
name = f'{company_id}@@@@@{name}'
@@ -156,8 +155,6 @@ def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_fo
# Bellow is useful for debugging caching and storage issues
# del self.predictor_cache[name]
- delete_process_mark('predict')
-
target = predictor_record.to_predict[0]
if pred_format in ('explain', 'dict', 'dict&explain'):
explain_arr = []
@@ -171,7 +168,8 @@ def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_fo
'confidence_upper_bound': row.get('upper', None),
'anomaly': row.get('anomaly', None),
'truth': row.get('truth', None)
- }})
+ }
+ })
td = {'predicted_value': row['prediction']}
for col in df.columns:
@@ -196,12 +194,11 @@ def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_fo
print("HERE!!")
return predictions
+ @mark_process(name='analyse')
def analyse_dataset(self, ds: dict, company_id: int) -> lightwood.DataAnalysis:
- create_process_mark('analyse')
ds_cls = getattr(mindsdb_datasources, ds['class'])
df = ds_cls(*ds['args'], **ds['kwargs']).df
analysis = lightwood.analyze_dataset(df)
- delete_process_mark('analyse')
return analysis.to_dict() # type: ignore
def get_model_data(self, name, company_id: int):
@@ -248,7 +245,7 @@ def get_model_data(self, name, company_id: int):
data['status'] = 'complete'
else:
data['status'] = 'error'
-
+
if data.get('accuracies', None) is not None:
if len(data['accuracies']) > 0:
data['accuracy'] = float(np.mean(list(data['accuracies'].values())))
@@ -297,8 +294,8 @@ def update_model(self, name: str, company_id: int):
p.start()
return 'Updated in progress'
+ @mark_process(name='learn')
def generate_predictor(self, name: str, from_data: dict, datasource_id, problem_definition_dict: dict, join_learn_process: bool, company_id: int):
- create_process_mark('learn')
df, problem_definition, _ = self._unpack_old_args(from_data, problem_definition_dict)
p = GenerateProcess(df, ProblemDefinition.from_dict(problem_definition), name, company_id, datasource_id)
p.start()
@@ -315,7 +312,7 @@ def edit_json_ai(self, name: str, json_ai: dict, company_id=None):
predictor_record.code = lightwood.code_from_json_ai(json_ai)
predictor_record.json_ai = json_ai.to_dict()
db.session.commit()
-
+
def code_from_json_ai(self, json_ai: dict, company_id=None):
json_ai = lightwood.JsonAI.from_dict(json_ai)
code = lightwood.code_from_json_ai(json_ai)
@@ -328,14 +325,14 @@ def edit_code(self, name: str, code: str, company_id=None):
predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
assert predictor_record is not None
-
+
lightwood.predictor_from_code(code)
predictor_record.code = code
predictor_record.json_ai = None
db.session.commit()
+ @mark_process(name='learn')
def fit_predictor(self, name: str, from_data: dict, join_learn_process: bool, company_id: int) -> None:
- create_process_mark('learn')
predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
assert predictor_record is not None
diff --git a/mindsdb/utilities/fs.py b/mindsdb/utilities/fs.py
--- a/mindsdb/utilities/fs.py
+++ b/mindsdb/utilities/fs.py
@@ -1,7 +1,8 @@
-import inspect
import os
-from pathlib import Path
import tempfile
+import threading
+from pathlib import Path
+
from appdirs import user_data_dir
@@ -41,15 +42,23 @@ def create_dirs_recursive(path):
raise ValueError(f'Wrong path: {path}')
+def _get_process_mark_id():
+ return f'{os.getpid()}-{threading.get_ident()}'
+
+
def create_process_mark(folder='learn'):
if os.name == 'posix':
p = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{folder}/')
p.mkdir(parents=True, exist_ok=True)
- p.joinpath(f'{os.getpid()}').touch()
+ p.joinpath(_get_process_mark_id()).touch()
def delete_process_mark(folder='learn'):
if os.name == 'posix':
- p = Path(tempfile.gettempdir()).joinpath(f'mindsdb/processes/{folder}/').joinpath(f'{os.getpid()}')
+ p = (
+ Path(tempfile.gettempdir())
+ .joinpath(f'mindsdb/processes/{folder}/')
+ .joinpath(_get_process_mark_id())
+ )
if p.exists():
p.unlink()
diff --git a/mindsdb/utilities/functions.py b/mindsdb/utilities/functions.py
--- a/mindsdb/utilities/functions.py
+++ b/mindsdb/utilities/functions.py
@@ -1,5 +1,8 @@
import argparse
import datetime
+from functools import wraps
+
+from mindsdb.utilities.fs import create_process_mark, delete_process_mark
def args_parse():
@@ -44,3 +47,16 @@ def is_notebook():
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
+
+
+def mark_process(name):
+ def mark_process_wrapper(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ create_process_mark(name)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ delete_process_mark(name)
+ return wrapper
+ return mark_process_wrapper
| diff --git a/tests/integration_tests/flows/test_clickhouse.py b/tests/integration_tests/flows/test_clickhouse.py
--- a/tests/integration_tests/flows/test_clickhouse.py
+++ b/tests/integration_tests/flows/test_clickhouse.py
@@ -96,8 +96,8 @@ def setUpClass(cls):
INTEGRATION_NAME: {
'publish': True
}
- }
- ,'permanent_storage': {
+ },
+ 'permanent_storage': {
'location': 'local'
}
}
| 'predict' process will never finish if an exception is raised in it
If an exception like the one below happens during the 'predict' (and, probably, 'learn') process, the process will stay live forever.
```
ERROR:mindsdb.api.http.initialize:Exception on /api/predictors/gui_only_predictor/predict [POST]
Traceback (most recent call last):
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "/usr/local/lib/python3.6/dist-packages/flask/app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "/usr/local/lib/python3.6/dist-packages/flask_restx/api.py", line 403, in wrapper
resp = resource(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/flask/views.py", line 89, in view
return self.dispatch_request(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/flask_restx/resource.py", line 49, in dispatch_request
resp = meth(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/mindsdb/api/http/namespaces/predictor.py", line 159, in post
results = request.model_interface.predict(name, when, 'explain')
File "/usr/local/lib/python3.6/dist-packages/mindsdb/interfaces/model/model_interface.py", line 12, in wrapper
return getattr(self.model_interface, name)(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/mindsdb/interfaces/model/model_interface.py", line 31, in predict
return self.controller.predict(*args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/mindsdb/interfaces/model/model_controller.py", line 150, in predict
predictions = self.predictor_cache[name]['predictor'].predict(df)
File "/tmp/7d78b6a185cdf38403eba6dc79f75e6b3b63bae73c11ce6f16317165199043515.py", line 181, in predict
encoded_data = encoded_ds.get_encoded_data(include_target=False)
File "/usr/local/lib/python3.6/dist-packages/lightwood/data/encoded_ds.py", line 89, in get_encoded_data
encoded_dfs.append(self.get_encoded_column_data(col))
File "/usr/local/lib/python3.6/dist-packages/lightwood/data/encoded_ds.py", line 78, in get_encoded_column_data
encoded_data = self.encoders[column_name].encode(self.data_frame[column_name], **kwargs)
File "/usr/local/lib/python3.6/dist-packages/lightwood/encoder/time_series/rnn.py", line 399, in encode
encoded = self._encode_one(val)
File "/usr/local/lib/python3.6/dist-packages/lightwood/encoder/time_series/rnn.py", line 313, in _encode_one
encoder_hidden)
File "/usr/local/lib/python3.6/dist-packages/lightwood/encoder/time_series/helpers/rnn_helpers.py", line 67, in forward
output, hidden = self.gru(input, hidden)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/rnn.py", line 819, in forward
self.check_forward_args(input, hx, batch_sizes)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/rnn.py", line 226, in check_forward_args
self.check_input(input, batch_sizes)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/rnn.py", line 204, in check_input
self.input_size, input.size(-1)))
RuntimeError: input.size(-1) must be equal to input_size. Expected 15, got 14
```
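The traceback itself is a lightwood error, but the reason the process never "finishes" is that the process mark created at the start of predict/learn is never removed when an exception escapes. Wrapping the mark handling in a decorator with `try/finally` guarantees cleanup; this is what the patch above introduces as `mark_process`, built on `create_process_mark`/`delete_process_mark` from `mindsdb.utilities.fs`:
```
from functools import wraps

from mindsdb.utilities.fs import create_process_mark, delete_process_mark


def mark_process(name):
    def mark_process_wrapper(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            create_process_mark(name)          # touch the mark file
            try:
                return func(*args, **kwargs)
            finally:
                delete_process_mark(name)      # runs even if func raises
        return wrapper
    return mark_process_wrapper
```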
| 2021-09-17T09:21:59Z | [] | [] |
|
mindsdb/mindsdb | 1,461 | mindsdb__mindsdb-1461 | [
"1460"
] | 7d7254149ef7af967e753708862b594d7b25aa65 | diff --git a/mindsdb/api/http/initialize.py b/mindsdb/api/http/initialize.py
--- a/mindsdb/api/http/initialize.py
+++ b/mindsdb/api/http/initialize.py
@@ -7,14 +7,11 @@
from zipfile import ZipFile
from pathlib import Path
import traceback
-from datetime import datetime, date, timedelta
import tempfile
# import concurrent.futures
-import numpy as np
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
-from flask.json import JSONEncoder
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
@@ -24,6 +21,7 @@
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
+from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
@@ -36,24 +34,6 @@ def specs_url(self):
return url_for(self.endpoint("specs"), _external=False)
-class CustomJSONEncoder(JSONEncoder):
- def default(self, obj):
- if isinstance(obj, date):
- return obj.strftime("%Y-%m-%d")
- if isinstance(obj, datetime):
- return obj.strftime("%Y-%m-%dT%H:%M:%S.%f")
- if isinstance(obj, timedelta):
- return str(obj)
- if isinstance(obj, np.bool_):
- return bool(obj)
- if isinstance(obj, np.int8) or isinstance(obj, np.int16) or isinstance(obj, np.int32) or isinstance(obj, np.int64):
- return int(obj)
- if isinstance(obj, np.float16) or isinstance(obj, np.float32) or isinstance(obj, np.float64) or isinstance(obj, np.float128):
- return float(obj)
-
- return JSONEncoder.default(self, obj)
-
-
def custom_output_json(data, code, headers=None):
resp = make_response(dumps(data), code)
resp.headers.extend(headers or {})
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -16,6 +16,7 @@
from mindsdb.interfaces.storage.fs import FsStore
from mindsdb.utilities.log import log
from mindsdb.interfaces.database.integrations import get_db_integration
+from mindsdb.utilities.json_encoder import CustomJSONEncoder
class DataStoreWrapper(object):
@@ -59,7 +60,7 @@ def start_analysis(self, name, company_id=None):
try:
analysis = self.model_interface.analyse_dataset(ds=self.get_datasource_obj(name, raw=True, company_id=company_id), company_id=company_id)
datasource_record = session.query(Datasource).filter_by(company_id=company_id, name=name).first()
- datasource_record.analysis = json.dumps(analysis)
+ datasource_record.analysis = json.dumps(analysis, cls=CustomJSONEncoder)
session.commit()
except Exception as e:
log.error(e)
diff --git a/mindsdb/streams/kafka/kafka_stream.py b/mindsdb/streams/kafka/kafka_stream.py
--- a/mindsdb/streams/kafka/kafka_stream.py
+++ b/mindsdb/streams/kafka/kafka_stream.py
@@ -1,8 +1,9 @@
import json
from copy import deepcopy
+
import kafka
-from mindsdb.api.http.initialize import CustomJSONEncoder
+from mindsdb.utilities.json_encoder import CustomJSONEncoder
from mindsdb.streams.base.base_stream import BaseStream
diff --git a/mindsdb/streams/redis/redis_stream.py b/mindsdb/streams/redis/redis_stream.py
--- a/mindsdb/streams/redis/redis_stream.py
+++ b/mindsdb/streams/redis/redis_stream.py
@@ -3,7 +3,7 @@
import walrus
from mindsdb.streams.base import BaseStream
-from mindsdb.api.http.initialize import CustomJSONEncoder
+from mindsdb.utilities.json_encoder import CustomJSONEncoder
class RedisStream(BaseStream):
diff --git a/mindsdb/utilities/json_encoder.py b/mindsdb/utilities/json_encoder.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/utilities/json_encoder.py
@@ -0,0 +1,22 @@
+from datetime import datetime, date, timedelta
+
+import numpy as np
+from flask.json import JSONEncoder
+
+
+class CustomJSONEncoder(JSONEncoder):
+ def default(self, obj):
+ if isinstance(obj, date):
+ return obj.strftime("%Y-%m-%d")
+ if isinstance(obj, datetime):
+ return obj.strftime("%Y-%m-%dT%H:%M:%S.%f")
+ if isinstance(obj, timedelta):
+ return str(obj)
+ if isinstance(obj, np.bool_):
+ return bool(obj)
+ if isinstance(obj, np.int8) or isinstance(obj, np.int16) or isinstance(obj, np.int32) or isinstance(obj, np.int64):
+ return int(obj)
+ if isinstance(obj, np.float16) or isinstance(obj, np.float32) or isinstance(obj, np.float64) or isinstance(obj, np.float128):
+ return float(obj)
+
+ return JSONEncoder.default(self, obj)
| diff --git a/tests/integration_tests/flows/test_user_flow_1.py b/tests/integration_tests/flows/test_user_flow_1.py
--- a/tests/integration_tests/flows/test_user_flow_1.py
+++ b/tests/integration_tests/flows/test_user_flow_1.py
@@ -171,6 +171,9 @@ def test_predictor(predictior_name, datasource_name):
check_predictor_exists(predictior_name)
+ import time
+ time.sleep(10)
+
wait_predictor_learn(predictior_name)
res = requests.post(
| Data analysis is not using custom json encoder for dump data
That is the reason why data analysis is not visible with lightwood 1.2
| 2021-09-20T15:05:06Z | [] | [] |
|
mindsdb/mindsdb | 1,471 | mindsdb__mindsdb-1471 | [
"1459"
] | a24cc2e337900f712046482c8e9fac00bf0e7a1d | diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -1,4 +1,3 @@
-import time
from dateutil.parser import parse as parse_datetime
from flask import request
from flask_restx import Resource, abort
@@ -10,6 +9,7 @@
put_predictor_params
)
+
@ns_conf.route('/')
class PredictorList(Resource):
@ns_conf.doc('list_predictors')
@@ -18,6 +18,7 @@ def get(self):
models = request.model_interface.get_models()
return models
+
@ns_conf.route('/<name>')
@ns_conf.param('name', 'The predictor identifier')
@ns_conf.response(404, 'predictor not found')
@@ -89,13 +90,6 @@ def put(self, name):
)
request.model_interface.learn(name, from_data, to_predict, request.default_store.get_datasource(ds_name)['id'], kwargs=kwargs)
- for i in range(20):
- try:
- # Dirty hack, we should use a messaging queue between the predictor process and this bit of the code
- request.model_interface.get_model_data(name)
- break
- except Exception:
- time.sleep(1)
if retrain is True:
try:
@@ -231,7 +225,7 @@ class PredictorEditCode(Resource):
def put(self, name):
request.model_interface.edit_code(name, request.json['code'])
return '', 200
-
+
@ns_conf.route('/<name>/train')
@ns_conf.param('name', 'The predictor identifier')
diff --git a/mindsdb/interfaces/model/learn_process.py b/mindsdb/interfaces/model/learn_process.py
--- a/mindsdb/interfaces/model/learn_process.py
+++ b/mindsdb/interfaces/model/learn_process.py
@@ -40,31 +40,20 @@ def delete_learn_mark():
@mark_process(name='learn')
-def run_generate(df: DataFrame, problem_definition: ProblemDefinition, name: str, company_id: int, datasource_id: int) -> int:
+def run_generate(df: DataFrame, problem_definition: ProblemDefinition, predictor_id: int) -> int:
json_ai = lightwood.json_ai_from_problem(df, problem_definition)
code = lightwood.code_from_json_ai(json_ai)
- predictor_record = db.Predictor(
- company_id=company_id,
- name=name,
- json_ai=json_ai.to_dict(),
- code=code,
- datasource_id=datasource_id,
- mindsdb_version=mindsdb_version,
- lightwood_version=lightwood_version,
- to_predict=[problem_definition.target],
- learn_args=problem_definition.to_dict(),
- data={'name': name}
- )
-
- db.session.add(predictor_record)
+ predictor_record = Predictor.query.get(predictor_id)
+ predictor_record.json_ai = json_ai.to_dict()
+ predictor_record.code = code
db.session.commit()
@mark_process(name='learn')
def run_fit(predictor_id: int, df: pd.DataFrame) -> None:
try:
- predictor_record = session.query(db.Predictor).filter_by(id=predictor_id).first()
+ predictor_record = Predictor.query.get(predictor_id)
assert predictor_record is not None
fs_store = FsStore()
@@ -97,11 +86,9 @@ def run_fit(predictor_id: int, df: pd.DataFrame) -> None:
raise e
-def run_learn(df: DataFrame, problem_definition: ProblemDefinition, name: str, company_id: int, datasource_id: int) -> None:
- run_generate(df, problem_definition, name, company_id, datasource_id)
- predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
- assert predictor_record is not None
- run_fit(predictor_record.id, df)
+def run_learn(df: DataFrame, problem_definition: ProblemDefinition, predictor_id: int) -> None:
+ run_generate(df, problem_definition, predictor_id)
+ run_fit(predictor_id, df)
def run_adjust(name, db_name, from_data, datasource_id, company_id):
@@ -156,7 +143,7 @@ def run_update(name: str, company_id: int):
predictor_record.data = predictor.model_analysis.to_dict() # type: ignore
session.commit()
- predictor_record.lightwood_version = lightwood.__version__
+ predictor_record.lightwood_version = lightwood_version
predictor_record.mindsdb_version = mindsdb_version
predictor_record.update_status = 'up_to_date'
session.commit()
diff --git a/mindsdb/interfaces/model/model_controller.py b/mindsdb/interfaces/model/model_controller.py
--- a/mindsdb/interfaces/model/model_controller.py
+++ b/mindsdb/interfaces/model/model_controller.py
@@ -10,10 +10,13 @@
import lightwood
from lightwood.api.types import ProblemDefinition
+from lightwood import __version__ as lightwood_version
+from packaging import version
import numpy as np
import pandas as pd
import mindsdb_datasources
+from mindsdb import __version__ as mindsdb_version
import mindsdb.interfaces.storage.db as db
from mindsdb.utilities.functions import mark_process
from mindsdb.interfaces.database.database import DatabaseWrapper
@@ -104,12 +107,45 @@ def _unpack_old_args(self, from_data: dict, kwargs: dict, to_predict: Optional[U
@mark_process(name='learn')
def learn(self, name: str, from_data: dict, to_predict: str, datasource_id: int, kwargs: dict, company_id: int) -> None:
df, problem_definition, join_learn_process = self._unpack_old_args(from_data, kwargs, to_predict)
- p = LearnProcess(df, ProblemDefinition.from_dict(problem_definition), name, company_id, datasource_id)
+
+ problem_definition = ProblemDefinition.from_dict(problem_definition)
+ predictor_record = db.Predictor(
+ company_id=company_id,
+ name=name,
+ datasource_id=datasource_id,
+ mindsdb_version=mindsdb_version,
+ lightwood_version=lightwood_version,
+ to_predict=problem_definition.target,
+ learn_args=problem_definition.to_dict(),
+ data={'name': name}
+ )
+
+ db.session.add(predictor_record)
+ db.session.commit()
+ predictor_id = predictor_record.id
+
+ p = LearnProcess(df, problem_definition, predictor_id)
p.start()
if join_learn_process:
p.join()
if not IS_PY36:
p.close()
+ db.session.refresh(predictor_record)
+
+ data = {}
+ if predictor_record.update_status == 'available':
+ data['status'] = 'complete'
+ elif predictor_record.json_ai is None and predictor_record.code is None:
+ data['status'] = 'generating'
+ elif predictor_record.data is None:
+ data['status'] = 'editable'
+ elif 'training_log' in predictor_record.data:
+ data['status'] = 'training'
+ elif 'error' not in predictor_record.data:
+ data['status'] = 'complete'
+ else:
+ data['status'] = 'error'
+ print(f'!!!!===== {name} learn finished status={ data["status"]}')
@mark_process(name='predict')
def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_format: str, company_id: int):
@@ -136,6 +172,7 @@ def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_fo
'pickle': str(os.path.join(self.config['paths']['predictors'], fs_name))
}
else:
+ print(f'===== {name} predict={ predictor_record.data is None}')
raise Exception(f'Trying to predict using predictor {original_name} with status: {predictor_data["status"]}')
if isinstance(when_data, dict) and 'kwargs' in when_data and 'args' in when_data:
@@ -286,12 +323,31 @@ def update_model(self, name: str, company_id: int):
@mark_process(name='learn')
def generate_predictor(self, name: str, from_data: dict, datasource_id, problem_definition_dict: dict, join_learn_process: bool, company_id: int):
df, problem_definition, _ = self._unpack_old_args(from_data, problem_definition_dict)
- p = GenerateProcess(df, ProblemDefinition.from_dict(problem_definition), name, company_id, datasource_id)
+
+ problem_definition = ProblemDefinition.from_dict(problem_definition)
+
+ predictor_record = db.Predictor(
+ company_id=company_id,
+ name=name,
+ datasource_id=datasource_id,
+ mindsdb_version=mindsdb_version,
+ lightwood_version=lightwood_version,
+ to_predict=problem_definition.target,
+ learn_args=problem_definition.to_dict(),
+ data={'name': name}
+ )
+
+ db.session.add(predictor_record)
+ db.session.commit()
+ predictor_id = predictor_record.id
+
+ p = GenerateProcess(df, problem_definition, predictor_id)
p.start()
if join_learn_process:
p.join()
if not IS_PY36:
p.close()
+ db.session.refresh(predictor_record)
def edit_json_ai(self, name: str, json_ai: dict, company_id=None):
predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
diff --git a/mindsdb/streams/stream_controller.py b/mindsdb/streams/stream_controller.py
--- a/mindsdb/streams/stream_controller.py
+++ b/mindsdb/streams/stream_controller.py
@@ -1,4 +1,3 @@
-from copy import deepcopy
import os
from threading import Event, Thread
from time import time
@@ -19,7 +18,7 @@ def __init__(self, name, predictor, stream_in, stream_out, anomaly_stream=None,
self.stream_in = stream_in
self.stream_out = stream_out
self.anomaly_stream = anomaly_stream
-
+
self.learning_stream = learning_stream
self.learning_threshold = learning_threshold
self.learning_data = []
@@ -36,8 +35,8 @@ def __init__(self, name, predictor, stream_in, stream_out, anomaly_stream=None,
self.target = p.to_predict[0]
- ts_settings = p.learn_args.get('timeseries_settings', None)
- if not ts_settings['is_timeseries']:
+ ts_settings = p.learn_args.get('timeseries_settings')
+ if isinstance(ts_settings, dict) is False or len(ts_settings) == 0:
ts_settings = None
if ts_settings is None:
| diff --git a/tests/integration_tests/flows/test_redis.py b/tests/integration_tests/flows/test_redis.py
--- a/tests/integration_tests/flows/test_redis.py
+++ b/tests/integration_tests/flows/test_redis.py
@@ -71,11 +71,11 @@ def test_length(self):
def upload_ds(self, name):
df = pd.DataFrame({
- 'group': ["A" for _ in range(100, 210)],
- 'order': [x for x in range(100, 210)],
- 'x1': [x for x in range(100,210)],
- 'x2': [x*2 for x in range(100,210)],
- 'y': [x*3 for x in range(100,210)]
+ 'group': ["A" for _ in range(100, 250)],
+ 'order': [x for x in range(100, 250)],
+ 'x1': [x for x in range(100, 250)],
+ 'x2': [x * 2 for x in range(100, 250)],
+ 'y': [x * 3 for x in range(100, 250)]
})
with tempfile.NamedTemporaryFile(mode='w+', newline='', delete=False) as f:
df.to_csv(f, index=False)
@@ -127,7 +127,7 @@ def train_ts_predictor(self, ds_name, predictor_name, with_gb=True):
url = f'{HTTP_API_ROOT}/predictors/{predictor_name}'
res = requests.put(url, json=params)
res.raise_for_status()
-
+
def test_1_create_integration(self):
print(f"\nExecuting {self._testMethodName}")
url = f'{HTTP_API_ROOT}/config/integrations/{INTEGRATION_NAME}'
@@ -138,12 +138,11 @@ def test_1_create_integration(self):
res = requests.put(url, json={"params": params})
self.assertEqual(res.status_code, 200)
-
+
def test_2_create_redis_stream(self):
print(f"\nExecuting {self._testMethodName}")
self.upload_ds(DS_NAME)
self.train_predictor(DS_NAME, DEFAULT_PREDICTOR)
- time.sleep(30)
url = f'{HTTP_API_ROOT}/streams/{NORMAL_STREAM_NAME}'
res = requests.put(url, json={
@@ -189,8 +188,8 @@ def test_5_making_ts_stream_prediction(self):
stream_in = RedisStream(STREAM_IN_TS, CONNECTION_PARAMS)
stream_out = RedisStream(STREAM_OUT_TS, CONNECTION_PARAMS)
- for x in range(210, 221):
- stream_in.write({'x1': x, 'x2': 2*x, 'order': x, 'group': "A", 'y': 3*x})
+ for x in range(230, 241):
+ stream_in.write({'x1': x, 'x2': 2 * x, 'order': x, 'group': "A", 'y': 3 * x})
time.sleep(0.01)
time.sleep(10)
@@ -214,7 +213,7 @@ def test_6_create_stream_redis_native_api(self):
stream_out = RedisStream(STREAM_OUT_NATIVE, CONNECTION_PARAMS)
for x in range(1, 3):
- stream_in.write({'x1': x, 'x2': 2*x})
+ stream_in.write({'x1': x, 'x2': 2 * x})
time.sleep(5)
time.sleep(30)
@@ -262,7 +261,7 @@ def test_9_making_ts_stream_prediction_no_group(self):
stream_out = RedisStream(STREAM_OUT, CONNECTION_PARAMS)
for x in range(210, 221):
- stream_in.write({'x1': x, 'x2': 2*x, 'order': x, 'y': 3*x})
+ stream_in.write({'x1': x, 'x2': 2*x, 'order': x, 'y': 3 * x})
time.sleep(5)
self.assertEqual(len(list(stream_out.read())), 2)
| `PUT predictor` must not return an answer while the record in the db is not yet created.
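The patch addresses this by creating the `Predictor` row synchronously inside `learn()` before the training subprocess starts, so the route only answers once the record exists. A condensed sketch of that ordering (names follow the patch above; not a drop-in implementation):
```
def learn(name, df, problem_definition, datasource_id, company_id):
    # 1. Create the DB record in the calling process, so any request that
    #    arrives right after the PUT can already see the predictor.
    predictor_record = db.Predictor(
        company_id=company_id,
        name=name,
        datasource_id=datasource_id,
        to_predict=problem_definition.target,
        learn_args=problem_definition.to_dict(),
        data={'name': name},
    )
    db.session.add(predictor_record)
    db.session.commit()

    # 2. Only then hand the heavy work to a separate process.
    p = LearnProcess(df, problem_definition, predictor_record.id)
    p.start()
```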
| 2021-09-28T10:39:08Z | [] | [] |
|
mindsdb/mindsdb | 1,601 | mindsdb__mindsdb-1601 | [
"1564"
] | 227f56902a60b97c0f149cce85974562f34e93e9 | diff --git a/mindsdb/__about__.py b/mindsdb/__about__.py
--- a/mindsdb/__about__.py
+++ b/mindsdb/__about__.py
@@ -1,6 +1,6 @@
__title__ = 'MindsDB'
__package_name__ = 'mindsdb'
-__version__ = '2.54.0'
+__version__ = '2.55.0'
__description__ = "MindsDB server, provides server capabilities to mindsdb native python library"
__email__ = "[email protected]"
__author__ = 'MindsDB Inc'
diff --git a/mindsdb/__main__.py b/mindsdb/__main__.py
--- a/mindsdb/__main__.py
+++ b/mindsdb/__main__.py
@@ -8,6 +8,7 @@
import psutil
import torch.multiprocessing as mp
+from packaging import version
from mindsdb.utilities.config import Config, STOP_THREADS_EVENT
from mindsdb.interfaces.model.model_interface import ray_based, ModelInterface, ModelInterfaceWrapper
@@ -15,9 +16,10 @@
from mindsdb.api.mysql.start import start as start_mysql
from mindsdb.api.mongo.start import start as start_mongo
from mindsdb.utilities.ps import is_pid_listen_port, get_child_pids
-from mindsdb.utilities.functions import args_parse
+from mindsdb.utilities.functions import args_parse, get_versions_where_predictors_become_obsolete
from mindsdb.interfaces.database.database import DatabaseWrapper
from mindsdb.utilities.log import log
+import mindsdb.interfaces.storage.db as db
from mindsdb.interfaces.database.integrations import get_db_integrations
@@ -83,6 +85,28 @@ def close_api_gracefully(apis):
is_cloud = config.get('cloud', False)
if not is_cloud:
+ # region Mark old predictors as outdated
+ is_modified = False
+ predictor_records = db.session.query(db.Predictor).all()
+ if len(predictor_records) > 0:
+ sucess, compatible_versions = get_versions_where_predictors_become_obsolete()
+ if sucess is True:
+ compatible_versions = [version.parse(x) for x in compatible_versions]
+ mindsdb_version_parsed = version.parse(mindsdb_version)
+ compatible_versions = [x for x in compatible_versions if x <= mindsdb_version_parsed]
+ if len(compatible_versions) > 0:
+ last_compatible_version = compatible_versions[-1]
+ for predictor_record in predictor_records:
+ if (
+ isinstance(predictor_record.mindsdb_version, str) is not None
+ and version.parse(predictor_record.mindsdb_version) < last_compatible_version
+ ):
+ predictor_record.update_status = 'available'
+ is_modified = True
+ if is_modified is True:
+ db.session.commit()
+ # endregion
+
for integration_name in get_db_integrations(COMPANY_ID, sensitive_info=True):
print(f"Setting up integration: {integration_name}")
if get_db_integration(integration_name, COMPANY_ID).get('publish', False):
diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -224,8 +224,7 @@ def get(self):
else:
mongo = True
- #cloud = ca.config_obj.get('cloud', False)
- cloud = True
+ cloud = ca.config_obj.get('cloud', False)
local_time = datetime.datetime.now(tzlocal())
local_timezone = local_time.tzname()
diff --git a/mindsdb/api/http/namespaces/datasource.py b/mindsdb/api/http/namespaces/datasource.py
--- a/mindsdb/api/http/namespaces/datasource.py
+++ b/mindsdb/api/http/namespaces/datasource.py
@@ -71,7 +71,11 @@ def delete(self, name):
request.default_store.delete_datasource(name)
except Exception as e:
log.error(e)
- abort(400, str(e))
+ return http_error(
+ 400,
+ f"Error deleting datasource",
+ f"There was an error while tring to delete datasource with name '{name}'"
+ )
return '', 200
@ns_conf.doc('put_datasource', params=put_datasource_params)
diff --git a/mindsdb/api/http/namespaces/predictor.py b/mindsdb/api/http/namespaces/predictor.py
--- a/mindsdb/api/http/namespaces/predictor.py
+++ b/mindsdb/api/http/namespaces/predictor.py
@@ -1,4 +1,3 @@
-import time
from dateutil.parser import parse as parse_datetime
from flask import request
from flask_restx import Resource, abort
@@ -10,6 +9,7 @@
put_predictor_params
)
+
@ns_conf.route('/')
class PredictorList(Resource):
@ns_conf.doc('list_predictors')
@@ -18,6 +18,7 @@ def get(self):
models = request.model_interface.get_models()
return models
+
@ns_conf.route('/<name>')
@ns_conf.param('name', 'The predictor identifier')
@ns_conf.response(404, 'predictor not found')
@@ -89,13 +90,6 @@ def put(self, name):
)
request.model_interface.learn(name, from_data, to_predict, request.default_store.get_datasource(ds_name)['id'], kwargs=kwargs)
- for i in range(20):
- try:
- # Dirty hack, we should use a messaging queue between the predictor process and this bit of the code
- request.model_interface.get_model_data(name)
- break
- except Exception:
- time.sleep(1)
if retrain is True:
try:
@@ -231,7 +225,7 @@ class PredictorEditCode(Resource):
def put(self, name):
request.model_interface.edit_code(name, request.json['code'])
return '', 200
-
+
@ns_conf.route('/<name>/train')
@ns_conf.param('name', 'The predictor identifier')
diff --git a/mindsdb/api/http/namespaces/stream.py b/mindsdb/api/http/namespaces/stream.py
--- a/mindsdb/api/http/namespaces/stream.py
+++ b/mindsdb/api/http/namespaces/stream.py
@@ -66,8 +66,11 @@ def put(self, name):
))
else:
+ # cloud
if 'type' not in params_keys:
return abort(404, "'type' parameter is required in case of cloud.")
+ #because '_' is not allowed in pod name - replace it.
+ name=name.replace('_', '-')
if db.session.query(db.Stream).filter_by(company_id=request.company_id, name=name).first() is not None:
return abort(404, 'Stream "{}" already exists'.format(name))
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/com_operators.py b/mindsdb/api/mysql/mysql_proxy/classes/com_operators.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/com_operators.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/com_operators.py
@@ -1,13 +1,6 @@
-from moz_sql_parser.keywords import join_keywords, binary_ops, unary_ops
import re
import operator
-unary_ops = list(unary_ops.values())
-binary_ops = list(binary_ops.values())
-
-unary_ops.extend(['missing', 'exists'])
-binary_ops.extend(['like', 'in', 'between'])
-
def f_and(*args):
for i in args:
@@ -38,41 +31,39 @@ def f_add(*args):
out += i
return out
-def f_ne(a,b):
- if a is None or b is None: return False
- return operator.ne(a,b)
-def f_eq(a,b):
- if a is None or b is None: return False
- return operator.eq(a,b)
+def f_ne(a, b):
+ if a is None or b is None:
+ return False
+ return operator.ne(a, b)
+
+
+def f_eq(a, b):
+ if a is None or b is None:
+ return False
+ return operator.eq(a, b)
+
operator_map = {
- 'concat': operator.concat,
- 'mul': operator.mul,
- 'div': operator.truediv,
- 'mod': operator.mod,
- 'add': f_add,
- 'sub': operator.sub,
- 'binary_and': operator.and_,
- 'binary_or': operator.or_,
- 'lt': operator.lt,
- 'lte': operator.le,
- 'gt': operator.gt,
- 'gte': operator.ge,
- 'eq': f_eq, # operator.eq,
- 'neq': f_ne, # operator.ne,
- 'nin': lambda v, l: v not in l,
- 'in': lambda v, l: v in l,
- 'nlike': lambda s, p: not f_like(s, p),
- 'like': f_like,
- 'not_between': lambda v, a, b: v < a or v > b,
- 'between': lambda v, a, b: v > a and v < b,
- 'or': f_or,
- 'and': f_and,
-
- 'missing': lambda x: x is None,
- 'exists': lambda x: x is not None,
-
- "neg": operator.neg,
- "binary_not": operator.inv,
+ '+': f_add,
+ '-': operator.sub,
+ '/': operator.truediv,
+ '*': operator.mul,
+ '%': operator.mod,
+ '=': f_eq,
+ '!=': f_ne,
+ '>': operator.gt,
+ '<': operator.lt,
+ '>=': operator.ge,
+ '<=': operator.le,
+ 'IS': operator.eq,
+ 'IS NOT': operator.ne,
+ 'LIKE': f_like,
+ 'NOT LIKE': lambda s, p: not f_like(s, p),
+ 'IN': lambda v, l: v in l,
+ 'NOT IN': lambda v, l: v not in l,
+ 'AND': f_and,
+ 'OR': f_or,
+ '||': f_add
+ # binary and, binary not, exists, missing, etc
}
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/com_operators_new.py b/mindsdb/api/mysql/mysql_proxy/classes/com_operators_new.py
deleted file mode 100644
--- a/mindsdb/api/mysql/mysql_proxy/classes/com_operators_new.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# from moz_sql_parser.keywords import join_keywords, binary_ops, unary_ops
-import re
-import operator
-
-# unary_ops = list(unary_ops.values())
-# binary_ops = list(binary_ops.values())
-
-# unary_ops.extend(['missing', 'exists'])
-# binary_ops.extend(['like', 'in', 'between'])
-
-
-def f_and(*args):
- for i in args:
- if not i:
- return False
- return True
-
-
-def f_or(*args):
- for i in args:
- if i:
- return True
- return False
-
-
-def f_like(s, p):
- p = '^{}$'.format(p.replace('%', '[\s\S]*'))
-
- return re.match(p, s) is not None
-
-
-def f_add(*args):
- # strings and numbers are supported
- # maybe it is not true sql-way
-
- out = args[0] + args[1]
- for i in args[2:]:
- out += i
- return out
-
-
-def f_ne(a, b):
- if a is None or b is None:
- return False
- return operator.ne(a, b)
-
-
-def f_eq(a, b):
- if a is None or b is None:
- return False
- return operator.eq(a, b)
-
-
-operator_map = {
- '+': f_add,
- '-': operator.sub,
- '/': operator.truediv,
- '*': operator.mul,
- '%': operator.mod,
- '=': f_eq,
- '!=': f_ne,
- '>': operator.gt,
- '<': operator.lt,
- '>=': operator.ge,
- '<=': operator.le,
- 'IS': operator.eq,
- 'IS NOT': operator.ne,
- 'LIKE': f_like,
- 'NOT LIKE': lambda s, p: not f_like(s, p),
- 'IN': lambda v, l: v in l,
- 'NOT IN': lambda v, l: v not in l,
- 'AND': f_and,
- 'OR': f_or,
- '||': f_add
- # binary and, binary not, exists, missing, etc
-}
-
-
-# operator_map_old = {
-# 'concat': operator.concat,
-# 'mul': operator.mul,
-# 'div': operator.truediv,
-# 'mod': operator.mod,
-# 'add': f_add,
-# 'sub': operator.sub,
-# 'binary_and': operator.and_,
-# 'binary_or': operator.or_,
-# 'lt': operator.lt,
-# 'lte': operator.le,
-# 'gt': operator.gt,
-# 'gte': operator.ge,
-# 'eq': f_eq, # operator.eq,
-# 'neq': f_ne, # operator.ne,
-# 'nin': lambda v, l: v not in l,
-# 'in': lambda v, l: v in l,
-# 'nlike': lambda s, p: not f_like(s, p),
-# 'like': f_like,
-# 'not_between': lambda v, a, b: v < a or v > b,
-# 'between': lambda v, a, b: v > a and v < b,
-# 'or': f_or,
-# 'and': f_and,
-
-# 'missing': lambda x: x is None,
-# 'exists': lambda x: x is not None,
-
-# "neg": operator.neg,
-# "binary_not": operator.inv,
-# }
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -43,7 +43,7 @@
JoinStep
)
-from mindsdb.api.mysql.mysql_proxy.classes.com_operators_new import operator_map as new_operator_map
+from mindsdb.api.mysql.mysql_proxy.classes.com_operators import operator_map
from mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import TYPES, ERR
from mindsdb.api.mysql.mysql_proxy.utilities import log
from mindsdb.interfaces.ai_table.ai_table import AITableStore
@@ -176,10 +176,15 @@ def _fetch_dataframe_step(self, step):
table_alias = get_table_alias(step.query.from_table, self.database)
# TODO for information_schema we have 'database' = 'mindsdb'
- data = dn.select_query(
+ data, column_names = dn.select(
query=query
)
+ self.columns_list = [
+ table_alias + (column_name, column_name)
+ for column_name in column_names
+ ]
+
for i, row in enumerate(data):
row['__mindsdb_row_id'] = self.row_id + i
self.row_id = self.row_id + len(data)
@@ -223,7 +228,11 @@ def _parse_query(self, sql):
sql = sql[:sql.lower().find('where 1=0')] + ' limit 0'
is_crutch = True
# ---
- mindsdb_sql_struct = parse_sql(sql, dialect='mindsdb')
+
+ # +++ https://github.com/mindsdb/mindsdb_sql/issues/64
+ str_sql = sql.replace(' status ', ' `status` ')
+ # ---
+ mindsdb_sql_struct = parse_sql(str_sql, dialect='mindsdb')
# is it query with only constants?
if (
@@ -335,8 +344,9 @@ def _parse_query(self, sql):
values = []
for row in step_data:
for row_data in row.values():
- for v in row_data.values():
- values.append(v)
+ for name, value in row_data.items():
+ if name != '__mindsdb_row_id':
+ values.append(value)
data = []
substep = step.step
@@ -416,8 +426,7 @@ def _parse_query(self, sql):
columns=None,
where_data=where_data,
integration_name=self.session.integration,
- integration_type=self.session.integration_type,
- is_timeseries=_mdb_make_predictions
+ integration_type=self.session.integration_type
)
data = [{get_preditor_alias(step, self.database): x} for x in data]
elif isinstance(step, JoinStep):
@@ -611,7 +620,7 @@ def _apply_where_filter(self, row, where):
elif not isinstance(where, (UnaryOperation, BinaryOperation)):
Exception(f'Unknown operation type: {where}')
- op_fn = new_operator_map.get(where.op)
+ op_fn = operator_map.get(where.op)
if op_fn is None:
raise Exception(f'unknown operator {where.op}')
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py
@@ -1,3 +1,6 @@
+import re
+import json
+
from pyparsing import (
CaselessKeyword,
ParseException,
@@ -23,10 +26,10 @@
alphas,
nums
)
-import re
-import json
-
-import moz_sql_parser as moz_sql
+from mindsdb_sql.parser.ast import (
+ Join
+)
+from mindsdb_sql import parse_sql
RE_INT = re.compile(r'^[-+]?([1-9]\d*|0)$')
@@ -73,6 +76,8 @@ def __init__(self, text, init_parse=True):
self._struct = self.parse_as_insert()
elif self._keyword == 'delete':
self._struct = self.parse_as_delete()
+ elif self._keyword == 'drop':
+ self._struct = None
elif self._keyword == 'create_predictor':
self._struct = self.parse_as_create_predictor()
elif self._keyword in 'create_ai_table':
@@ -136,12 +141,13 @@ def get_keyword(sql):
rollback
commit
explain
+ drop
create_predictor
create_ai_table
'''
- START, SET, USE, SHOW, DELETE, INSERT, UPDATE, ALTER, SELECT, ROLLBACK, COMMIT, EXPLAIN, CREATE, AI, TABLE, PREDICTOR, VIEW = map(
- CaselessKeyword, "START SET USE SHOW DELETE INSERT UPDATE ALTER SELECT ROLLBACK COMMIT EXPLAIN CREATE AI TABLE PREDICTOR VIEW".split()
+ START, SET, USE, SHOW, DELETE, INSERT, UPDATE, ALTER, SELECT, ROLLBACK, COMMIT, EXPLAIN, CREATE, AI, TABLE, PREDICTOR, VIEW, DROP = map(
+ CaselessKeyword, "START SET USE SHOW DELETE INSERT UPDATE ALTER SELECT ROLLBACK COMMIT EXPLAIN CREATE AI TABLE PREDICTOR VIEW DROP".split()
)
CREATE_PREDICTOR = CREATE + PREDICTOR
CREATE_AI_TABLE = CREATE + AI + TABLE
@@ -153,7 +159,7 @@ def get_keyword(sql):
| UPDATE | ALTER | SELECT
| ROLLBACK | COMMIT | EXPLAIN
| CREATE_PREDICTOR | CREATE_AI_TABLE
- | CREATE_VIEW
+ | CREATE_VIEW | DROP
)('keyword')
r = expr.parseString(sql)
@@ -243,55 +249,39 @@ def parse_as_create_ai_table(self) -> dict:
r['select'] = r['select'][1:-1]
r['select'] = r['select'].strip(' \n')
- select = moz_sql.parse(r['select'])
+ select = parse_sql(r['select'])
- if 'from' not in select \
- or len(select['from']) != 2 \
- or 'join' not in select['from'][1]:
+ if isinstance(select.from_table, Join) is False:
raise Exception("'from' must be like: 'from integration.table join predictor'")
- # add 'name' to each statement
- for s in [*select['select'], select['from'][0], select['from'][1]['join']]:
- if 'name' not in s:
- if '.' in s['value']:
- s['name'] = s['value'][s['value'].find('.') + 1:]
- else:
- s['name'] = s['value']
-
- f = {
- 'integration': select['from'][0],
- 'predictor': select['from'][1]['join']
- }
+ integration_name = select.from_table.left.parts[0]
+ select.from_table.left.parts = select.from_table.left.parts[1:]
+ integration_name_alias = select.from_table.left.alias.parts[0]
- # remove predictor join
- select['from'].pop()
+ predictor_name = select.from_table.right.parts[0]
+ predictor_name_alias = select.from_table.right.alias.parts[0]
+ select.from_table = select.from_table.left
- new_select = []
+ query_fields = []
predictor_fields = []
- integration_prefix = f"{f['integration']['name']}."
- for s in select['select']:
- if s['value'].startswith(integration_prefix):
- s['value'] = s['value'][len(integration_prefix):]
- new_select.append(s)
- else:
- predictor_fields.append(s)
+ predictor_fields_targets = []
- predictor_prefix = f"{f['predictor']['name']}."
- for pf in predictor_fields:
- if pf['value'].startswith(predictor_prefix):
- pf['value'] = pf['value'][len(predictor_prefix):]
+ integration_sql = str(select)
- integration_name = f['integration']['value'][:f['integration']['value'].find('.')]
- f['integration']['value'] = f['integration']['value'][len(integration_name) + 1:]
- select['select'] = new_select
- integration_sql = moz_sql.format(select)
+ for target in select.targets:
+ if target.parts[0] == integration_name_alias:
+ query_fields.append(target.parts[1])
+ predictor_fields_targets.append(target)
+ elif target.parts[0] == predictor_name_alias:
+ predictor_fields.append(target.parts[1])
+ select.targets = predictor_fields_targets
res = {
'ai_table_name': r['ai_table_name'],
'integration_name': integration_name,
'integration_query': integration_sql,
- 'query_fields': select['select'],
- 'predictor_name': f['predictor']['value'],
+ 'query_fields': query_fields,
+ 'predictor_name': predictor_name,
'predictor_fields': predictor_fields
}
@@ -579,9 +569,11 @@ def check_recursive(a, b):
''',
{
'ai_table_name': 'ai_table_name',
- 'integration_name': 'integration',
- 'select': 'select * from table',
- 'predictor_name': 'model_name'
+ 'integration_name': 'integration_name',
+ 'integration_query': 'SELECT a.col1, a.col2, a.col3, p.col3 AS pred_col3 FROM table_name AS a',
+ 'query_fields': ['col1', 'col2', 'col3'],
+ 'predictor_name': 'predictor_name',
+ 'predictor_fields': ['col3']
}
]]
for sql, result in tests:
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/datasource_datanode.py
@@ -19,15 +19,11 @@ def getTableColumns(self, table):
ds = self.datastore.get_datasource(table)
return [x['name'] for x in ds['columns']]
- def select_query(self, query):
+ def select(self, query):
query_tables = get_all_tables(query)
if len(query_tables) != 1:
raise Exception(f'Only one table can be used in query to information_schema: {query}')
data = self.datastore.get_data(query_tables[0], where=None, limit=None, offset=None)
- return data['data']
-
- def select(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, came_from=None):
- data = self.datastore.get_data(table, where=None, limit=None, offset=None)
- return data['data']
+ return data['data'], data['columns_names']
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
@@ -1,5 +1,4 @@
import pandas as pd
-from moz_sql_parser import format
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
@@ -23,55 +22,17 @@ def hasTable(self, tableName):
def getTableColumns(self, tableName):
return []
- def select_query(self, query):
+ def select(self, query):
sql_query = str(query)
dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query})
data = dso.df.to_dict(orient='records')
+ column_names = list(dso.df.columns)
- for column_name in dso.df.columns:
+ for column_name in column_names:
if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):
pass_data = dso.df[column_name].dt.to_pydatetime()
for i, rec in enumerate(data):
rec[column_name] = pass_data[i].timestamp()
- return data
-
- def select(self, table=None, columns=None, where=None, where_data=None, order_by=None, group_by=None, came_from=None):
- has_where = isinstance(where, (dict, list)) and len(where) > 0
-
- if isinstance(where, dict):
- where = [where]
-
- if isinstance(where, list):
- for el in where:
- if isinstance(el, dict):
- for key in el:
- if isinstance(el[key], list) and len(el[key]) > 0 and isinstance(el[key][0], str) and '.' in el[key][0]:
- el[key][0] = el[key][0][el[key][0].find('.') + 1:]
- where = {'and': where}
-
- format_data = {
- 'from': table,
- 'select': columns
- }
- if has_where:
- format_data['where'] = where
-
- query = format(format_data)
-
- ds_name = self.data_store.get_vacant_name('temp')
- self.data_store.save_datasource(ds_name, self.integration_name, {'query': query})
- dso = self.data_store.get_datasource_obj(ds_name)
-
- data = dso.df.to_dict(orient='records')
-
- for column_name in dso.df.columns:
- if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(dso.df[column_name]):
- pass_data = dso.df[column_name].dt.to_pydatetime()
- for i, rec in enumerate(data):
- rec[column_name] = pass_data[i].timestamp()
-
- self.data_store.delete_datasource(ds_name)
-
- return data
+ return data, column_names
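
The loop above converts datetime columns to POSIX timestamps before rows are handed back to the SQL layer.
A standalone illustration of the same pattern (the DataFrame contents are invented for the example):

    import pandas as pd

    df = pd.DataFrame({'created_at': pd.to_datetime(['2021-01-01', '2021-06-15'])})
    data = df.to_dict(orient='records')
    if pd.core.dtypes.common.is_datetime_or_timedelta_dtype(df['created_at']):
        pass_data = df['created_at'].dt.to_pydatetime()
        for i, rec in enumerate(data):
            rec['created_at'] = pass_data[i].timestamp()
    print(data)  # datetime values replaced by float timestamps
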
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -16,7 +16,6 @@
from mindsdb.utilities.functions import cast_row_types
from mindsdb.utilities.config import Config
from mindsdb.interfaces.database.integrations import get_db_integration
-from mindsdb.api.mysql.mysql_proxy.utilities.sql import to_moz_sql_struct
class NumpyJSONEncoder(json.JSONEncoder):
@@ -146,9 +145,13 @@ def get_predictors(self, mindsdb_sql_query):
predictors_df = self._select_predictors()
mindsdb_sql_query.from_table.parts = ['predictors']
+ # +++ https://github.com/mindsdb/mindsdb_sql/issues/64
+ str_query = str(mindsdb_sql_query).replace('status', '`status`')
+ # ---
+
# +++ FIXME https://github.com/mindsdb/dfsql/issues/37 https://github.com/mindsdb/mindsdb_sql/issues/53
- if ' 1 = 0' in str(mindsdb_sql_query):
- q = str(mindsdb_sql_query)
+        if ' 1 = 0' in str_query:
+ q = str_query
q = q[:q.lower().find('where')] + ' limit 0'
result_df = dfsql.sql_query(
q,
@@ -156,9 +159,8 @@ def get_predictors(self, mindsdb_sql_query):
reduce_output=False,
predictors=predictors_df
)
- elif 'AND (1 = 1)' in str(mindsdb_sql_query):
- q = str(mindsdb_sql_query)
- q = q.replace('AND (1 = 1)', ' ')
+ elif 'AND (1 = 1)' in str_query:
+ q = str_query.replace('AND (1 = 1)', ' ')
result_df = dfsql.sql_query(
q,
ds_kwargs={'case_sensitive': False},
@@ -169,7 +171,7 @@ def get_predictors(self, mindsdb_sql_query):
# ---
try:
result_df = dfsql.sql_query(
- str(mindsdb_sql_query),
+ str_query,
ds_kwargs={'case_sensitive': False},
reduce_output=False,
predictors=predictors_df
@@ -183,19 +185,9 @@ def get_predictors(self, mindsdb_sql_query):
return result_df.to_dict(orient='records'), list(result_df.columns)
- def select_query(self, query):
- moz_struct = to_moz_sql_struct(query)
- data = self.select(
- table=query.from_table.parts[-1],
- columns=None,
- where=moz_struct.get('where')
- )
- return data
-
- def select(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, integration_name=None, integration_type=None, is_timeseries=False):
+ def select(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, integration_name=None, integration_type=None):
''' NOTE WHERE statements can be just $eq joined with 'and'
'''
- _mdb_make_predictions = is_timeseries
if table == 'predictors':
return self._select_predictors()
if table == 'commands':
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py b/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
@@ -36,7 +36,6 @@ def add(self, dsObject):
self.index[key.upper()] = val
def get(self, name):
- # INFORMATION_SCHEMA.SCHEMATA
if name.upper() == 'INFORMATION_SCHEMA':
return self
ds = self.index.get(name.upper())
@@ -64,6 +63,7 @@ def _get_tables(self):
data = [
['SCHEMATA', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci'],
['TABLES', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci'],
+ ['COLUMNS', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci'],
['EVENTS', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci'],
['ROUTINES', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci'],
['TRIGGERS', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci']
@@ -146,7 +146,7 @@ def _get_triggers(self):
df = pd.DataFrame(data, columns=columns)
return df
- def select_query(self, query):
+ def select(self, query):
query_tables = get_all_tables(query)
if len(query_tables) != 1:
@@ -169,7 +169,7 @@ def select_query(self, query):
raise Exception('Information schema: Not implemented.')
table_name = query.from_table.parts[-1]
- # +++ FIXME https://github.com/mindsdb/dfsql/issues/37 https://github.com/mindsdb/mindsdb_sql/issues/53
+ # region FIXME https://github.com/mindsdb/dfsql/issues/37 https://github.com/mindsdb/mindsdb_sql/issues/53
if ' 1 = 0' in str(query):
q = str(query)
q = q[:q.lower().find('where')] + ' limit 0'
@@ -179,77 +179,21 @@ def select_query(self, query):
reduce_output=False,
**{table_name: dataframe}
)
+ # endregion
else:
# ---
- data = dfsql.sql_query(
- str(query),
- ds_kwargs={'case_sensitive': False},
- reduce_output=False,
- **{table_name: dataframe}
- )
-
- return data.to_dict(orient='records')
-
- def select(self, columns=None, table=None, where=None, order_by=None, group_by=None, came_from=None):
- tn = table.upper()
- if tn == 'SCHEMATA':
- # there is two query we can process, both hardcoded:
- # SELECT schema_name as name FROM INFORMATION_SCHEMA.SCHEMATA;
- # SELECT default_character_set_name as CharacterSetName, default_collation_name as CollationName FROM INFORMATION_SCHEMA.SCHEMATA WHERE schema_name = 'information_schema';
- if len(columns) == 1 and columns[0] == 'schema_name':
- data = [{'schema_name': 'INFORMATION_SCHEMA'}]
- for key in self.index:
- data.append({
- 'schema_name': key.upper()
- })
- return data
- elif len(columns) == 3 and where is not None and 'schema_name' in where:
- return [{
- 'schema_name': where['schema_name']['$eq'],
- 'default_character_set_name': 'utf8',
- 'default_collation_name': 'utf8_general_ci'
- }]
- if tn == 'TABLES':
- # query examples:
- # SELECT table_name FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'information_schema' AND table_type in ('BASE TABLE', 'SYSTEM VIEW');
- # SELECT table_name as name FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = 'information_schema' AND table_type = 'VIEW';
- # TODO add real table rows
- tables = [
- # at least this tables should be returned for GUI clients
- {'table_name': 'SCHEMATA', 'table_schema': 'information_schema', 'table_type': 'SYSTEM VIEW', 'table_rows': []},
- {'table_name': 'TABLES', 'table_schema': 'information_schema', 'table_type': 'SYSTEM VIEW', 'table_rows': []},
- {'table_name': 'EVENTS', 'table_schema': 'information_schema', 'table_type': 'SYSTEM VIEW', 'table_rows': []},
- {'table_name': 'ROUTINES', 'table_schema': 'information_schema', 'table_type': 'SYSTEM VIEW', 'table_rows': []},
- {'table_name': 'TRIGGERS', 'table_schema': 'information_schema', 'table_type': 'SYSTEM VIEW', 'table_rows': []},
- ]
- for dsName, ds in self.index.items():
- t = ds.getTables()
- tables += [{'table_name': x, 'table_schema': dsName, 'table_type': 'BASE TABLE', 'table_rows': []} for x in t]
-
- filtered_tables = tables
- if isinstance(where, dict) and 'table_schema' in where:
- schema = where['table_schema']['$eq']
- filtered_tables = [x for x in filtered_tables if x['table_schema'].upper() == schema.upper()]
-
- if isinstance(where, dict) and 'table_type' in where:
- types = []
- if '$eq' in where['table_type']:
- types = [where['table_type']['$eq'].upper()]
- if '$in' in where['table_type']:
- types += [x.upper() for x in where['table_type']['$in']]
- filtered_tables = [x for x in filtered_tables if x['table_type'] in types]
- return filtered_tables
- if tn == 'COLUMNS':
- # SELECT * FROM information_schema.COLUMNS WHERE TABLE_SCHEMA='CSV_DS' AND TABLE_NAME='part' ORDER BY ORDINAL_POSITION
- return []
- if tn == 'EVENTS':
- # SELECT event_name as name FROM INFORMATION_SCHEMA.EVENTS WHERE event_schema = 'information_schema';
- return []
- if tn == 'ROUTINES':
- # SELECT specific_name as name FROM INFORMATION_SCHEMA.ROUTINES WHERE routine_schema = 'information_schema' AND routine_type = 'FUNCTION';
- return []
- if tn == 'TRIGGERS':
- # SELECT trigger_name as name FROM INFORMATION_SCHEMA.TRIGGERS WHERE trigger_schema = 'information_schema';
- return []
-
- return []
+ try:
+ if table == 'TABLES':
+ query = 'select * from TABLES'
+ table_name = 'TABLES'
+ data = dfsql.sql_query(
+ str(query),
+ ds_kwargs={'case_sensitive': False},
+ reduce_output=False,
+ **{table_name: dataframe}
+ )
+ except Exception as e:
+ print(f'Exception! {e}')
+ return [], []
+
+ return data.to_dict(orient='records'), data.columns.to_list()
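
The call shape used above -- running SQL over an in-memory DataFrame with dfsql.sql_query -- can be exercised
on its own. The frame below is invented for the example and the keyword arguments simply mirror the ones
already used in this file; dfsql behaviour beyond that signature is assumed:

    import pandas as pd
    import dfsql

    tables_df = pd.DataFrame(
        [['SCHEMATA', 'information_schema'], ['TABLES', 'information_schema']],
        columns=['TABLE_NAME', 'TABLE_SCHEMA']
    )
    result = dfsql.sql_query(
        'select TABLE_NAME from TABLES',
        ds_kwargs={'case_sensitive': False},
        reduce_output=False,
        **{'TABLES': tables_df}
    )
    print(result.to_dict(orient='records'), result.columns.to_list())
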
diff --git a/mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py b/mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py
--- a/mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py
+++ b/mindsdb/api/mysql/mysql_proxy/libs/constants/mysql.py
@@ -977,10 +977,13 @@ class WARN(object):
'@@session.tx_read_only': ('0', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
'@@version_comment': ('(MindsDB)', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
- '@@version': ('0.1', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
+ '@@version': ('8.0.17', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
'@@collation_connection': ('utf8_general_ci', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
- '@@performance_schema': (1, TYPES.MYSQL_TYPE_LONGLONG, CHARSET_NUMBERS['binary'])
+ '@@performance_schema': (1, TYPES.MYSQL_TYPE_LONGLONG, CHARSET_NUMBERS['binary']),
+
+ '@@GLOBAL.transaction_isolation': ('REPEATABLE-READ', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
+ '@@transaction_isolation': ('REPEATABLE-READ', TYPES.MYSQL_TYPE_VAR_STRING, CHARSET_NUMBERS['utf8_general_ci']),
}
class SESSION_TRACK(object):
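
Each SERVER_VARIABLES entry above is a (value, MySQL type, charset number) tuple, so a lookup in the proxy
unpacks roughly like this (sketch; the surrounding proxy code is omitted):

    from mindsdb.api.mysql.mysql_proxy.libs.constants.mysql import SERVER_VARIABLES

    value, mysql_type, charset_number = SERVER_VARIABLES['@@transaction_isolation']
    print(value)  # 'REPEATABLE-READ'
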
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -14,7 +14,6 @@
import sys
import socketserver as SocketServer
import ssl
-import re
import traceback
import json
import atexit
@@ -27,7 +26,24 @@
import select
import base64
-import moz_sql_parser as sql_parser
+import pandas as pd
+import dfsql
+from mindsdb_sql import parse_sql
+from mindsdb_sql.parser.ast import (
+ RollbackTransaction,
+ CommitTransaction,
+ StartTransaction,
+ BinaryOperation,
+ Identifier,
+ Constant,
+ Function,
+ Explain,
+ Select,
+ Show,
+ Set,
+)
+from mindsdb_sql.parser.dialects.mysql import Variable
+from mindsdb_sql.parser.dialects.mindsdb import DropPredictor, DropIntegration
from mindsdb.utilities.wizards import make_ssl_cert
from mindsdb.utilities.config import Config
@@ -136,18 +152,17 @@ class MysqlProxy(SocketServer.BaseRequestHandler):
The Main Server controller class
"""
- charset = 'utf8'
-
- charset_text_type = CHARSET_NUMBERS['utf8_general_ci']
-
- session = None
-
- client_capabilities = None
-
@staticmethod
def server_close(srv):
srv.server_close()
+ def __init__(self, request, client_address, server):
+ self.charset = 'utf8'
+ self.charset_text_type = CHARSET_NUMBERS['utf8_general_ci']
+ self.session = None
+ self.client_capabilities = None
+ super().__init__(request, client_address, server)
+
def init_session(self, company_id=None):
global connection_id
log.debug('New connection [{ip}:{port}]'.format(
@@ -304,106 +319,6 @@ def sendPackageGroup(self, packages):
string = b''.join([x.accum() for x in packages])
self.socket.sendall(string)
- def answer_version(self):
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'table_name': '',
- 'name': 'version()',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }],
- data=['0.1']
- )
- if self.client_capabilities.DEPRECATE_EOF is True:
- packages.append(self.packet(OkPacket, eof=True))
- else:
- packages.append(self.packet(EofPacket))
- self.sendPackageGroup(packages)
-
- def answer_current_user(self):
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'table_name': '',
- 'name': 'current_user()',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }],
- data=['mindsdb']
- )
- if self.client_capabilities.DEPRECATE_EOF is True:
- packages.append(self.packet(OkPacket, eof=True))
- else:
- packages.append(self.packet(EofPacket))
- self.sendPackageGroup(packages)
-
- def answer_show_variables(self, sql):
- sql_lower = sql.lower()
- if 'show variables' in sql_lower:
- variables = re.findall(r"variable_name='([a-zA-Z_]*)'", sql_lower)
- elif "show variables like" in sql_lower:
- variables = re.findall(r"show variables like '([a-zA-Z_]*)'", sql_lower)
- elif "show session variables like" in sql_lower:
- variables = re.findall(r"show session variables like '([a-zA-Z_]*)'", sql_lower)
- elif 'show session status like' in sql_lower:
- variables = re.findall(r"show session variables like '([a-zA-Z_]*)'", sql_lower)
- elif 'show global variables' in sql_lower:
- variables = [x for x in SERVER_VARIABLES if x.startswith('@@session.') is False]
-
- data = []
- for variable_name in variables:
- variable_data = SERVER_VARIABLES.get(f'@@{variable_name}')
- if variable_data is None:
- variable_data = ['']
- data.append([variable_name, variable_data[0]])
-
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'table_name': 'session_variables',
- 'name': 'Variable_name',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }, {
- 'table_name': 'session_variables',
- 'name': 'Value',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }],
- data=data
- )
- if self.client_capabilities.DEPRECATE_EOF is True:
- packages.append(self.packet(OkPacket, eof=True))
- else:
- packages.append(self.packet(EofPacket))
- self.sendPackageGroup(packages)
-
- def answerVersionComment(self):
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'table_name': '',
- 'name': '@@version_comment',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }],
- data=[{'@@version_comment': '(MindsDB)'}]
- )
- if self.client_capabilities.DEPRECATE_EOF is True:
- packages.append(self.packet(OkPacket, eof=True))
- else:
- packages.append(self.packet(EofPacket))
- self.sendPackageGroup(packages)
-
- def answerVersion(self):
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'table_name': '',
- 'name': '@@version',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }],
- data=[{'@@version': '0.1'}]
- )
- packages.append(self.packet(OkPacket, eof=True))
- self.sendPackageGroup(packages)
-
def answerTableQuery(self, query):
packages = []
packages += self.getTabelPackets(
@@ -558,7 +473,7 @@ def answer_create_predictor(self, struct):
else:
kwargs['timeseries_settings'].update(timeseries_settings)
- model_interface.learn(predictor_name, ds, predict, ds_data['id'], kwargs=kwargs)
+ model_interface.learn(predictor_name, ds, predict, ds_data['id'], kwargs=kwargs, delete_ds_on_fail=True)
self.packet(OkPacket).send()
@@ -885,12 +800,12 @@ def answer_stmt_fetch(self, stmt_id, limit=100000):
def answer_stmt_close(self, stmt_id):
self.session.unregister_stmt(stmt_id)
- def answer_explain_table(self, sql):
- parts = sql.split(' ')
- table = parts[1].lower()
- if table == 'predictors' or table == 'mindsdb.predictors':
+ def answer_explain_table(self, target):
+ db = ((self.session.database or 'mindsdb') if len(target) != 2 else target[0]).lower()
+ table = target[-1].lower()
+ if table == 'predictors' and db == 'mindsdb':
self.answer_explain_predictors()
- elif table == 'commands' or table == 'mindsdb.commands':
+ elif table == 'commands' and db == 'mindsdb':
self.answer_explain_commands()
else:
raise NotImplementedError("Only 'EXPLAIN predictors' and 'EXPLAIN commands' supported")
@@ -999,7 +914,7 @@ def answer_explain_commands(self):
self.sendPackageGroup(packages)
- def queryAnswer(self, sql):
+ def query_answer(self, sql):
# +++
# if query not for mindsdb then process that query in integration db
# TODO redirect only select data queries
@@ -1023,7 +938,7 @@ def queryAnswer(self, sql):
datanode = self.session.datahub.get(self.session.database)
if datanode is None:
raise Exception('datanode is none')
- result = datanode.select_query(sql.replace('`', ''))
+ result, _column_names = datanode.select(sql.replace('`', ''))
columns = []
data = []
@@ -1053,32 +968,130 @@ def queryAnswer(self, sql):
keyword = statement.keyword
struct = statement.struct
- if keyword == 'show':
- if 'show databases' in sql_lower or 'show schemas' in sql_lower:
- sql = 'select schema_name as Database from information_schema.SCHEMATA'
- statement = SqlStatementParser(sql)
- sql_lower = statement.sql.lower()
- keyword = statement.keyword
- struct = statement.struct
- elif 'tables' in sql_lower:
- if sql_lower == 'show tables':
- schema = 'mindsdb'
- elif 'show tables from' in sql_lower:
- schema = re.findall(r'show\s+tables\s+from\s+(\S*)', sql_lower)[0]
- elif 'show full tables from' in sql_lower:
- schema = re.findall(r'show\s+full\s+tables\s+from\s+(\S*)', sql_lower)[0]
- sql = f"select table_name as Tables_in_{schema} from INFORMATION_SCHEMA.TABLES WHERE table_schema = '{schema.upper()}' and table_type = 'BASE TABLE'"
- statement = SqlStatementParser(sql)
- sql_lower = statement.sql.lower()
- keyword = statement.keyword
- struct = statement.struct
- elif (
- 'show variables' in sql_lower
- or 'show session variables' in sql_lower
- or 'show session status' in sql_lower
- or 'show global variables' in sql_lower
- ):
- self.answer_show_variables(sql)
+ # FIXME remove after https://github.com/mindsdb/mindsdb_sql/issues/68
+ try:
+ # +++ https://github.com/mindsdb/mindsdb_sql/issues/64
+ sql_lower_replace = sql_lower.replace(' status ', ' `status` ')
+            sql_replace = sql.replace(' status ', ' `status` ')
+            # ---
+            if keyword == 'set' and 'names' in sql_lower_replace:
+                # FIXME https://github.com/mindsdb/mindsdb_sql/issues/73
+                if '@@' in sql_lower:
+                    statement = parse_sql(sql_lower_replace, dialect='mysql')
+                else:
+                    statement = parse_sql(sql_lower_replace, dialect='mindsdb')
+            else:
+                if '@@' in sql_lower:
+                    statement = parse_sql(sql_replace, dialect='mysql')
+                else:
+                    statement = parse_sql(sql_replace, dialect='mindsdb')
+ except Exception:
+ if keyword == 'show':
+ statement = parse_sql('show tables')
+ elif keyword == 'set':
+ statement = parse_sql('set autocommit')
+ statement.category = 'error'
+
+ if isinstance(statement, DropPredictor):
+ predictor_name = statement.name.parts[-1]
+ self.session.datahub['mindsdb'].delete_predictor(predictor_name)
+ self.packet(OkPacket).send()
+ elif isinstance(statement, DropIntegration):
+ raise Exception('Not ready')
+ elif isinstance(statement, Show) or keyword == 'show':
+ sql_category = statement.category.lower()
+ condition = statement.condition.lower() if isinstance(statement.condition, str) else statement.condition
+ expression = statement.expression
+ if sql_category in ('databases', 'schemas'):
+ new_statement = Select(
+ targets=[Identifier(parts=["schema_name"], alias=Identifier('Database'))],
+ from_table=Identifier(parts=['information_schema', 'SCHEMATA'])
+ )
+ if condition == 'like':
+ new_statement.where = BinaryOperation('like', args=[Identifier('schema_name'), expression])
+ elif condition is not None:
+ raise Exception(f'Not implemented: {sql}')
+
+ query = SQLQuery(
+ str(new_statement),
+ session=self.session
+ )
+ self.selectAnswer(query)
+ return
+ elif sql_category in ('tables', 'full tables'):
+ schema = self.session.database or 'mindsdb'
+ if condition == 'from':
+ schema = expression.parts[0]
+ elif condition is not None:
+ raise Exception(f'Unknown condition in query: {statement}')
+
+ new_statement = Select(
+ targets=[Identifier(parts=['table_name'], alias=Identifier(f'Tables_in_{schema}'))],
+ from_table=Identifier(parts=['information_schema', 'TABLES']),
+ where=BinaryOperation('and', args=[
+ BinaryOperation('=', args=[Identifier('table_schema'), Constant(schema.upper())]),
+ BinaryOperation('like', args=[Identifier('table_type'), Constant('BASE TABLE')])
+ ])
+ )
+
+ query = SQLQuery(
+ str(new_statement),
+ session=self.session
+ )
+ self.selectAnswer(query)
+ return
+ elif sql_category in ('variables', 'session variables', 'session status', 'global variables'):
+ new_statement = Select(
+ targets=[Identifier(parts=['Variable_name']), Identifier(parts=['Value'])],
+ from_table=Identifier(parts=['dataframe']),
+ )
+
+ if condition == 'like':
+ new_statement.where = BinaryOperation('like', args=[Identifier('Variable_name'), expression])
+ elif condition == 'where':
+ new_statement.where = expression
+ elif condition is not None:
+ raise Exception(f'Unknown condition in query: {statement}')
+
+ data = {}
+ is_session = 'session' in sql_category
+ for var_name, var_data in SERVER_VARIABLES.items():
+ var_name = var_name.replace('@@', '')
+ if is_session and var_name.startswith('session.') is False:
+ continue
+ if var_name.startswith('session.') or var_name.startswith('GLOBAL.'):
+ name = var_name.replace('session.', '').replace('GLOBAL.', '')
+ data[name] = var_data[0]
+ elif var_name not in data:
+ data[var_name] = var_data[0]
+
+ df = pd.DataFrame(data.items(), columns=['Variable_name', 'Value'])
+ data = dfsql.sql_query(
+ str(new_statement),
+ ds_kwargs={'case_sensitive': False},
+ reduce_output=False,
+ **{'dataframe': df}
+ )
+ data = data.values.tolist()
+
+ packages = []
+ packages += self.getTabelPackets(
+ columns=[{
+ 'table_name': 'session_variables',
+ 'name': 'Variable_name',
+ 'type': TYPES.MYSQL_TYPE_VAR_STRING
+ }, {
+ 'table_name': 'session_variables',
+ 'name': 'Value',
+ 'type': TYPES.MYSQL_TYPE_VAR_STRING
+ }],
+ data=data
+ )
+ if self.client_capabilities.DEPRECATE_EOF is True:
+ packages.append(self.packet(OkPacket, eof=True))
+ else:
+ packages.append(self.packet(EofPacket))
+ self.sendPackageGroup(packages)
return
elif "show status like 'ssl_version'" in sql_lower:
packages = []
@@ -1100,10 +1113,7 @@ def queryAnswer(self, sql):
packages.append(self.packet(EofPacket))
self.sendPackageGroup(packages)
return
- elif (
- sql_lower.startswith("show function status where db = 'mindsdb'")
- or sql_lower.startswith("show procedure status where db = 'mindsdb'")
- ):
+ elif sql_category in ('function status', 'procedure status'):
# SHOW FUNCTION STATUS WHERE Db = 'MINDSDB';
# SHOW PROCEDURE STATUS WHERE Db = 'MINDSDB'
# SHOW FUNCTION STATUS WHERE Db = 'MINDSDB' AND Name LIKE '%';
@@ -1119,19 +1129,51 @@ def queryAnswer(self, sql):
table = sql[sql.rfind('.') + 1:].strip(' .;\n\t').replace('`', '')
self.answer_show_create_table(table)
return
-
- if keyword == 'start':
- # start transaction
+ elif 'show character set where charset =' in sql_lower:
+ # show character set where charset = 'utf8mb4';
+ charset = sql_lower.replace('show character set where charset = ', '').strip("'")
+ self.answer_show_charset(charset)
+ return
+ elif sql_category == 'warnings':
+ self.answer_show_warnings()
+ return
+ elif sql_category == 'engines':
+ self.answer_show_engines()
+ return
+ elif sql_category == 'charset':
+ self.answer_show_charset()
+ return
+ elif sql_category == 'collation':
+ self.answer_show_collation()
+ return
+ elif sql_category == 'table status':
+ # SHOW TABLE STATUS LIKE 'table'
+ table_name = None
+ if condition == 'like' and isinstance(expression, Constant):
+ table_name = expression.value
+ if table_name is None:
+ err_str = f"Can't determine table name in query: {sql}"
+ log.warning(err_str)
+ raise Exception(err_str)
+ self.answer_show_table_status(table_name)
+ return
+ elif isinstance(statement, (StartTransaction, CommitTransaction, RollbackTransaction)):
self.packet(OkPacket).send()
- elif keyword == 'set':
- if 'autocommit' in sql_lower:
+ elif keyword == 'set' or isinstance(statement, Set):
+ category = statement.category.lower()
+ if category == 'autocommit':
self.packet(OkPacket).send()
- elif 'set names' in sql_lower:
- # it can be "set names utf8"
- self.charset = re.findall(r"set\s+names\s+(\S*)", sql_lower)[0]
- self.charset_text_type = CHARSET_NUMBERS['utf8_general_ci']
- if self.charset == 'utf8mb4':
- self.charset_text_type = CHARSET_NUMBERS['utf8mb4_general_ci']
+ elif category == 'names':
+ # set names utf8;
+ charsets = {
+ 'utf8': CHARSET_NUMBERS['utf8_general_ci'],
+ 'utf8mb4': CHARSET_NUMBERS['utf8mb4_general_ci']
+ }
+ self.charset = statement.arg.parts[0]
+ self.charset_text_type = charsets.get(self.charset)
+ if self.charset_text_type is None:
+                    log.warning(f"Unknown charset: {self.charset}. Falling back to 'utf8_general_ci' as the charset text type.")
+ self.charset_text_type = CHARSET_NUMBERS['utf8_general_ci']
self.packet(
OkPacket,
state_track=[
@@ -1141,6 +1183,7 @@ def queryAnswer(self, sql):
]
).send()
else:
+            log.warning(f'SQL statement is not processable, returning OK packet: {sql}')
self.packet(OkPacket).send()
elif keyword == 'use':
self.session.database = sql_lower.split()[1].strip(' ;')
@@ -1149,16 +1192,6 @@ def queryAnswer(self, sql):
self.answer_create_ai_table(struct)
elif keyword == 'create_predictor':
self.answer_create_predictor(struct)
- elif 'show warnings' in sql_lower:
- self.answerShowWarnings()
- elif 'show engines' in sql_lower:
- self.answerShowEngines()
- elif 'show charset' in sql_lower:
- self.answerShowCharset()
- elif 'show collation' in sql_lower:
- self.answerShowCollation()
- elif 'show table status' in sql_lower:
- self.answer_show_table_status(sql)
elif keyword == 'delete' and \
('mindsdb.predictors' in sql_lower or self.session.database == 'mindsdb' and 'predictors' in sql_lower):
self.delete_predictor_sql(sql)
@@ -1189,22 +1222,13 @@ def queryAnswer(self, sql):
raise NotImplementedError('Update and Insert not implemented')
elif keyword == 'alter' and ('disable keys' in sql_lower) or ('enable keys' in sql_lower):
self.packet(OkPacket).send()
- elif keyword == 'select':
- if 'connection_id()' in sql_lower:
- self.answer_connection_id(sql)
- return
- if '@@' in sql_lower:
- self.answerVariables(sql)
- return
- if 'database()' in sql_lower:
- self.answerSelectDatabase()
- return
- if 'current_user()' in sql_lower:
- self.answer_current_user()
- return
- if 'version()' in sql_lower:
- self.answer_version()
+ elif isinstance(statement, Select):
+ if statement.from_table is None:
+ self.answer_single_row_select(statement)
return
+ # if 'connection_id()' in sql_lower:
+ # self.answer_connection_id(sql)
+ # return
if "table_name,table_comment,if(table_type='base table', 'table', table_type)" in sql_lower:
# TABLEAU
# SELECT TABLE_NAME,TABLE_COMMENT,IF(TABLE_TYPE='BASE TABLE', 'TABLE', TABLE_TYPE),TABLE_SCHEMA FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA LIKE 'mindsdb' AND ( TABLE_TYPE='BASE TABLE' OR TABLE_TYPE='VIEW' ) ORDER BY TABLE_SCHEMA, TABLE_NAME
@@ -1243,39 +1267,72 @@ def queryAnswer(self, sql):
self.sendPackageGroup(packages)
return
- # region DataGrip
- if 'select user()' in sql_lower:
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'table_name': '',
- 'name': 'USER()',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING
- }],
- data=['mindsdb'] # TODO set here actual user
- )
- if self.client_capabilities.DEPRECATE_EOF is True:
- packages.append(self.packet(OkPacket, eof=True))
- else:
- packages.append(self.packet(EofPacket))
- self.sendPackageGroup(packages)
- return
-
query = SQLQuery(
sql,
session=self.session
)
self.selectAnswer(query)
- elif keyword == 'rollback':
- self.packet(OkPacket).send()
- elif keyword == 'commit':
- self.packet(OkPacket).send()
- elif keyword == 'explain':
- self.answer_explain_table(sql)
+ elif isinstance(statement, Explain):
+ self.answer_explain_table(statement.target.parts)
else:
print(sql)
+ log.warning(f'Unknown SQL statement: {sql}')
raise NotImplementedError('Action not implemented')
+ def answer_single_row_select(self, statement):
+ columns = []
+ data = []
+ for target in statement.targets:
+ if isinstance(target, Variable):
+ var_name = target.value
+ column_name = f'@@{var_name}'
+ column_alias = target.alias or column_name
+ result = SERVER_VARIABLES.get(column_name)
+ if result is None:
+ log.warning(f'Unknown variable: {column_name}')
+ result = ''
+ else:
+ result = result[0]
+ elif isinstance(target, Function):
+ functions_results = {
+ 'connection_id': self.connection_id,
+ 'database': self.session.database,
+ 'current_user': self.session.username,
+ 'user': self.session.username,
+ 'version': '8.0.17'
+ }
+ function_name = target.op.lower()
+ column_name = f'{target.op}()'
+ column_alias = target.alias or column_name
+ result = functions_results[function_name]
+ elif isinstance(target, Constant):
+ result = target.value
+ column_name = str(result)
+ column_alias = '.'.join(target.alias.parts) if isinstance(target.alias, Identifier) else column_name
+ elif isinstance(target, Identifier):
+ result = '.'.join(target.parts)
+ column_name = str(result)
+ column_alias = '.'.join(target.alias.parts) if isinstance(target.alias, Identifier) else column_name
+
+ columns.append({
+ 'table_name': '',
+ 'name': column_name,
+ 'alias': column_alias,
+ 'type': TYPES.MYSQL_TYPE_VAR_STRING if isinstance(result, str) else TYPES.MYSQL_TYPE_LONG,
+ 'charset': self.charset_text_type if isinstance(result, str) else CHARSET_NUMBERS['binary']
+ })
+ data.append(result)
+
+ packages = self.getTabelPackets(
+ columns=columns,
+ data=[data]
+ )
+ if self.client_capabilities.DEPRECATE_EOF is True:
+ packages.append(self.packet(OkPacket, eof=True))
+ else:
+ packages.append(self.packet(EofPacket))
+ self.sendPackageGroup(packages)
+
def answer_show_create_table(self, table):
packages = []
packages += self.getTabelPackets(
@@ -1295,7 +1352,6 @@ def answer_show_create_table(self, table):
else:
packages.append(self.packet(EofPacket))
self.sendPackageGroup(packages)
- return
def answer_show_index(self):
packages = []
@@ -1529,16 +1585,10 @@ def answer_function_status(self):
packages.append(self.packet(EofPacket))
self.sendPackageGroup(packages)
- def answer_show_table_status(self, sql):
+ def answer_show_table_status(self, table_name):
# NOTE at this moment parsed statement only like `SHOW TABLE STATUS LIKE 'table'`.
# NOTE some columns has {'database': 'mysql'}, other not. That correct. This is how real DB sends messages.
- parts = sql.split(' ')
- if parts[3].lower() != 'like':
- raise NotImplementedError('Action not implemented')
- table = parts[4].strip("'")
-
- packages = []
- packages += self.getTabelPackets(
+ packages = self.getTabelPackets(
columns=[{
'database': 'mysql',
'table_name': 'tables',
@@ -1667,7 +1717,7 @@ def answer_show_table_status(self, sql):
'charset': self.charset_text_type
}],
data=[[
- table, # Name
+ table_name, # Name
'InnoDB', # Engine
10, # Version
'Dynamic', # Row_format
@@ -1690,7 +1740,7 @@ def answer_show_table_status(self, sql):
packages.append(self.packet(OkPacket, eof=True, status=0x0002))
self.sendPackageGroup(packages)
- def answerShowWarnings(self):
+ def answer_show_warnings(self):
packages = []
packages += self.getTabelPackets(
columns=[{
@@ -1720,25 +1770,7 @@ def answerShowWarnings(self):
packages.append(self.packet(OkPacket, eof=True, status=0x0002))
self.sendPackageGroup(packages)
- def answerSelectDatabase(self):
- packages = []
- packages += self.getTabelPackets(
- columns=[{
- 'database': '',
- 'table_name': '',
- 'name': 'database()',
- 'alias': 'database()',
- 'type': TYPES.MYSQL_TYPE_VAR_STRING,
- 'charset': self.charset_text_type
- }],
- data=[
- [self.session.database]
- ]
- )
- packages.append(self.packet(OkPacket, eof=True, status=0x0000))
- self.sendPackageGroup(packages)
-
- def answerShowCollation(self):
+ def answer_show_collation(self):
packages = []
packages += self.getTabelPackets(
columns=[{
@@ -1792,7 +1824,21 @@ def answerShowCollation(self):
packages.append(self.packet(OkPacket, eof=True, status=0x0002))
self.sendPackageGroup(packages)
- def answerShowCharset(self):
+ def answer_show_charset(self, charset=None):
+ charsets = {
+ 'utf8': ['utf8', 'UTF-8 Unicode', 'utf8_general_ci', 3],
+ 'latin1': ['latin1', 'cp1252 West European', 'latin1_swedish_ci', 1],
+ 'utf8mb4': ['utf8mb4', 'UTF-8 Unicode', 'utf8mb4_general_ci', 4]
+ }
+ if charset is None:
+            data = list(charsets.values())
+ elif charset not in charsets:
+ err_str = f'Unknown charset: {charset}'
+ log.warning(err_str)
+ raise Exception(err_str)
+ else:
+ data = [charsets.get(charset)]
+
packages = []
packages += self.getTabelPackets(
columns=[{
@@ -1824,15 +1870,15 @@ def answerShowCharset(self):
'type': TYPES.MYSQL_TYPE_LONGLONG,
'charset': CHARSET_NUMBERS['binary']
}],
- data=[
- ['utf8', 'UTF-8 Unicode', 'utf8_general_ci', 3],
- ['latin1', 'cp1252 West European', 'latin1_swedish_ci', 1]
- ]
+ data=data
)
- packages.append(self.packet(OkPacket, eof=True, status=0x0002))
+ if self.client_capabilities.DEPRECATE_EOF is True:
+ packages.append(self.packet(OkPacket, eof=True))
+ else:
+ packages.append(self.packet(EofPacket))
self.sendPackageGroup(packages)
- def answerShowEngines(self):
+ def answer_show_engines(self):
packages = []
packages += self.getTabelPackets(
columns=[{
@@ -1898,40 +1944,6 @@ def answer_connection_id(self, sql):
packages.append(self.packet(OkPacket, eof=True, status=0x0002))
self.sendPackageGroup(packages)
- def answerVariables(self, sql):
- if '@@version_comment' in sql.lower():
- self.answerVersionComment()
- return
- elif '@@version' in sql.lower():
- self.answerVersion()
- return
- p = sql_parser.parse(sql)
- select = p.get('select')
- if isinstance(select, dict):
- select = [select]
- columns = []
- row = []
- for s in select:
- fill_name = s.get('value')
- alias = s.get('name', fill_name)
- variable = SERVER_VARIABLES.get(fill_name)
- columns.append({
- 'table_name': '',
- 'name': '',
- 'alias': alias,
- 'type': variable[1],
- 'charset': variable[2]
- })
- row.append(variable[0])
-
- packages = []
- packages += self.getTabelPackets(
- columns=columns,
- data=[row]
- )
- packages.append(self.packet(OkPacket, eof=True, status=0x0002))
- self.sendPackageGroup(packages)
-
def selectAnswer(self, query):
result = query.fetch(
self.session.datahub
@@ -2095,9 +2107,9 @@ def handle(self):
try:
if p.type.value == COMMANDS.COM_QUERY:
sql = self.decode_utf(p.sql.value)
- sql = SqlStatementParser(sql).sql
+ sql = SqlStatementParser.clear_sql(sql)
log.debug(f'COM_QUERY: {sql}')
- self.queryAnswer(sql)
+ self.query_answer(sql)
elif p.type.value == COMMANDS.COM_STMT_PREPARE:
# https://dev.mysql.com/doc/internals/en/com-stmt-prepare.html
sql = self.decode_utf(p.sql.value)
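
answer_single_row_select above handles SELECTs without a FROM clause by inspecting each parsed target. For
illustration, this is roughly how such a statement comes out of the parser; the node types named in the
comment follow the imports at the top of this file, but treat them as an assumption about mindsdb_sql output:

    from mindsdb_sql import parse_sql

    statement = parse_sql("select version(), @@version_comment, 'x'", dialect='mysql')
    for target in statement.targets:
        # Expected node types (assumption): Function, Variable, Constant
        print(type(target).__name__)
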
diff --git a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
--- a/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
+++ b/mindsdb/api/mysql/mysql_proxy/utilities/sql.py
@@ -34,52 +34,3 @@ def where_to_dict(root):
return root.value
else:
raise Exception(f'unknown type in "where": {root}')
-
-
-def to_moz_sql_struct(mp):
- res = {
- 'select': [],
- 'from': []
- }
-
- for t in mp.targets:
- if isinstance(t, Star):
- res['select'].append({
- 'value': '*',
- 'name': '*'
- })
- else:
- res['select'].append({
- 'value': '.'.join(t.parts),
- 'name': get_alias(t)
- })
-
- if isinstance(mp.from_table, Identifier):
- res['from'] = [identifier_to_dict(mp.from_table)]
- elif isinstance(mp.from_table, Join):
- if mp.from_table.join_type == 'left join':
- if not isinstance(mp.from_table.right, Identifier):
- raise Exception("only one 'level' of join supports")
- res['from'] = [
- identifier_to_dict(mp.from_table.left),
- {
- 'left join': identifier_to_dict(mp.from_table.right)
- }
- ]
- if mp.from_table.condition is not None:
- if mp.from_table.condition.op == '=':
- res['from'][1]['on'] = {'eq': [identifier_to_dict(x) for x in mp.from_table.condition.args]}
- else:
- raise Exception('Only left join support')
- else:
- raise Exception(f'unexpected type {mp.from_table}')
-
- where = mp.where
- if where is not None:
- where = where_to_dict(where)
- res['where'] = where
-
- if mp.limit is not None:
- res['limit'] = mp.limit.value
-
- return res
diff --git a/mindsdb/integrations/clickhouse/clickhouse.py b/mindsdb/integrations/clickhouse/clickhouse.py
--- a/mindsdb/integrations/clickhouse/clickhouse.py
+++ b/mindsdb/integrations/clickhouse/clickhouse.py
@@ -151,3 +151,13 @@ def unregister_predictor(self, name):
drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
"""
self._query(q)
+
+ def get_tables_list(self):
+ q = f"""SELECT database, table
+ FROM system.parts
+ WHERE active and database NOT IN ('system', 'mdb_system')
+ GROUP BY database, table
+ ORDER BY database, table;"""
+ tables_list = self._query(q)
+        tables = [f"{table[0]}.{table[1]}" for table in tables_list]
+ return tables
\ No newline at end of file
diff --git a/mindsdb/integrations/mysql/mysql.py b/mindsdb/integrations/mysql/mysql.py
--- a/mindsdb/integrations/mysql/mysql.py
+++ b/mindsdb/integrations/mysql/mysql.py
@@ -190,3 +190,26 @@ def unregister_predictor(self, name):
drop table if exists {self.mindsdb_database}.{self._escape_table_name(name)};
"""
self._query(q)
+
+ def get_row_count(self, query):
+ q = f"""
+ SELECT COUNT(*) as count
+ FROM ({query}) as query;"""
+ result = self._query(q)
+ return result[0]['count']
+
+ def get_columns(self):
+        q = f"""SELECT COLUMN_NAME, TABLE_NAME
+ FROM INFORMATION_SCHEMA.COLUMNS
+ WHERE TABLE_SCHEMA = database()
+ ORDER BY COLUMN_NAME, TABLE_NAME;"""
+ columns_list = self._query(q)
+        columns = [f"{column[0]}.{column[1]}" for column in columns_list]
+ return columns
+
+ def get_tables_list(self):
+        q = f"""
+ SHOW TABLES;
+ """
+ result = self._query(q)
+ return result
\ No newline at end of file
diff --git a/mindsdb/integrations/postgres/postgres.py b/mindsdb/integrations/postgres/postgres.py
--- a/mindsdb/integrations/postgres/postgres.py
+++ b/mindsdb/integrations/postgres/postgres.py
@@ -192,3 +192,29 @@ def unregister_predictor(self, name):
DROP FOREIGN TABLE IF EXISTS {self.mindsdb_database}.{self._escape_table_name(name)};
"""
self._query(q)
+
+ def get_row_count(self, query):
+ q = f"""
+ SELECT COUNT(*) as count
+ FROM ({query}) as query;"""
+ result = self._query(q)
+ return result[0]['count']
+
+ def get_tables_list(self):
+ q = f""" SELECT table_schema, table_name
+ FROM information_schema.tables
+ WHERE table_schema != 'pg_catalog'
+ AND table_schema != 'information_schema'
+ ORDER BY table_schema, table_name"""
+ tables_list = self._query(q)
+        tables = [f"{table['table_schema']}.{table['table_name']}" for table in tables_list]
+ return tables
+
+ def get_columns(self):
+ q = f"""SELECT column_name, table_name
+ FROM information_schema.columns
+ WHERE table_schema NOT IN ('information_schema', 'pg_catalog')
+ ORDER BY column_name, table_name;"""
+ columns_list = self._query(q)
+        columns = [f"{column[0]}.{column[1]}" for column in columns_list]
+ return columns
\ No newline at end of file
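
A hypothetical usage of the new PostgreSQL introspection helpers, assuming pg is an already-configured
integration instance (construction and credentials omitted, table name invented):

    # pg is assumed to be an instance of the PostgreSQL integration above
    print(pg.get_row_count('SELECT * FROM public.home_rentals'))  # e.g. 5000
    print(pg.get_tables_list())  # e.g. ['public.home_rentals', ...]
    print(pg.get_columns())      # strings formatted as 'column_name.table_name'
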
diff --git a/mindsdb/interfaces/datastore/datastore.py b/mindsdb/interfaces/datastore/datastore.py
--- a/mindsdb/interfaces/datastore/datastore.py
+++ b/mindsdb/interfaces/datastore/datastore.py
@@ -123,6 +123,9 @@ def delete_datasource(self, name, company_id=None):
linked_models = Predictor.query.filter_by(company_id=company_id, datasource_id=datasource_record.id).all()
if linked_models:
raise Exception("Can't delete {} datasource because there are next models linked to it: {}".format(name, [model.name for model in linked_models]))
+ session.query(Semaphor).filter_by(
+ company_id=company_id, entity_id=datasource_record.id, entity_type='datasource'
+ ).delete()
session.delete(datasource_record)
session.commit()
self.fs_store.delete(f'datasource_{company_id}_{datasource_record.id}')
diff --git a/mindsdb/interfaces/model/learn_process.py b/mindsdb/interfaces/model/learn_process.py
--- a/mindsdb/interfaces/model/learn_process.py
+++ b/mindsdb/interfaces/model/learn_process.py
@@ -2,6 +2,7 @@
import traceback
import tempfile
from pathlib import Path
+from typing import Optional
import pandas as pd
from pandas.core.frame import DataFrame
@@ -13,7 +14,7 @@
import mindsdb.interfaces.storage.db as db
from mindsdb.interfaces.database.database import DatabaseWrapper
from mindsdb.interfaces.model.model_interface import ModelInterface, ModelInterfaceWrapper
-from mindsdb.interfaces.storage.db import session, Predictor
+from mindsdb.interfaces.storage.db import session, Predictor, Datasource
from mindsdb import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore, DataStoreWrapper
from mindsdb.interfaces.storage.fs import FsStore
@@ -40,31 +41,20 @@ def delete_learn_mark():
@mark_process(name='learn')
-def run_generate(df: DataFrame, problem_definition: ProblemDefinition, name: str, company_id: int, datasource_id: int) -> int:
+def run_generate(df: DataFrame, problem_definition: ProblemDefinition, predictor_id: int) -> int:
json_ai = lightwood.json_ai_from_problem(df, problem_definition)
code = lightwood.code_from_json_ai(json_ai)
- predictor_record = db.Predictor(
- company_id=company_id,
- name=name,
- json_ai=json_ai.to_dict(),
- code=code,
- datasource_id=datasource_id,
- mindsdb_version=mindsdb_version,
- lightwood_version=lightwood_version,
- to_predict=[problem_definition.target],
- learn_args=problem_definition.to_dict(),
- data={'name': name}
- )
-
- db.session.add(predictor_record)
+ predictor_record = Predictor.query.with_for_update().get(predictor_id)
+ predictor_record.json_ai = json_ai.to_dict()
+ predictor_record.code = code
db.session.commit()
@mark_process(name='learn')
def run_fit(predictor_id: int, df: pd.DataFrame) -> None:
try:
- predictor_record = session.query(db.Predictor).filter_by(id=predictor_id).first()
+ predictor_record = Predictor.query.with_for_update().get(predictor_id)
assert predictor_record is not None
fs_store = FsStore()
@@ -97,11 +87,24 @@ def run_fit(predictor_id: int, df: pd.DataFrame) -> None:
raise e
-def run_learn(df: DataFrame, problem_definition: ProblemDefinition, name: str, company_id: int, datasource_id: int) -> None:
- run_generate(df, problem_definition, name, company_id, datasource_id)
- predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
- assert predictor_record is not None
- run_fit(predictor_record.id, df)
+def run_learn(df: DataFrame, problem_definition: ProblemDefinition, predictor_id: int,
+ delete_ds_on_fail: Optional[bool] = False) -> None:
+ try:
+ run_generate(df, problem_definition, predictor_id)
+ run_fit(predictor_id, df)
+ except Exception as e:
+ predictor_record = Predictor.query.with_for_update().get(predictor_id)
+ if delete_ds_on_fail is True:
+ linked_db_ds = Datasource.query.filter_by(id=predictor_record.datasource_id).first()
+ if linked_db_ds is not None:
+ predictors_with_ds = Predictor.query.filter(
+ (Predictor.id != predictor_id) & (Predictor.datasource_id == linked_db_ds.id)
+ ).all()
+ if len(predictors_with_ds) == 0:
+ session.delete(linked_db_ds)
+ predictor_record.datasource_id = None
+ predictor_record.data = {"error": str(e)}
+ session.commit()
def run_adjust(name, db_name, from_data, datasource_id, company_id):
@@ -156,7 +159,7 @@ def run_update(name: str, company_id: int):
predictor_record.data = predictor.model_analysis.to_dict() # type: ignore
session.commit()
- predictor_record.lightwood_version = lightwood.__version__
+ predictor_record.lightwood_version = lightwood_version
predictor_record.mindsdb_version = mindsdb_version
predictor_record.update_status = 'up_to_date'
session.commit()
diff --git a/mindsdb/interfaces/model/model_controller.py b/mindsdb/interfaces/model/model_controller.py
--- a/mindsdb/interfaces/model/model_controller.py
+++ b/mindsdb/interfaces/model/model_controller.py
@@ -10,7 +10,7 @@
import lightwood
from lightwood.api.types import ProblemDefinition
-from packaging import version
+from lightwood import __version__ as lightwood_version
import numpy as np
import pandas as pd
import mindsdb_datasources
@@ -94,6 +94,11 @@ def _unpack_old_args(self, from_data: dict, kwargs: dict, to_predict: Optional[U
if 'stop_training_in_x_seconds' in kwargs:
problem_definition['time_aim'] = kwargs['stop_training_in_x_seconds']
+ if kwargs.get('ignore_columns') is not None:
+ problem_definition['ignore_features'] = kwargs['ignore_columns']
+ if isinstance(problem_definition['ignore_features'], list) is False:
+ problem_definition['ignore_features'] = [problem_definition['ignore_features']]
+
ds_cls = getattr(mindsdb_datasources, from_data['class'])
ds = ds_cls(*from_data['args'], **from_data['kwargs'])
df = ds.df
@@ -101,14 +106,51 @@ def _unpack_old_args(self, from_data: dict, kwargs: dict, to_predict: Optional[U
return df, problem_definition, join_learn_process
@mark_process(name='learn')
- def learn(self, name: str, from_data: dict, to_predict: str, datasource_id: int, kwargs: dict, company_id: int) -> None:
+ def learn(self, name: str, from_data: dict, to_predict: str, datasource_id: int, kwargs: dict,
+ company_id: int, delete_ds_on_fail: Optional[bool] = False) -> None:
+ predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
+ if predictor_record is not None:
+ raise Exception('Predictor name must be unique.')
+
df, problem_definition, join_learn_process = self._unpack_old_args(from_data, kwargs, to_predict)
- p = LearnProcess(df, ProblemDefinition.from_dict(problem_definition), name, company_id, datasource_id)
+
+ problem_definition = ProblemDefinition.from_dict(problem_definition)
+ predictor_record = db.Predictor(
+ company_id=company_id,
+ name=name,
+ datasource_id=datasource_id,
+ mindsdb_version=mindsdb_version,
+ lightwood_version=lightwood_version,
+ to_predict=problem_definition.target,
+ learn_args=problem_definition.to_dict(),
+ data={'name': name}
+ )
+
+ db.session.add(predictor_record)
+ db.session.commit()
+ predictor_id = predictor_record.id
+
+ p = LearnProcess(df, problem_definition, predictor_id, delete_ds_on_fail)
p.start()
if join_learn_process:
p.join()
if not IS_PY36:
p.close()
+ db.session.refresh(predictor_record)
+
+ data = {}
+ if predictor_record.update_status == 'available':
+ data['status'] = 'complete'
+ elif predictor_record.json_ai is None and predictor_record.code is None:
+ data['status'] = 'generating'
+ elif predictor_record.data is None:
+ data['status'] = 'editable'
+ elif 'training_log' in predictor_record.data:
+ data['status'] = 'training'
+ elif 'error' not in predictor_record.data:
+ data['status'] = 'complete'
+ else:
+ data['status'] = 'error'
@mark_process(name='predict')
def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_format: str, company_id: int):
@@ -135,7 +177,9 @@ def predict(self, name: str, when_data: Union[dict, list, pd.DataFrame], pred_fo
'pickle': str(os.path.join(self.config['paths']['predictors'], fs_name))
}
else:
- raise Exception(f'Trying to predict using predictor {original_name} with status: {predictor_data["status"]}')
+ raise Exception(
+ f'Trying to predict using predictor {original_name} with status: {predictor_data["status"]}. Error is: {predictor_data.get("error", "unknown")}'
+ )
if isinstance(when_data, dict) and 'kwargs' in when_data and 'args' in when_data:
ds_cls = getattr(mindsdb_datasources, when_data['class'])
@@ -208,11 +252,6 @@ def get_model_data(self, name, company_id: int):
linked_db_ds = db.session.query(db.Datasource).filter_by(company_id=company_id, id=predictor_record.datasource_id).first()
- # check update availability
- if version.parse(predictor_record.mindsdb_version) < version.parse(mindsdb_version):
- predictor_record.update_status = 'available'
- db.session.commit()
-
data = deepcopy(predictor_record.data)
data['dtype_dict'] = predictor_record.dtype_dict
data['created_at'] = str(parse_datetime(str(predictor_record.created_at).split('.')[0]))
@@ -226,7 +265,9 @@ def get_model_data(self, name, company_id: int):
data['problem_definition'] = predictor_record.learn_args
# assume older models are complete, only temporary
- if predictor_record.update_status == 'available':
+ if 'error' in predictor_record.data:
+ data['status'] = 'error'
+ elif predictor_record.update_status == 'available':
data['status'] = 'complete'
elif predictor_record.json_ai is None and predictor_record.code is None:
data['status'] = 'generating'
@@ -288,14 +329,38 @@ def update_model(self, name: str, company_id: int):
return 'Updated in progress'
@mark_process(name='learn')
- def generate_predictor(self, name: str, from_data: dict, datasource_id, problem_definition_dict: dict, join_learn_process: bool, company_id: int):
+ def generate_predictor(self, name: str, from_data: dict, datasource_id, problem_definition_dict: dict,
+ join_learn_process: bool, company_id: int):
+ predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
+ if predictor_record is not None:
+ raise Exception('Predictor name must be unique.')
+
df, problem_definition, _ = self._unpack_old_args(from_data, problem_definition_dict)
- p = GenerateProcess(df, ProblemDefinition.from_dict(problem_definition), name, company_id, datasource_id)
+
+ problem_definition = ProblemDefinition.from_dict(problem_definition)
+
+ predictor_record = db.Predictor(
+ company_id=company_id,
+ name=name,
+ datasource_id=datasource_id,
+ mindsdb_version=mindsdb_version,
+ lightwood_version=lightwood_version,
+ to_predict=problem_definition.target,
+ learn_args=problem_definition.to_dict(),
+ data={'name': name}
+ )
+
+ db.session.add(predictor_record)
+ db.session.commit()
+ predictor_id = predictor_record.id
+
+ p = GenerateProcess(df, problem_definition, predictor_id)
p.start()
if join_learn_process:
p.join()
if not IS_PY36:
p.close()
+ db.session.refresh(predictor_record)
def edit_json_ai(self, name: str, json_ai: dict, company_id=None):
predictor_record = db.session.query(db.Predictor).filter_by(company_id=company_id, name=name).first()
@@ -337,6 +402,7 @@ def fit_predictor(self, name: str, from_data: dict, join_learn_process: bool, co
if not IS_PY36:
p.close()
+
'''
Notes: Remove ray from actors are getting stuck
try:
diff --git a/mindsdb/streams/stream_controller.py b/mindsdb/streams/stream_controller.py
--- a/mindsdb/streams/stream_controller.py
+++ b/mindsdb/streams/stream_controller.py
@@ -1,4 +1,3 @@
-from copy import deepcopy
import os
from threading import Event, Thread
from time import time
@@ -19,7 +18,7 @@ def __init__(self, name, predictor, stream_in, stream_out, anomaly_stream=None,
self.stream_in = stream_in
self.stream_out = stream_out
self.anomaly_stream = anomaly_stream
-
+
self.learning_stream = learning_stream
self.learning_threshold = learning_threshold
self.learning_data = []
@@ -36,8 +35,8 @@ def __init__(self, name, predictor, stream_in, stream_out, anomaly_stream=None,
self.target = p.to_predict[0]
- ts_settings = p.learn_args.get('timeseries_settings', None)
- if not ts_settings['is_timeseries']:
+ ts_settings = p.learn_args.get('timeseries_settings', {})
+ if isinstance(ts_settings, dict) is False or ts_settings.get('is_timeseries', False) is False:
ts_settings = None
if ts_settings is None:
diff --git a/mindsdb/utilities/functions.py b/mindsdb/utilities/functions.py
--- a/mindsdb/utilities/functions.py
+++ b/mindsdb/utilities/functions.py
@@ -1,5 +1,6 @@
import argparse
import datetime
+import requests
from functools import wraps
from mindsdb.utilities.fs import create_process_mark, delete_process_mark
@@ -60,3 +61,38 @@ def wrapper(*args, **kwargs):
delete_process_mark(name)
return wrapper
return mark_process_wrapper
+
+
+def get_versions_where_predictors_become_obsolete():
+ """ Get list of MindsDB versions in which predictors should be retrained
+ Returns:
+ list of str or False
+ """
+ versions_for_updating_predictors = []
+ try:
+ try:
+ res = requests.get(
+ 'https://mindsdb-cloud-public-service-files.s3.us-east-2.amazonaws.com/version_for_updating_predictors.txt',
+ timeout=0.5
+ )
+ except (ConnectionError, requests.exceptions.ConnectionError) as e:
+ print(f'Is no connection. {e}')
+ raise
+ except Exception as e:
+ print(f'Is something wrong with getting version_for_updating_predictors.txt: {e}')
+ raise
+
+ if res.status_code != 200:
+ print(f'Cant get version_for_updating_predictors.txt: returned status code = {res.status_code}')
+ raise
+
+ try:
+ versions_for_updating_predictors = res.text.replace(' \t\r', '').split('\n')
+ except Exception as e:
+ print(f'Cant decode compatible-config.json: {e}')
+ raise
+ except Exception:
+ return False, versions_for_updating_predictors
+
+ versions_for_updating_predictors = [x for x in versions_for_updating_predictors if len(x) > 0]
+ return True, versions_for_updating_predictors
| diff --git a/tests/integration_tests/flows/test_http.py b/tests/integration_tests/flows/test_http.py
--- a/tests/integration_tests/flows/test_http.py
+++ b/tests/integration_tests/flows/test_http.py
@@ -1,12 +1,10 @@
-import time
+import requests
import unittest
from random import randint
from pathlib import Path
from uuid import uuid1
import json
-import lightwood
-import requests
from common import (
CONFIG_PATH,
@@ -184,7 +182,6 @@ def test_7_utils(self):
response = requests.get(f'{root}/util/ping')
assert response.status_code == 200
-
response = requests.get(f'{root}/config/vars')
assert response.status_code == 200
@@ -300,5 +297,6 @@ def test_98_predict_modified_predictor(self):
pvs = res.json()
assert pvs[0]['rental_price']['predicted_value'] == 5555555
+
if __name__ == '__main__':
unittest.main(failfast=True)
diff --git a/tests/integration_tests/flows/test_redis.py b/tests/integration_tests/flows/test_redis.py
--- a/tests/integration_tests/flows/test_redis.py
+++ b/tests/integration_tests/flows/test_redis.py
@@ -71,11 +71,11 @@ def test_length(self):
def upload_ds(self, name):
df = pd.DataFrame({
- 'group': ["A" for _ in range(100, 210)],
- 'order': [x for x in range(100, 210)],
- 'x1': [x for x in range(100,210)],
- 'x2': [x*2 for x in range(100,210)],
- 'y': [x*3 for x in range(100,210)]
+ 'group': ["A" for _ in range(100, 250)],
+ 'order': [x for x in range(100, 250)],
+ 'x1': [x for x in range(100, 250)],
+ 'x2': [x * 2 for x in range(100, 250)],
+ 'y': [x * 3 for x in range(100, 250)]
})
with tempfile.NamedTemporaryFile(mode='w+', newline='', delete=False) as f:
df.to_csv(f, index=False)
@@ -127,7 +127,7 @@ def train_ts_predictor(self, ds_name, predictor_name, with_gb=True):
url = f'{HTTP_API_ROOT}/predictors/{predictor_name}'
res = requests.put(url, json=params)
res.raise_for_status()
-
+
def test_1_create_integration(self):
print(f"\nExecuting {self._testMethodName}")
url = f'{HTTP_API_ROOT}/config/integrations/{INTEGRATION_NAME}'
@@ -138,12 +138,11 @@ def test_1_create_integration(self):
res = requests.put(url, json={"params": params})
self.assertEqual(res.status_code, 200)
-
+
def test_2_create_redis_stream(self):
print(f"\nExecuting {self._testMethodName}")
self.upload_ds(DS_NAME)
self.train_predictor(DS_NAME, DEFAULT_PREDICTOR)
- time.sleep(30)
url = f'{HTTP_API_ROOT}/streams/{NORMAL_STREAM_NAME}'
res = requests.put(url, json={
@@ -189,8 +188,8 @@ def test_5_making_ts_stream_prediction(self):
stream_in = RedisStream(STREAM_IN_TS, CONNECTION_PARAMS)
stream_out = RedisStream(STREAM_OUT_TS, CONNECTION_PARAMS)
- for x in range(210, 221):
- stream_in.write({'x1': x, 'x2': 2*x, 'order': x, 'group': "A", 'y': 3*x})
+ for x in range(230, 241):
+ stream_in.write({'x1': x, 'x2': 2 * x, 'order': x, 'group': "A", 'y': 3 * x})
time.sleep(0.01)
time.sleep(10)
@@ -214,7 +213,7 @@ def test_6_create_stream_redis_native_api(self):
stream_out = RedisStream(STREAM_OUT_NATIVE, CONNECTION_PARAMS)
for x in range(1, 3):
- stream_in.write({'x1': x, 'x2': 2*x})
+ stream_in.write({'x1': x, 'x2': 2 * x})
time.sleep(5)
time.sleep(30)
@@ -262,7 +261,7 @@ def test_9_making_ts_stream_prediction_no_group(self):
stream_out = RedisStream(STREAM_OUT, CONNECTION_PARAMS)
for x in range(210, 221):
- stream_in.write({'x1': x, 'x2': 2*x, 'order': x, 'y': 3*x})
+ stream_in.write({'x1': x, 'x2': 2*x, 'order': x, 'y': 3 * x})
time.sleep(5)
self.assertEqual(len(list(stream_out.read())), 2)
| Add URL redirection from old installation in docs :traffic_light:
The macOS installation docs were moved to the deployment directory. We need to set up a redirect so that the old links point to `deployment/macos`.
>Note: We are accepting the first PR that solves the issue.
## Steps :male_detective: :female_detective:
- Head over to https://github.com/mindsdb/mindsdb/blob/staging/docs/mindsdb-docs/mkdocs.yml#L46
- Add a new redirect:
```
'installation/macos/': '/deployment/macos.md'
```
## Additional rewards :1st_place_medal:
Each documentation PR brings :one: point for entry into the draw for a :computer: Deep Learning Laptop powered by the NVIDIA RTX 3080 Max-Q GPU or other swag :shirt: :bear: . For more info check out https://mindsdb.com/hacktoberfest/
| 2021-10-15T07:24:06Z | [] | [] |
|
mindsdb/mindsdb | 1,832 | mindsdb__mindsdb-1832 | [
"1826"
] | 9f40b6372759836efd7ad18737387600331413c8 | diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -321,7 +321,7 @@ def _parse_query(self, sql):
}
return
- integrations_names = self.datahub.get_integrations_names()
+ integrations_names = self.datahub.get_datasources_names()
integrations_names.append('INFORMATION_SCHEMA')
integrations_names.append('information_schema')
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py b/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datahub.py
@@ -1,15 +1,7 @@
from mindsdb.api.mysql.mysql_proxy.datahub.information_schema import InformationSchema
-from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.integration_datanode import IntegrationDataNode
-# from mindsdb.interfaces.database.integrations import DatasourceController
def init_datahub(model_interface, ai_table, data_store, datasource_interface, company_id=None):
datahub = InformationSchema(model_interface, ai_table, data_store, datasource_interface)
- integrations = datasource_interface.get_db_integrations().keys()
- for key in integrations:
- datahub.add({
- key: IntegrationDataNode(key, data_store)
- })
-
return datahub
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py b/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
@@ -6,12 +6,7 @@
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.file_datanode import FileDataNode
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.mindsdb_datanode import MindsDBDataNode
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datasource_datanode import DataSourceDataNode
-
-
-def get_table_alias(table_obj):
- if table_obj.alias is not None:
- return table_obj.alias
- return '.'.join(table_obj.parts)
+from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.integration_datanode import IntegrationDataNode
class InformationSchema(DataNode):
@@ -28,29 +23,36 @@ class InformationSchema(DataNode):
}
def __init__(self, model_interface, ai_table, data_store, datasource_interface):
- self.index = {}
- self.add({
+ self.datasource_interface = datasource_interface
+ self.data_store = data_store
+ self.persis_datanodes = {
'mindsdb': MindsDBDataNode(model_interface, ai_table, data_store, datasource_interface),
'datasource': DataSourceDataNode(data_store),
'file': FileDataNode(data_store)
- })
+ }
def __getitem__(self, key):
return self.get(key)
- def add(self, dsObject):
- for key, val in dsObject.items():
- self.index[key.upper()] = val
-
def get(self, name):
- if name.upper() == 'INFORMATION_SCHEMA':
+ name_lower = name.lower()
+
+ if name.lower() == 'information_schema':
return self
- ds = self.index.get(name.upper())
- return ds
+
+ if name_lower in self.persis_datanodes:
+ return self.persis_datanodes[name_lower]
+
+ datasource_names = self.datasource_interface.get_db_integrations().keys()
+ for datasource_name in datasource_names:
+ if datasource_name.lower() == name_lower:
+ return IntegrationDataNode(datasource_name, self.data_store)
+
+ return None
def has_table(self, tableName):
tn = tableName.upper()
- if tn in self.information_schema or tn in self.index:
+ if tn in self.information_schema:
return True
return False
@@ -58,11 +60,12 @@ def get_table_columns(self, tableName):
tn = tableName.upper()
if tn in self.information_schema:
return self.information_schema[tn]
- raise Exception()
+ raise Exception(f'Table information_schema.{tableName} does not exists')
- def get_integrations_names(self):
+ def get_datasources_names(self):
+ datasource_names = self.datasource_interface.get_db_integrations().keys()
return [
- x.lower() for x in self.index if x.lower() not in ['mindsdb', 'datasource']
+ x.lower() for x in datasource_names
]
def _get_tables(self):
@@ -77,9 +80,13 @@ def _get_tables(self):
['PLUGINS', 'information_schema', 'SYSTEM VIEW', [], 'utf8mb4_0900_ai_ci'],
]
- for dsName, ds in self.index.items():
+ for ds_name, ds in self.persis_datanodes.items():
ds_tables = ds.get_tables()
- data += [[x, dsName, 'BASE TABLE', [], 'utf8mb4_0900_ai_ci'] for x in ds_tables]
+ data += [[x, ds_name, 'BASE TABLE', [], 'utf8mb4_0900_ai_ci'] for x in ds_tables]
+
+ for ds_name in self.get_datasources_names():
+ ds = self.get(ds_name)
+ data += [[x, ds_name, 'BASE TABLE', [], 'utf8mb4_0900_ai_ci'] for x in ds_tables]
df = pd.DataFrame(data, columns=columns)
return df
@@ -107,7 +114,7 @@ def _get_columns(self):
result_row[4] = i
result.append(result_row)
- mindsb_dn = self.index['MINDSDB']
+ mindsb_dn = self.get('MINDSDB')
for table_name in mindsb_dn.get_tables():
table_columns = mindsb_dn.get_table_columns(table_name)
for i, column_name in enumerate(table_columns):
@@ -127,7 +134,11 @@ def _get_schemata(self):
['def', 'information_schema', 'utf8', 'utf8_general_ci', None]
]
- for database_name in self.index:
+ for database_name in self.persis_datanodes:
+ data.append(['def', database_name, 'utf8mb4', 'utf8mb4_0900_ai_ci', None])
+
+ datasource_names = self.datasource_interface.get_db_integrations().keys()
+ for database_name in datasource_names:
data.append(['def', database_name, 'utf8mb4', 'utf8mb4_0900_ai_ci', None])
df = pd.DataFrame(data, columns=columns)
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -1140,6 +1140,17 @@ def query_answer(self, sql):
elif condition is not None:
raise Exception(f'Not implemented: {sql}')
+ query = SQLQuery(
+ str(new_statement),
+ session=self.session
+ )
+ self.answer_select(query)
+ return
+ elif sql_category == 'datasources':
+ new_statement = Select(
+ targets=[Star()],
+ from_table=Identifier(parts=['mindsdb', 'datasources'])
+ )
query = SQLQuery(
str(new_statement),
session=self.session
| diff --git a/tests/integration_tests/flows/test_mysql_api.py b/tests/integration_tests/flows/test_mysql_api.py
--- a/tests/integration_tests/flows/test_mysql_api.py
+++ b/tests/integration_tests/flows/test_mysql_api.py
@@ -145,27 +145,27 @@ def test_3_making_prediction(self):
self.assertTrue('rental_price' in res and 'rental_price_explain' in res,
f"error getting prediction from {predictor_name} - {res}")
-
def test_4_service_requests(self):
service_requests = [
- "show databases;",
- "show schemas;",
- "show tables;",
- "show tables from mindsdb;",
- "show full tables from mindsdb;",
- "show variables;",
- "show session status;",
- "show global variables;",
- "show engines;",
- "show warnings;",
- "show charset;",
- "show collation;",
- # "show function status where db = 'mindsdb';",
- # "show procedure status where db = 'mindsdb';",
- # "show table status like commands;",
- ]
+ "show databases;",
+ "show schemas;",
+ "show tables;",
+ "show tables from mindsdb;",
+ "show full tables from mindsdb;",
+ "show variables;",
+ "show session status;",
+ "show global variables;",
+ "show engines;",
+ "show warnings;",
+ "show charset;",
+ "show collation;",
+ "show datasources",
+ "show predictors"
+ # "show function status where db = 'mindsdb';",
+ # "show procedure status where db = 'mindsdb';",
+ # "show table status like commands;",
+ ]
for req in service_requests:
- name = "_".join(req.split(" "))
with self.subTest(msg=req):
print(f"\nExecuting {self._testMethodName} ({__name__}.{self.__class__.__name__}) [{req}]")
self.query(req)
| Sync new database integrations with clients
At the moment, if the user creates a new database integration, it will not be visible in a database client such as DBeaver without disconnecting/reconnecting to MindsDB. E.g. if you create a new integration named `postgresql_db`:
```sql
use postgresql_db; => This works
```
```sql
SELECT * FROM schema.table; => SQL Error [1149] [42000]: datanode is none
```
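A minimal sketch of one way to fix this, assuming the data hub keeps a `datasource_interface`-style controller as this codebase does: resolve the integration by name at lookup time instead of building a static index of data nodes when the client connects, so integrations created later are still found. Class and import names mirror the repository, but the body is simplified for illustration.
```python
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.integration_datanode import IntegrationDataNode


class InformationSchema:
    def __init__(self, datasource_interface, data_store):
        self.datasource_interface = datasource_interface
        self.data_store = data_store

    def get(self, name):
        # Look the datasource up on every call rather than in a prebuilt
        # index, so integrations created after the client connected are
        # visible without a disconnect/reconnect.
        name_lower = name.lower()
        for ds_name in self.datasource_interface.get_db_integrations():
            if ds_name.lower() == name_lower:
                return IntegrationDataNode(ds_name, self.data_store)
        return None  # unknown database -> caller reports "datanode is none"
```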
| 2021-12-23T12:40:38Z | [] | [] |
|
mindsdb/mindsdb | 1,879 | mindsdb__mindsdb-1879 | [
"1878"
] | 641ed41ff23585cd039352e559f4f8419d9c3dc7 | diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_query.py
@@ -114,7 +114,7 @@ def markQueryVar(where):
elif isinstance(where, UnaryOperation):
markQueryVar(where.args[0])
elif isinstance(where, Constant):
- if where.value.startswith('$var['):
+ if str(where.value).startswith('$var['):
where.is_var = True
@@ -351,7 +351,7 @@ def _parse_query(self, sql):
predictor_metadata[model_name] = {
'timeseries': True,
'window': window,
- 'nr_predictions': ts_settings.get('nr_predictions'),
+ 'horizon': ts_settings.get('horizon'),
'order_by_column': order_by,
'group_by_columns': group_by
}
@@ -500,7 +500,7 @@ def _parse_query(self, sql):
# if is_timeseries:
# if 'LATEST' not in self.raw:
# # remove additional records from predictor results:
- # # first 'window_size' and last 'nr_prediction' records
+ # # first 'window_size' and last 'horizon' records
# # otherwise there are many unxpected rows in prediciton result:
# # ----------------------------------------------------------------------------------------
# # mysql> SELECT tb.time, tb.state, tb.pnew_case, tb.new_case from
@@ -529,11 +529,11 @@ def _parse_query(self, sql):
# # 14 rows in set (2.52 sec)
# window_size = predictor_metadata[predictor]['window']
- # nr_predictions = predictor_metadata[predictor]['nr_predictions']
- # if len(data) >= (window_size + nr_predictions):
+ # horizon = predictor_metadata[predictor]['horizon']
+ # if len(data) >= (window_size + horizon):
# data = data[window_size:]
- # if len(data) > nr_predictions and nr_predictions > 1:
- # data = data[:-nr_predictions+1]
+ # if len(data) > horizon and horizon > 1:
+ # data = data[:-horizon + 1]
data = [{(key, key): value for key, value in row.items()} for row in data]
table_name = get_preditor_alias(step, self.database)
diff --git a/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py b/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py
--- a/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py
+++ b/mindsdb/api/mysql/mysql_proxy/classes/sql_statement_parser.py
@@ -329,7 +329,7 @@ def parse_as_create_predictor(self) -> dict:
+ Optional(ORDER_BY + delimitedList(order_item, delim=',')('order_by'))
+ Optional(GROUP_BY + delimitedList(word | QuotedString("`"), delim=',')('group_by'))
+ Optional(WINDOW + s_int('window'))
- + Optional(HORIZON + s_int('nr_predictions'))
+ + Optional(HORIZON + s_int('horizon'))
+ Optional(
(USING + delimitedList(using_item, delim=',')('using'))
| (USING + originalTextFor(nestedExpr('{', '}'))('using'))
@@ -541,7 +541,7 @@ def check_recursive(a, b):
'order_by': ['f_order_1', 'f_order_2', 'f_order_3'],
'group_by': ['f_group_1', 'f_group_2'],
'window': 100,
- 'nr_predictions': 7,
+ 'horizon': 7,
'using': {'x': 1, 'y': 'a'}
}
], [
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
@@ -287,7 +287,7 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
predict = model['predict']
group_by = timeseries_settings['group_by']
order_by_column = timeseries_settings['order_by'][0]
- nr_predictions = timeseries_settings['nr_predictions']
+ horizon = timeseries_settings['horizon']
groups = set()
for row in pred_dicts:
@@ -329,18 +329,18 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
date_values = [date_values]
for i in range(len(rows) - 1):
- if nr_predictions > 1:
+ if horizon > 1:
rows[i][predict] = rows[i][predict][0]
rows[i][order_by_column] = rows[i][order_by_column][0]
for col in ('predicted_value', 'confidence', 'confidence_lower_bound', 'confidence_upper_bound'):
- if nr_predictions > 1:
+ if horizon > 1:
explanations[i][predict][col] = explanations[i][predict][col][0]
last_row = rows.pop()
last_explanation = explanations.pop()
- for i in range(nr_predictions):
+ for i in range(horizon):
new_row = copy.deepcopy(last_row)
- if nr_predictions > 1:
+ if horizon > 1:
new_row[predict] = new_row[predict][i]
new_row[order_by_column] = new_row[order_by_column][i]
if '__mindsdb_row_id' in new_row and (i > 0 or __mdb_make_predictions is False):
@@ -349,7 +349,7 @@ def select(self, table, columns=None, where=None, where_data=None, order_by=None
new_explanation = copy.deepcopy(last_explanation)
for col in ('predicted_value', 'confidence', 'confidence_lower_bound', 'confidence_upper_bound'):
- if nr_predictions > 1:
+ if horizon > 1:
new_explanation[predict][col] = new_explanation[predict][col][i]
if i != 0:
new_explanation[predict]['anomaly'] = None
diff --git a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
--- a/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
+++ b/mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
@@ -559,7 +559,7 @@ def answer_create_predictor(self, statement):
if statement.window is not None:
struct['window'] = statement.window
if statement.horizon is not None:
- struct['nr_predictions'] = statement.horizon
+ struct['horizon'] = statement.horizon
model_interface = self.session.model_interface
data_store = self.session.data_store
@@ -583,7 +583,7 @@ def answer_create_predictor(self, statement):
ds_data = data_store.get_datasource(ds_name)
timeseries_settings = {}
- for w in ['order_by', 'group_by', 'window', 'nr_predictions']:
+ for w in ['order_by', 'group_by', 'window', 'horizon']:
if w in struct:
timeseries_settings[w] = struct.get(w)
| diff --git a/tests/integration_tests/flows/test_kafka.py b/tests/integration_tests/flows/test_kafka.py
--- a/tests/integration_tests/flows/test_kafka.py
+++ b/tests/integration_tests/flows/test_kafka.py
@@ -114,7 +114,7 @@ def train_ts_predictor(self, ds_name, predictor_name):
'timeseries_settings': {
"order_by": ["order"],
"group_by": ["group"],
- "nr_predictions": 1,
+ "horizon": 1,
"use_previous_target": True,
"window": 10
},
diff --git a/tests/integration_tests/flows/test_redis.py b/tests/integration_tests/flows/test_redis.py
--- a/tests/integration_tests/flows/test_redis.py
+++ b/tests/integration_tests/flows/test_redis.py
@@ -106,7 +106,7 @@ def train_predictor(self, ds_name, predictor_name):
def train_ts_predictor(self, ds_name, predictor_name, with_gb=True):
ts_settings = {
"order_by": ["order"],
- "nr_predictions": 1,
+ "horizon": 1,
"use_previous_target": True,
"window": 10}
| Rename `nr_predictions` to `horizon`
| 2022-01-21T09:27:15Z | [] | [] |
|
mindsdb/mindsdb | 1,923 | mindsdb__mindsdb-1923 | [
"1922"
] | 10a5300838e4ae45d42495fdf53d76c702f66518 | diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/datanodes/integration_datanode.py
@@ -1,14 +1,17 @@
import pandas as pd
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
from mindsdb.api.mysql.mysql_proxy.datahub.datanodes.datanode import DataNode
+from mindsdb.utilities.log import log
class IntegrationDataNode(DataNode):
type = 'integration'
- def __init__(self, integration_name, data_store):
+ def __init__(self, integration_name, data_store, ds_type):
self.integration_name = integration_name
self.data_store = data_store
+ self.ds_type = ds_type
def get_type(self):
return self.type
@@ -23,9 +26,18 @@ def get_table_columns(self, tableName):
return []
def select(self, query):
- sql_query = str(query)
-
- dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': sql_query})
+ if self.ds_type in ('postgres', 'snowflake'):
+ dialect = 'postgres'
+ else:
+ dialect = 'mysql'
+ render = SqlalchemyRender(dialect)
+ try:
+ query_str = render.get_string(query, with_failback=False)
+ except Exception as e:
+ log.error(f"Exception during query casting to '{dialect}' dialect. Query: {query}. Error: {e}")
+ query_str = render.get_string(query, with_failback=True)
+
+ dso, _creation_info = self.data_store.create_datasource(self.integration_name, {'query': query_str})
data = dso.df.to_dict(orient='records')
column_names = list(dso.df.columns)
diff --git a/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py b/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
--- a/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
+++ b/mindsdb/api/mysql/mysql_proxy/datahub/information_schema.py
@@ -58,7 +58,8 @@ def get(self, name):
datasource_names = self.datasource_interface.get_db_integrations().keys()
for datasource_name in datasource_names:
if datasource_name.lower() == name_lower:
- return IntegrationDataNode(datasource_name, self.data_store)
+ datasource = self.datasource_interface.get_db_integration(name=datasource_name)
+ return IntegrationDataNode(datasource_name, self.data_store, ds_type=datasource['type'])
return None
| diff --git a/tests/integration_tests/flows/test_http.py b/tests/integration_tests/flows/test_http.py
--- a/tests/integration_tests/flows/test_http.py
+++ b/tests/integration_tests/flows/test_http.py
@@ -240,9 +240,10 @@ def test_94_edit_json_ai(self):
# Edit it
json_ai = predictor_data['json_ai']
json_ai['problem_definition']
- mixers = json_ai['outputs']['rental_price']['mixers']
+ mixers = json_ai['model']['args']['submodels']
keep_only = [x for x in mixers if x['module'] != 'Regression']
- json_ai['outputs']['rental_price']['mixers'] = keep_only
+ json_ai['model']['args']['submodels'] = keep_only
+
# Upload it
r = requests.put(
| Render queries to external DBs considering their syntax
Different databases have different SQL syntax. At the moment we render all queries using MySQL syntax.
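A minimal sketch of the intended behaviour, using the `SqlalchemyRender` helper from `mindsdb_sql` that this codebase already relies on; the dialect mapping below (Postgres-style rendering for `postgres`/`snowflake`, MySQL otherwise) is an illustrative assumption, not an exhaustive list.
```python
from mindsdb_sql import parse_sql
from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender


def render_for_integration(query_ast, ds_type: str) -> str:
    # Pick the render dialect from the integration type instead of
    # always emitting MySQL syntax.
    dialect = 'postgres' if ds_type in ('postgres', 'snowflake') else 'mysql'
    render = SqlalchemyRender(dialect)
    return render.get_string(query_ast, with_failback=True)


# Example: the same AST rendered for a Postgres integration.
ast = parse_sql('SELECT a, b FROM t WHERE a > 1', dialect='mindsdb')
print(render_for_integration(ast, 'postgres'))
```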
| 2022-02-02T11:49:05Z | [] | [] |
|
mindsdb/mindsdb | 2,472 | mindsdb__mindsdb-2472 | [
"2312"
] | 054c13525d72618e1ee210079ce4554bc688864a | diff --git a/mindsdb/integrations/handlers/ckan_handler/__about__.py b/mindsdb/integrations/handlers/ckan_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/ckan_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB CKAN handler'
+__package_name__ = 'mindsdb_ckan_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for CKAN"
+__author__ = 'Konstantin Sivakov'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/ckan_handler/__init__.py b/mindsdb/integrations/handlers/ckan_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/ckan_handler/__init__.py
@@ -0,0 +1,19 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from mindsdb.integrations.handlers.ckan_handler.__about__ import __version__ as version, __description__ as description
+try:
+ from mindsdb.integrations.handlers.ckan_handler import CkanHandler as Handler
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'CKAN'
+name = 'ckan'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title',
+ 'description', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/ckan_handler/ckan_handler.py b/mindsdb/integrations/handlers/ckan_handler/ckan_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/ckan_handler/ckan_handler.py
@@ -0,0 +1,99 @@
+from ckanapi import RemoteCKAN as rc
+import pandas as pd
+
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import HandlerStatusResponse, HandlerResponse, RESPONSE_TYPE
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb.utilities.log import log
+
+
+class CkanHandler(DatabaseHandler):
+ """
+ This handler handles connection and consumption of the CKAN API.
+ """
+ name = "ckan"
+
+ def __init__(self, name=None, **kwargs):
+ super().__init__(name)
+ self.type = 'ckan'
+ self.name = name
+ self.connection_args = kwargs.get('connection_data')
+ self.ckan = None
+ self.dialect = 'postgresql'
+ self.renderer = SqlalchemyRender('postgres')
+ self.connection = None
+ self.is_connected = False
+
+ def connect(self) -> HandlerStatusResponse:
+ """
+ Handles the connection to a CKAN remote portal instance.
+ """
+ url = self.connection_args.get('url')
+ try:
+ ckan = rc(url)
+ self.is_connected = True
+ self.ckan = ckan
+ except Exception as e:
+ return HandlerStatusResponse(False, f'Failed to connect to CKAN: {e}')
+ self.connection = ckan
+ return HandlerStatusResponse(True)
+
+ def disconnect(self):
+ self.is_connected = False
+
+ def check_connection(self) -> HandlerStatusResponse:
+ response = HandlerStatusResponse(False)
+ try:
+ self.connect()
+ result = self.ckan.action.status_show()
+ if 'datastore' not in result.get('extensions'):
+ """
+ If the CKAN instance does not have the datastore extension,
+ we can't use it.
+ """
+ response.message = 'CKAN datastore is not enabled'
+ response.status = False
+ self.is_connected = False
+ return response
+
+ except Exception as e:
+ log.error(f'Error connecting to CKAN: {e}!')
+ self.is_connected = False
+ response.error_message = e
+
+ if response.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ response.success = True
+ return response
+
+ def query(self, query: ASTNode) -> HandlerResponse:
+ if not self.ckan:
+ self.connect()
+ query_str = self.renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def native_query(self, query_str: str) -> HandlerResponse:
+ """
+ Execute a query on the CKAN instance.
+ :param query_str: The query to execute.
+ :return: The response of the query.
+ """
+ if not self.ckan:
+ self.connect()
+ result = self.ckan.action.datastore_search_sql(sql=query_str)
+ if len(result.get('records')) > 0:
+ df = pd.DataFrame(result['records'])
+ response = HandlerResponse(RESPONSE_TYPE.TABLE, df)
+ else:
+ response = HandlerResponse(RESPONSE_TYPE.TABLE, None)
+ return response
+
+ def get_tables(self) -> HandlerResponse:
+ if not self.ckan:
+ self.connect()
+ result = self.ckan.action.datastore_search(resource_id='_table_metadata')
+ df = pd.DataFrame(result['records'])
+ response = HandlerResponse(RESPONSE_TYPE.TABLE, df)
+ return response
| diff --git a/mindsdb/integrations/handlers/ckan_handler/tests/__init__.py b/mindsdb/integrations/handlers/ckan_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/ckan_handler/tests/test_ckan_handler.py b/mindsdb/integrations/handlers/ckan_handler/tests/test_ckan_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/ckan_handler/tests/test_ckan_handler.py
@@ -0,0 +1,28 @@
+import unittest
+
+from mindsdb.integrations.handlers.ckan_handler.ckan_handler import CkanHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class CkanHandlerTest(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "connection_data": {
+ "url": "http://demo.ckan.org/"}
+ }
+ cls.handler = CkanHandler('test_ckan_handler', **cls.kwargs)
+
+ def test_connect(self):
+ self.handler.connect()
+
+ def test_query(self):
+ self.handler.query("SELECT * from 'b53c9e72-6b59-4cda-8c0c-7d6a51dad12a'")
+
+ def test_disconnect(self):
+ self.handler.disconnect()
+
+ def test_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables['type'] is not RESPONSE_TYPE.ERROR
| [New Integration]: Integration with CKAN
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
CKAN is an open-source DMS (data management system) for powering data hubs and data portals. It not only stores the data, but also makes it discoverable and presentable. An ML integration for its DataStore would enable better data presentation and visualization, and make the portal smarter.
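As a rough illustration of the surface such a handler could wrap, here is a minimal sketch using the `ckanapi` client against a CKAN portal with the DataStore extension enabled; the portal URL and resource id are placeholders.
```python
import pandas as pd
from ckanapi import RemoteCKAN

ckan = RemoteCKAN('https://demo.ckan.org/')

# DataStore resources can be queried with SQL via datastore_search_sql.
result = ckan.action.datastore_search_sql(
    sql='SELECT * FROM "<resource-id>" LIMIT 10'  # placeholder resource id
)
df = pd.DataFrame(result['records'])
print(df.head())
```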
### Motivation
Having this integration implemented will bring ML capabilities into the CKAN ecosystem without the need to use external tools or libraries.
### Implementation
_No response_
### Anything else?
_No response_
| 2022-07-11T16:08:59Z | [] | [] |
|
mindsdb/mindsdb | 2,534 | mindsdb__mindsdb-2534 | [
"2526"
] | adec9650b4e2e5aa95d93f8d33ad95901be44af5 | diff --git a/mindsdb/api/http/initialize.py b/mindsdb/api/http/initialize.py
--- a/mindsdb/api/http/initialize.py
+++ b/mindsdb/api/http/initialize.py
@@ -8,6 +8,8 @@
from pathlib import Path
import traceback
import tempfile
+import mimetypes
+
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
@@ -214,20 +216,23 @@ def update_static():
def initialize_flask(config, init_static_thread, no_studio):
- # Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
- if no_studio:
- app = Flask(
- __name__
- )
- else:
+ # region required for windows https://github.com/mindsdb/mindsdb/issues/2526
+ mimetypes.add_type('text/css', '.css')
+ mimetypes.add_type('text/javascript', '.js')
+ # endregion
+
+ kwargs = {}
+ if no_studio is not True:
static_path = os.path.join(config['paths']['static'], 'static/')
if os.path.isabs(static_path) is False:
static_path = os.path.join(os.getcwd(), static_path)
- app = Flask(
- __name__,
- static_url_path='/static',
- static_folder=static_path
- )
+ kwargs['static_url_path'] = '/static'
+ kwargs['static_folder'] = static_path
+
+ app = Flask(
+ __name__,
+ **kwargs
+ )
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
diff --git a/mindsdb/api/http/namespaces/config.py b/mindsdb/api/http/namespaces/config.py
--- a/mindsdb/api/http/namespaces/config.py
+++ b/mindsdb/api/http/namespaces/config.py
@@ -126,7 +126,7 @@ def put(self, name):
pass
stream_controller = StreamController(request.company_id)
- if params.get('type') in stream_controller.known_dbs and params.get('publish', False) is True:
+ if engine in stream_controller.known_dbs and params.get('publish', False) is True:
stream_controller.setup(name)
except Exception as e:
log.error(str(e))
diff --git a/mindsdb/api/http/namespaces/stream.py b/mindsdb/api/http/namespaces/stream.py
--- a/mindsdb/api/http/namespaces/stream.py
+++ b/mindsdb/api/http/namespaces/stream.py
@@ -56,7 +56,7 @@ def put(self, name):
if integration is None:
return abort(404, 'Integration "{}" doesn\'t exist'.format(params['integration']))
- if integration['type'] not in STREAM_INTEGRATION_TYPES:
+ if integration['engine'] not in STREAM_INTEGRATION_TYPES:
return abort(400, 'Integration "{}" is not of type [{}]'.format(
params['integration'],
'/'.join(STREAM_INTEGRATION_TYPES)
@@ -64,8 +64,8 @@ def put(self, name):
else:
# cloud
- if 'type' not in params_keys:
- return abort(404, "'type' parameter is required in case of cloud.")
+ if 'engine' not in params_keys:
+ return abort(404, "'engine' parameter is required in case of cloud.")
# because '_' is not allowed in pod name - replace it.
name = name.replace('_', '-')
@@ -83,7 +83,7 @@ def put(self, name):
stream_in=params['stream_in'],
stream_out=params['stream_out'],
anomaly_stream=params.get('anomaly_stream'),
- type=params.get('type'),
+ type=params.get('engine'),
connection_info=params.get('connection'),
learning_params=params.get('learning_params', {}),
learning_threshold=params.get('learning_threshold', 0)
diff --git a/mindsdb/interfaces/stream/redis/redisdb.py b/mindsdb/interfaces/stream/redis/redisdb.py
--- a/mindsdb/interfaces/stream/redis/redisdb.py
+++ b/mindsdb/interfaces/stream/redis/redisdb.py
@@ -25,12 +25,13 @@ def check_connection(self):
class Redis(StreamIntegration, RedisConnectionChecker):
def __init__(self, config, name, db_info):
- self.connection_info = db_info['connection']
+ connection_data = db_info['connection_data']
+ self.connection_info = connection_data['connection']
# Back compatibility with initial API version
- self.control_stream = db_info.get('control_stream') or db_info.get('stream') or None
- if 'advanced' in db_info:
- self.connection_info['advanced'] = db_info['advanced']
+ self.control_stream = connection_data.get('control_stream') or connection_data.get('stream') or None
+ if 'advanced' in connection_data:
+ self.connection_info['advanced'] = connection_data['advanced']
StreamIntegration.__init__(
self,
| diff --git a/tests/integration_tests/flows/test_redis.py b/tests/integration_tests/flows/test_redis.py
--- a/tests/integration_tests/flows/test_redis.py
+++ b/tests/integration_tests/flows/test_redis.py
@@ -132,9 +132,12 @@ def test_1_create_integration(self):
print(f"\nExecuting {self._testMethodName}")
url = f'{HTTP_API_ROOT}/config/integrations/{INTEGRATION_NAME}'
- params = {"type": "redis",
- "connection": CONNECTION_PARAMS,
- "control_stream": CONTROL_STREAM}
+ params = {
+ "type": "redis",
+ "publish": True,
+ "connection": CONNECTION_PARAMS,
+ "control_stream": CONTROL_STREAM
+ }
res = requests.put(url, json={"params": params})
self.assertEqual(res.status_code, 200)
| [Bug]: wrong content type for static files on Windows
On Windows, the user gets a `text/plain` content type for JS files.
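A minimal sketch of a workaround, assuming the HTTP API keeps building its app with Flask as it does today: on Windows the standard `mimetypes` module reads its type map from the registry, which can map `.js`/`.css` to `text/plain`, so registering the types explicitly before any static file is served avoids the wrong `Content-Type`.
```python
import mimetypes

from flask import Flask

# On Windows, mimetypes takes its defaults from the registry, which may
# associate .js/.css with text/plain; add the correct types up front.
mimetypes.add_type('text/css', '.css')
mimetypes.add_type('text/javascript', '.js')

app = Flask(__name__, static_url_path='/static', static_folder='static/')
```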
| 2022-07-20T11:12:13Z | [] | [] |
|
mindsdb/mindsdb | 2,557 | mindsdb__mindsdb-2557 | [
"2330"
] | 315bd684938a81635f15f3f07dd73c5021ad2faf | diff --git a/mindsdb/integrations/handlers/oracle_handler/__about__.py b/mindsdb/integrations/handlers/oracle_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/oracle_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = "MindsDB Oracle Database handler"
+__package_name__ = "mindsdb_oracle_handler"
+__version__ = "0.0.1"
+__description__ = "MindsDB handler for Oracle Database"
+__author__ = "MindsDB Inc"
+__github__ = "https://github.com/mindsdb/mindsdb"
+__pypi__ = "https://pypi.org/project/mindsdb/"
+__license__ = "GPL-3.0"
+__copyright__ = "Copyright 2022- mindsdb"
diff --git a/mindsdb/integrations/handlers/oracle_handler/__init__.py b/mindsdb/integrations/handlers/oracle_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/oracle_handler/__init__.py
@@ -0,0 +1,32 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+try:
+ from .oracle_handler import (
+ OracleHandler as Handler,
+ connection_args_example,
+ connection_args,
+ )
+
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+from .__about__ import __version__ as version, __description__ as description
+
+title = "Oracle DB"
+name = "oracle"
+type = HANDLER_TYPE.DATA
+icon_path = "icon.svg"
+
+__all__ = [
+ "Handler",
+ "version",
+ "name",
+ "type",
+ "title",
+ "description",
+ "connection_args",
+ "connection_args_example",
+ "import_error",
+ "icon_path",
+]
diff --git a/mindsdb/integrations/handlers/oracle_handler/oracle_handler.py b/mindsdb/integrations/handlers/oracle_handler/oracle_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/oracle_handler/oracle_handler.py
@@ -0,0 +1,196 @@
+from typing import Optional
+from collections import OrderedDict
+
+import pandas as pd
+import oracledb
+from oracledb import connect, Connection, makedsn
+
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb_sql.parser.ast.base import ASTNode
+
+from mindsdb.utilities.log import log
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE,
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+oracledb.defaults.fetch_lobs = False # return LOBs directly as strings or bytes
+
+
+class OracleHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of the Microsoft SQL Server statements.
+ """
+
+ name = "oracle"
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ super().__init__(name)
+ self.host = connection_data.get("host")
+ self.port = int(connection_data.get("port") or 1521)
+ self.sid = connection_data.get("sid")
+ self.service_name = connection_data.get("service_name")
+ self.user = connection_data.get("user")
+ self.password = connection_data.get("password")
+
+ if self.sid is None and self.service_name is None:
+ raise ValueError("Either 'sid' or 'service_name' must be given")
+ if self.sid and self.service_name:
+ raise ValueError("Only one of 'sid' or 'service_name' must be given")
+
+ if self.sid:
+ self.dsn = makedsn(host=self.host, port=self.port, sid=self.sid)
+ else:
+ self.dsn = makedsn(host=self.host, port=self.port, service_name=self.service_name)
+
+ self.connection = None
+ self.is_connected = False
+
+ def connect(self) -> Connection:
+ if self.is_connected is True:
+ return self.connection
+
+ connection = connect(user=self.user, password=self.password, dsn=self.dsn)
+
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def disconnect(self):
+ if self.is_connected:
+ self.connection.close()
+ self.is_connected = False
+ return
+
+ def check_connection(self) -> StatusResponse:
+ """
+ Check the connection of the database
+ :return: success status and error message if error occurs
+ """
+
+ response = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ con = self.connect()
+ con.ping()
+ response.success = True
+ except Exception as e:
+ log.error(f"Error connecting to Oracle DB {self.dsn}, {e}!")
+ response.error_message = str(e)
+ finally:
+ if response.success is True and need_to_close:
+ self.disconnect()
+ if response.success is False and self.is_connected is True:
+ self.is_connected = False
+ return response
+
+ def native_query(self, query: str) -> Response:
+ """
+ Receive SQL query and runs it
+ :param query: The SQL query to run
+ :return: returns the records from the current recordset
+ """
+ need_to_close = self.is_connected is False
+
+ connection = self.connect()
+ with connection.cursor() as cur:
+ try:
+ cur.execute(query)
+ result = cur.fetchall()
+ if result:
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ result,
+ columns=[row[0] for row in cur.description],
+ ),
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+
+ connection.commit()
+ except Exception as e:
+ log.error(f"Error running query: {query} on {self.dsn}!")
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e),
+ )
+ connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+ Retrieve the data from the SQL statement.
+ """
+ renderer = SqlalchemyRender("oracle")
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def get_tables(self) -> Response:
+ """
+ List all tables in Oracle DB owned by the current user.
+ """
+ query = """
+ SELECT table_name
+ FROM user_tables
+ ORDER BY 1;
+ """
+ return self.native_query(query)
+
+ def get_columns(self, table_name: str) -> Response:
+ """
+ Show details about the table.
+ """
+ query = f"""
+ SELECT
+ column_name,
+ data_type
+ FROM USER_TAB_COLUMNS
+ WHERE table_name = '{table_name}'
+ """
+ result = self.native_query(query)
+ return result
+
+
+connection_args = OrderedDict(
+ host={
+ "type": ARG_TYPE.STR,
+ "description": "The host name or IP address of the Oracle DB.",
+ },
+ port={
+ "type": ARG_TYPE.INT,
+ "description": "The TCP/IP port of the Oracle DB. Must be an integer. Default 1521.",
+ },
+ sid={
+ "type": ARG_TYPE.STR,
+ "description": "The site identifier of the Oracle DB. Either sid or service_name should be provided.",
+ },
+ service_name={
+ "type": ARG_TYPE.STR,
+ "description": "The name of the Oracle DB service. Either sid or service_name should be provided.",
+ },
+ user={
+ "type": ARG_TYPE.STR,
+ "description": "The user name used to authenticate against the Oracle DB.",
+ },
+ password={
+ "type": ARG_TYPE.STR,
+ "description": "The password to authenticate the user against Oracle DB.",
+ },
+)
+
+connection_args_example = OrderedDict(
+ host="127.0.0.1",
+ port=1521,
+ user="admin",
+ password="password",
+ sid="ORCL",
+)
| diff --git a/mindsdb/integrations/handlers/oracle_handler/tests/__init__.py b/mindsdb/integrations/handlers/oracle_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/oracle_handler/tests/test_oracle_handler.py b/mindsdb/integrations/handlers/oracle_handler/tests/test_oracle_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/oracle_handler/tests/test_oracle_handler.py
@@ -0,0 +1,32 @@
+import unittest
+from mindsdb.integrations.handlers.oracle_handler.oracle_handler import OracleHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class OracleHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "127.0.0.1",
+ "port": "1521",
+ "user": "admin",
+ "password": "password",
+ "sid": "ORCL"
+ }
+ cls.handler = OracleHandler('test_oracle_handler', **cls.kwargs)
+
+ def test_0_check_connection(self):
+ assert self.handler.check_connection()
+
+ def test_1_native_query_select(self):
+ query = "SELECT * FROM DUAL"
+ result = self.handler.native_query(query)
+ assert result.type is RESPONSE_TYPE.TABLE
+
+ def test_2_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+ def test_4_get_columns(self):
+ columns = self.handler.get_columns('customers')
+ assert columns.type is not RESPONSE_TYPE.ERROR
| [New Integration]: 👀👀 Oracle Database Integration 👍👍
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
## Oracle is one of the most used databases in the industry
As it is among the most popular SQL databases, it should be one of MindsDB's integrations.
- It would help developers implement in-database AI.
- It would save developers the headache of implementing ML themselves.
### Motivation
As a developer, I think of my developer **`friends`** and the problems they face when implementing AI.
Most of the industry uses Oracle, as it is an `easy + popular` database.
It would be great to get a model with an `account + a few clicks on the buttons`, ready to predict and use AI Tables in the database.
### Implementation
_No response_
### Anything else?
_No response_
| I'd like to work on this issue, could you assign it to me pls?
Sure @eskarimov to start you can check https://docs.mindsdb.com/contribute/integrations/ and review the code of the handlers https://github.com/mindsdb/mindsdb/tree/staging/mindsdb/integrations/handlers. If you have more questions we can discuss this on slack | 2022-07-25T07:52:44Z | [] | [] |
mindsdb/mindsdb | 2,567 | mindsdb__mindsdb-2567 | [
"1170"
] | d6ca2240de59ae730bd25d08cef73f22356dce3c | diff --git a/mindsdb/integrations/handlers/db2_handler/__about__.py b/mindsdb/integrations/handlers/db2_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/db2_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB IBM DB2 handler'
+__package_name__ = 'mindsdb_db2_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for IBM DB2"
+__author__ = 'MindsDB Inc'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/db2_handler/__init__.py b/mindsdb/integrations/handlers/db2_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/db2_handler/__init__.py
@@ -0,0 +1,20 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+try:
+ from .db2_handler import DB2Handler as Handler
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+from .__about__ import __version__ as version, __description__ as description
+
+
+title = 'IBM DB2'
+name = 'db2'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.png'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/db2_handler/db2_handler.py b/mindsdb/integrations/handlers/db2_handler/db2_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/db2_handler/db2_handler.py
@@ -0,0 +1,299 @@
+from collections import OrderedDict
+from typing import Optional
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.utilities.log import log
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+
+import pandas as pd
+import ibm_db_dbi as love
+
+from ibm_db_sa.ibm_db import DB2Dialect_ibm_db as DB2Dialect
+
+
+
+
+class DB2Handler(DatabaseHandler):
+
+
+ name= 'DB2'
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ """ Initialize the handler
+ Args:
+ name (str): name of particular handler instance
+ connection_data (dict): parameters for connecting to the database
+ **kwargs: arbitrary keyword arguments.
+ """
+ super().__init__(name)
+
+ self.kwargs = kwargs
+ self.parser = parse_sql
+ self.driver = "{IBM DB2 ODBC DRIVER}"
+ self.database = connection_data['database']
+ self.user = connection_data['user']
+ self.password = connection_data['password']
+ self.schemaName = connection_data['schema_name']
+ self.host = connection_data['host']
+ self.port = connection_data['port']
+ self.connString = (
+ "DRIVER={0};"
+ "DATABASE={1};"
+ "HOST={2};"
+ "PORT={3};"
+ "PROTOCOL={4};"
+ "UID={5};"
+ "PWD={6};").format(self.driver, self.database, self.host, self.port,"TCPIP" , self.user, self.password)
+
+
+
+
+ self.connection = None
+ self.is_connected = False
+
+
+ def connect(self):
+ """ Set up any connections required by the handler
+ Should return output of check_connection() method after attempting
+ connection. Should switch self.is_connected.
+ Returns:
+ Connection Object
+ """
+ if self.is_connected is True:
+ return self.connection
+
+ try:
+ self.connection = love.pconnect(self.connString,'','')
+
+ self.is_connected= True
+ except Exception as e:
+ log.error(f"Error while connecting to {self.database}, {e}")
+
+
+ return self.connection
+
+
+ def disconnect(self):
+ """ Close any existing connections
+ Should switch self.is_connected.
+ """
+ if self.is_connected is False:
+ return
+ try:
+ self.connection.close()
+ except Exception as e:
+ log.error(f"Error while disconnecting to {self.database}, {e}")
+
+ return
+
+
+ def check_connection(self) -> StatusResponse:
+ """ Check connection to the handler
+ Returns:
+ HandlerStatusResponse
+ """
+ responseCode = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ self.connect()
+ responseCode.success = True
+ except Exception as e:
+ log.error(f'Error connecting to database {self.database}, {e}!')
+ responseCode.error_message = str(e)
+ finally:
+ if responseCode.success is True and need_to_close:
+ self.disconnect()
+ if responseCode.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return responseCode
+
+
+ def native_query(self, query: str) -> StatusResponse:
+ """Receive raw query and act upon it somehow.
+ Args:
+ query (Any): query in native format (str for sql databases,
+ dict for mongo, etc)
+ Returns:
+ HandlerResponse
+ """
+ need_to_close = self.is_connected is False
+ query=query.upper()
+ conn = self.connect()
+ with conn.cursor() as cur:
+ try:
+ cur.execute(query)
+
+ if cur._result_set_produced :
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ self.connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.database}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ self.connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+
+ def query(self, query: ASTNode) -> StatusResponse:
+ """Receive query as AST (abstract syntax tree) and act upon it somehow.
+ Args:
+ query (ASTNode): sql query represented as AST. May be any kind
+ of query: SELECT, INTSERT, DELETE, etc
+ Returns:
+ HandlerResponse
+ """
+
+
+
+ renderer = SqlalchemyRender(DB2Dialect)
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+
+ def get_tables(self) -> StatusResponse:
+ """ Return list of entities
+ Return list of entities that will be accesible as tables.
+ Returns:
+ HandlerResponse: shoud have same columns as information_schema.tables
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-tables-table.html)
+ Column 'TABLE_NAME' is mandatory, other is optional.
+ """
+ self.connect()
+
+
+ result=self.connection.tables(self.schemaName)
+ try:
+ if result:
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ [result[i]['TABLE_NAME'] for i in range(len(result)) ],
+ columns=['TABLE_NAME']
+
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+
+ except Exception as e:
+ log.error(f'Error running while getting table {e} on ')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+
+
+
+ return response
+
+
+ def get_columns(self, table_name: str) -> StatusResponse:
+ """ Returns a list of entity columns
+ Args:
+ table_name (str): name of one of tables returned by self.get_tables()
+ Returns:
+ HandlerResponse: shoud have same columns as information_schema.columns
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-columns-table.html)
+ Column 'COLUMN_NAME' is mandatory, other is optional. Hightly
+ recomended to define also 'DATA_TYPE': it should be one of
+ python data types (by default it str).
+ """
+
+ self.connect()
+
+
+ result=self.connection.columns(table_name=table_name)
+ try:
+ if result:
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ [result[i]['COLUMN_NAME'] for i in range(len(result)) ],
+ columns=['COLUMN_NAME']
+
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+
+ except Exception as e:
+ log.error(f'Error running while getting table {e} on ')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+
+
+
+ return response
+
+
+
+
+
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the DB2 server/database.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': """
+ The database name to use when connecting with the DB2 server.
+ """
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the DB2 server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the DB2 server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'Specify port to connect DB2 through TCP/IP'
+ },
+ schemaName={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specify the schema name '
+ },
+
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port='25000',
+ password='1234',
+ user='db2admin',
+ schemaName="db2admin",
+ database="BOOKS",
+)
| diff --git a/mindsdb/integrations/handlers/db2_handler/tests/__init__.py b/mindsdb/integrations/handlers/db2_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/db2_handler/tests/test_db2_handler.py b/mindsdb/integrations/handlers/db2_handler/tests/test_db2_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/db2_handler/tests/test_db2_handler.py
@@ -0,0 +1,49 @@
+import unittest
+from mindsdb.integrations.handlers.db2_handler.db2_handler import DB2Handler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class DB2HandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "127.0.0.1",
+ "port": "25000",
+ "user": "db2admin",
+ "password": "1234",
+ "database": "Books",
+ "schemaName": "db2admin"
+ }
+ cls.handler = DB2Handler('test_db2_handler', **cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS LOVE")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS LOVE (LOVER varchar(20))")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_3_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_4_select_query(self):
+ query = "SELECT * FROM AUTHORS"
+ result = self.handler.native_query(query)
+ assert result.type is RESPONSE_TYPE.TABLE
+
+ def test_5_check_connection(self):
+ self.handler.check_connection()
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
diff --git a/mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py b/mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py
--- a/mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py
+++ b/mindsdb/integrations/handlers/mysql_handler/tests/test_mysql_handler.py
@@ -48,3 +48,4 @@ def test_7_select_query(self):
query = "SELECT * FROM test_mdb WHERE 'id'='a'"
result = self.handler.query(query)
assert result['type'] is RESPONSE_TYPE.TABLE
+
| Support DB2 database integration
Support IBM DB2 database integration
We need to figure out how to connect, so that we can enable AI-Tables whether DB2 is deployed in a container or on DB2 on Cloud.
P.S. Apologies for the earlier incomplete/unanswered issue: https://github.com/mindsdb/mindsdb/issues/1167#event-4489203966
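For reference, connecting from Python typically goes through IBM's `ibm_db`/`ibm_db_dbi` client; a minimal sketch (assuming that client, with placeholder host, port and credentials) could look like this:

```python
# Minimal sketch: open a DB2 connection with the ibm_db_dbi DB-API wrapper.
# Host, port, database and credentials below are placeholders.
import ibm_db_dbi

conn_str = (
    "DATABASE=BOOKS;"
    "HOSTNAME=127.0.0.1;"
    "PORT=25000;"
    "PROTOCOL=TCPIP;"
    "UID=db2admin;"
    "PWD=1234;"
)
conn = ibm_db_dbi.connect(conn_str, "", "")

cur = conn.cursor()
cur.execute("SELECT * FROM AUTHORS")
print(cur.fetchall())
conn.close()
```

The same connection details (host, port, user, password, database, schema) are what a MindsDB handler would take as its connection parameters.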
| HI @eucariop
Thank you, we look forward to helping you. Can you describe your use case: what data do you have in DB2, and what would you like to predict?
Best regards,
Jorge
@ZoranPandovski I want to work on this.
Please assign it to me and also guide me.
To start you can check https://docs.mindsdb.com/contribute/integrations/ and review the code of the handlers https://github.com/mindsdb/mindsdb/tree/staging/mindsdb/integrations/handlers. If you have more questions we can discuss this on slack | 2022-07-25T18:40:14Z | [] | [] |
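Following the advice above about reviewing the existing handlers, a new data handler boils down to subclassing `DatabaseHandler` and filling in a handful of methods. A rough skeleton based on the handlers in this repository (the method bodies are illustrative stubs, not a working implementation):

```python
# Rough skeleton of a MindsDB data handler; the real handlers in
# mindsdb/integrations/handlers flesh out each of these methods.
from mindsdb.integrations.libs.base_handler import DatabaseHandler
from mindsdb.integrations.libs.response import (
    HandlerStatusResponse as StatusResponse,
    HandlerResponse as Response,
)


class MyDBHandler(DatabaseHandler):
    name = 'mydb'  # placeholder integration name

    def __init__(self, name, connection_data, **kwargs):
        super().__init__(name)
        self.connection_data = connection_data
        self.connection = None
        self.is_connected = False

    def connect(self):
        # open a client connection here and flip self.is_connected
        raise NotImplementedError

    def disconnect(self):
        # close the client connection and flip self.is_connected
        raise NotImplementedError

    def check_connection(self) -> StatusResponse:
        # return StatusResponse(True) when connect() succeeds
        raise NotImplementedError

    def native_query(self, query: str) -> Response:
        # run the raw query and wrap the rows in a Response with a DataFrame
        raise NotImplementedError

    def get_tables(self) -> Response:
        # list the tables exposed by the integration
        raise NotImplementedError

    def get_columns(self, table_name: str) -> Response:
        # describe the columns of one table
        raise NotImplementedError
```

The test files under `tests/test_<name>_handler.py`, like the ones in this thread, exercise exactly these methods.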
mindsdb/mindsdb | 2,633 | mindsdb__mindsdb-2633 | [
"2589"
] | e1d0162b8bd5372725a481bcfe2f16e91555509a | diff --git a/mindsdb/integrations/handlers/vertica_handler/__about__.py b/mindsdb/integrations/handlers/vertica_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/vertica_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB Vertica handler'
+__package_name__ = 'mindsdb_vertica_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for Vertica"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/vertica_handler/__init__.py b/mindsdb/integrations/handlers/vertica_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/vertica_handler/__init__.py
@@ -0,0 +1,23 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .vertica_handler import (
+ VerticaHandler as Handler,
+ connection_args_example,
+ connection_args
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'Vertica'
+name = 'vertica'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'connection_args', 'connection_args_example', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/vertica_handler/vertica_handler.py b/mindsdb/integrations/handlers/vertica_handler/vertica_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/vertica_handler/vertica_handler.py
@@ -0,0 +1,200 @@
+from collections import OrderedDict
+from typing import Optional
+import pandas as pd
+import vertica_python as vp
+
+
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb_sql.parser.ast.base import ASTNode
+
+from mindsdb.utilities.log import log
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+# from sqlalchemy_vertica.dialect_pyodbc import VerticaDialect
+from sqla_vertica_python.vertica_python import VerticaDialect
+
+
+
+
+class VerticaHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of the Vertica statements.
+ """
+
+ name = 'vertica'
+
+ def __init__(self, name, connection_data: Optional[dict], **kwargs):
+ super().__init__(name)
+
+ self.parser = parse_sql
+ self.dialect = 'vertica'
+ self.kwargs = kwargs
+ self.connection_data = connection_data
+ self.schema_name = connection_data['schema_name'] if 'schema_name' in connection_data else "public"
+
+ self.connection = None
+ self.is_connected = False
+
+
+ def connect(self):
+ if self.is_connected is True:
+ return self.connection
+
+ config = {
+ 'host': self.connection_data['host'],
+ 'port': self.connection_data['port'],
+ 'user': self.connection_data['user'],
+ 'password': self.connection_data['password'],
+ 'database': self.connection_data['database']
+ }
+
+ connection = vp.connect(**config)
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def disconnect(self):
+ if self.is_connected is False:
+ return
+ self.connection.close()
+ self.is_connected = False
+ return
+
+ def check_connection(self) -> StatusResponse:
+
+ result = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ connection = self.connect()
+ result.success = connection.opened()
+ except Exception as e:
+ log.error(f'Error connecting to Vertica {self.connection_data["database"]}, {e}!')
+ result.error_message = str(e)
+
+ if result.success is True and need_to_close:
+ self.disconnect()
+ if result.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return result
+
+ def native_query(self, query: str) -> Response:
+ """
+ Receive SQL query and runs it
+ :param query: The SQL query to run in VERTICA
+ :return: returns the records from the current recordset
+ """
+
+ need_to_close = self.is_connected is False
+
+ connection = self.connect()
+ with connection.cursor() as cur:
+ try:
+ e=cur.execute(query)
+ result = e.fetchall()
+ if e.rowcount != -1:
+
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ pd.DataFrame(
+ result,
+ columns=[x.name for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.connection_data["database"]}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+ Retrieve the data from the SQL statement.
+ """
+ renderer = SqlalchemyRender(VerticaDialect)
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def get_tables(self) -> Response:
+ """
+ Get a list of all the tables in Vertica
+ """
+ q = f'''SELECT
+ TABLE_NAME,
+ TABLE_SCHEMA
+ from v_catalog.tables
+ WHERE table_schema='{self.schema_name}'
+ order by
+ table_name;'''
+
+
+ return self.native_query(q)
+
+ def get_columns(self, table_name) -> Response:
+ """
+ Show details about the table
+ """
+ q = f'''SELECT
+ column_name ,
+ data_type
+ FROM v_catalog.columns
+ WHERE table_name='{table_name}';'''
+
+
+ return self.native_query(q)
+
+
+connection_args = OrderedDict(
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the Vertica server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the VERTICA server.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The database name to use when connecting with the VERTICA server.'
+ },
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the VERTICA server. NOTE: use \'127.0.0.1\' instead of \'localhost\' to connect to local server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'The TCP/IP port of the VERTICA server. Must be an integer.'
+ },
+ schema_name={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Tables are listed according to the schema name (optional). Note: the default value is "public"'
+ }
+
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port=5433,
+ user='root',
+ password='password',
+ database='database',
+ schema_name='xyz'
+)
| diff --git a/mindsdb/integrations/handlers/vertica_handler/tests/__init__.py b/mindsdb/integrations/handlers/vertica_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/vertica_handler/tests/test_vertica_handler.py b/mindsdb/integrations/handlers/vertica_handler/tests/test_vertica_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/vertica_handler/tests/test_vertica_handler.py
@@ -0,0 +1,51 @@
+import unittest
+from mindsdb.integrations.handlers.vertica_handler.vertica_handler import VerticaHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class VerticaHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host":'127.0.0.1',
+ "port":5433,
+ "user":'dbadmin',
+ "password":'',
+ "database":'VMart',
+ "schema_name":'public'
+
+ }
+ cls.handler = VerticaHandler('test_vertica_handler', cls.kwargs)
+
+ def test_0_check_connection(self):
+ assert self.handler.check_connection()
+
+ def test_1_connect(self):
+ assert self.handler.connect()
+
+ def test_2_create_table(self):
+ query = "CREATE Table TEST(id Number(1),Name Varchar(33))"
+ result = self.handler.query(query)
+ assert result.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert(self):
+ query = "INSERT INTO TEST (1,'lOVe yOU)"
+ result = self.handler.query(query)
+ assert result.type is not RESPONSE_TYPE.ERROR
+
+ def test_4_native_query_select(self):
+ query = "SELECT * FROM TEST;"
+ result = self.handler.query(query)
+ assert result.type is RESPONSE_TYPE.TABLE
+
+ def test_5_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+ def test_6_get_columns(self):
+ columns = self.handler.get_columns('TEST')
+ assert columns.type is not RESPONSE_TYPE.ERROR
+
+
+if __name__ == '__main__':
+ unittest.main()
| [New Integration]: Integration with Vertica
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
Vertica has a very wide range of use cases and many features on its platform.
Vertica itself has an in-database ML prediction ability, but if anyone wants to use it with MindsDB then this integration can help.
Also, MindsDB has an easier-to-use interface than Vertica.
### Motivation
I thought it would be helpful for those who want to use Vertica with MindsDB.
### Implementation
If you want I can try to add this integration.
### Anything else?
_No response_
| @ZoranPandovski if you want, I can try to implement this!
Sure | 2022-08-04T20:12:59Z | [] | [] |
mindsdb/mindsdb | 2,682 | mindsdb__mindsdb-2682 | [
"2674"
] | 6d0cc51e1f348559b42a0fbd2210499b52c388b7 | diff --git a/mindsdb/integrations/handlers/crate_handler/__about__.py b/mindsdb/integrations/handlers/crate_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/crate_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB Crate DB handler'
+__package_name__ = 'mindsdb_cratedb_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for Crate DB"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/crate_handler/__init__.py b/mindsdb/integrations/handlers/crate_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/crate_handler/__init__.py
@@ -0,0 +1,20 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+try:
+ from .crate_handler import CrateHandler as Handler
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+from .__about__ import __version__ as version, __description__ as description
+
+
+title = 'CrateDB'
+name = 'crate'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/crate_handler/crate_handler.py b/mindsdb/integrations/handlers/crate_handler/crate_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/crate_handler/crate_handler.py
@@ -0,0 +1,239 @@
+from collections import OrderedDict
+from typing import Optional
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.utilities.log import log
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+
+import pandas as pd
+from crate import client as db
+from crate.client.sqlalchemy.dialect import CrateDialect
+
+
+
+class CrateHandler(DatabaseHandler):
+
+
+ name= 'crate'
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ """ Initialize the handler
+ Args:
+ name (str): name of particular handler instance
+ connection_data (dict): parameters for connecting to the database
+ **kwargs: arbitrary keyword arguments.
+ """
+ super().__init__(name)
+
+ self.kwargs = kwargs
+ self.parser = parse_sql
+ self.dialect = 'crate'
+ self.user = connection_data['user']
+ self.password = connection_data['password']
+ self.schemaName = connection_data.get('schema_name','doc')
+ self.host = connection_data['host']
+ self.port = connection_data['port']
+
+
+
+
+ self.connection = None
+ self.is_connected = False
+
+
+ def connect(self):
+ """ Set up any connections required by the handler
+ Should return output of check_connection() method after attempting
+ connection. Should switch self.is_connected.
+ Returns:
+ Connection Object
+ """
+ if self.is_connected is True:
+ return self.connection
+
+ url="http://{0}:{1}@{2}:{3}".format(self.user,self.password,self.host,self.port)
+ try:
+ self.connection = db.connect(url)
+
+ self.is_connected= True
+ except Exception as e:
+ log.error(f"Error while connecting to CrateDB, {e}")
+
+
+ return self.connection
+
+
+ def disconnect(self):
+ """ Close any existing connections
+ Should switch self.is_connected.
+ """
+
+ if self.is_connected is False:
+ return
+ try:
+ self.connection.close()
+ self.is_connected=False
+ except Exception as e:
+ log.error(f"Error while disconnecting to CrateDB, {e}")
+
+ return
+
+
+ def check_connection(self) -> StatusResponse:
+ """ Check connection to the handler
+ Returns:
+ HandlerStatusResponse
+ """
+
+ responseCode = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ self.connect()
+ responseCode.success = True
+ except Exception as e:
+ log.error(f'Error connecting to CrateDB, {e}!')
+ responseCode.error_message = str(e)
+ finally:
+ if responseCode.success is True and need_to_close:
+ self.disconnect()
+ if responseCode.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return responseCode
+
+
+ def native_query(self, query: str) -> StatusResponse:
+ """Receive raw query and act upon it somehow.
+ Args:
+ query (Any): query in native format (str for sql databases,
+ dict for mongo, etc)
+ Returns:
+ HandlerResponse
+ """
+
+ need_to_close = self.is_connected is False
+
+ conn = self.connect()
+ cur = conn.cursor()
+ try:
+ cur.execute(query)
+ if cur.rowcount :
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ except Exception as e:
+ log.error(f'Error running query: {query} on CrateDB!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ cur.close()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+
+ def query(self, query: ASTNode) -> StatusResponse:
+ """Receive query as AST (abstract syntax tree) and act upon it somehow.
+ Args:
+ query (ASTNode): sql query represented as AST. May be any kind
+ of query: SELECT, INSERT, DELETE, etc
+ Returns:
+ HandlerResponse
+ """
+
+
+
+ renderer = SqlalchemyRender(CrateDialect)
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+
+ def get_tables(self) -> StatusResponse:
+ """ Return list of entities
+ Return list of entities that will be accessible as tables.
+ Returns:
+ HandlerResponse: should have the same columns as information_schema.tables
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-tables-table.html)
+ Column 'TABLE_NAME' is mandatory, the others are optional.
+ """
+
+ q = f"SHOW TABLES FROM {self.schemaName};"
+ result = self.native_query(q)
+ return result
+
+ def get_columns(self, table_name: str) -> StatusResponse:
+ """ Returns a list of entity columns
+ Args:
+ table_name (str): name of one of tables returned by self.get_tables()
+ Returns:
+ HandlerResponse: should have the same columns as information_schema.columns
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-columns-table.html)
+ Column 'COLUMN_NAME' is mandatory, the others are optional. It is highly
+ recommended to also define 'DATA_TYPE': it should be one of the
+ python data types (str by default).
+ """
+
+
+ q = f"SHOW COLUMNS FROM {table_name};"
+ result = self.native_query(q)
+ return result
+
+
+
+
+
+
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the CrateDB server/database.'
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the CrateDB server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the CrateDB server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'Specify port to connect CrateDB server'
+ },
+ schema_name={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specify the schema name. Note: it is optional; the default is "doc".'
+ },
+
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port='4200',
+ password='',
+ user='crate',
+
+
+)
| diff --git a/mindsdb/integrations/handlers/crate_handler/tests/__init__.py b/mindsdb/integrations/handlers/crate_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/crate_handler/tests/test_crate_handler.py b/mindsdb/integrations/handlers/crate_handler/tests/test_crate_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/crate_handler/tests/test_crate_handler.py
@@ -0,0 +1,51 @@
+import unittest
+from mindsdb.integrations.handlers.crate_handler.crate_handler import CrateHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class CrateHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "127.0.0.1",
+ "port": 4200,
+ "user": "crate",
+ "password": ""
+ }
+ cls.handler = CrateHandler('test_crate_handler', cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS PREM;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS PREM (Premi varchar(50));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert_table(self):
+ res = self.handler.query("INSERT INTO PREM VALUES('Radha <3 Krishna');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM PREM;"
+ result = self.handler.native_query(query)
+ assert result.type in (RESPONSE_TYPE.TABLE, RESPONSE_TYPE.OK)
+
+ def test_6_check_connection(self):
+ self.handler.check_connection()
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration with crateDB
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
**CrateDB is more efficient than MongoDB in terms of memory and storage, and it is easier to scale. It offers real-time responses even with high data volumes, high concurrency, and a variety of data types. On top of that, CrateDB is accessible through SQL.**
It is used by many organizations as a cloud database for their apps, web apps, and IoT apps to provide fast and user-friendly interaction, and since it is **open source** it is widely used.
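As a small illustration of the "accessible through SQL" point, a sketch of talking to CrateDB from Python, mirroring the handler in this PR (host, port and credentials are placeholders):

```python
# Minimal sketch: CrateDB exposes SQL over an HTTP endpoint, so the client
# only needs the server URL. Credentials here are placeholders.
from crate import client

conn = client.connect("http://crate:@127.0.0.1:4200")  # user 'crate', empty password

cur = conn.cursor()
cur.execute("SELECT name FROM sys.cluster")
print(cur.fetchall())
cur.close()
conn.close()
```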
### Motivation
As it is open source, it is used by many developers and organizations, and by adding this integration CrateDB users can use MindsDB for their use cases.
### Implementation
_No response_
### Anything else?
_No response_
| Please tell if this integration should be added.
If **YES**
### assign it to me @ZoranPandovski. | 2022-08-15T13:49:57Z | [] | []
mindsdb/mindsdb | 2,696 | mindsdb__mindsdb-2696 | [
"2619"
] | a6918e027c01f8b193dfe6c6f2fe67acf359ace6 | diff --git a/mindsdb/integrations/handlers/informix_handler/__about__.py b/mindsdb/integrations/handlers/informix_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/informix_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB IBM Informix handler'
+__package_name__ = 'mindsdb_informix_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for IBM Informix"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/informix_handler/__init__.py b/mindsdb/integrations/handlers/informix_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/informix_handler/__init__.py
@@ -0,0 +1,20 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+try:
+ from .informix_handler import InformixHandler as Handler
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+from .__about__ import __version__ as version, __description__ as description
+
+
+title = 'IBM Informix'
+name = 'informix'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.png'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/informix_handler/informix_handler.py b/mindsdb/integrations/handlers/informix_handler/informix_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/informix_handler/informix_handler.py
@@ -0,0 +1,318 @@
+from collections import OrderedDict
+from typing import Optional
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.utilities.log import log
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+
+import pandas as pd
+import IfxPyDbi as I
+from sqlalchemy_informix.ibmdb import InformixDialect
+
+
+
+class InformixHandler(DatabaseHandler):
+
+
+ name= 'informix'
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ ''' Initialize the handler
+ Args:
+ name (str): name of particular handler instance
+ connection_data (dict): parameters for connecting to the database
+ **kwargs: arbitrary keyword arguments.
+ '''
+ super().__init__(name)
+
+ self.kwargs = kwargs
+ self.parser = parse_sql
+ self.loging_enabled = connection_data['loging_enabled'] if 'loging_enabled' in connection_data else True
+ self.server = connection_data['server']
+ self.database = connection_data['database']
+ self.user = connection_data['user']
+ self.password = connection_data['password']
+ self.schemaName = connection_data['schema_name']
+ self.host = connection_data['host']
+ self.port = connection_data['port']
+ self.connString = (
+ 'SERVER={0};'
+ 'DATABASE={1};'
+ 'HOST={2};'
+ 'PORT={3};'
+ 'UID={4};'
+ 'PWD={5};').format(self.server, self.database, self.host, self.port, self.user, self.password)
+
+
+
+
+ self.connection = None
+ self.is_connected = False
+
+
+ def connect(self):
+ ''' Set up any connections required by the handler
+ Should return output of check_connection() method after attempting
+ connection. Should switch self.is_connected.
+ Returns:
+ Connection Object
+ '''
+ if self.is_connected is True:
+ return self.connection
+
+ try:
+ self.connection = I.connect(self.connString,'','')
+
+ self.is_connected= True
+ except Exception as e:
+ log.error(f'Error while connecting to {self.database}, {e}')
+
+
+ return self.connection
+
+
+ def disconnect(self):
+ ''' Close any existing connections
+ Should switch self.is_connected.
+ '''
+ if self.is_connected is False:
+ return
+ try:
+ self.connection.close()
+ self.is_connected = False
+ except Exception as e:
+ log.error(f'Error while disconnecting to {self.database}, {e}')
+
+ return
+
+
+ def check_connection(self) -> StatusResponse:
+ ''' Check connection to the handler
+ Returns:
+ HandlerStatusResponse
+ '''
+ responseCode = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ self.connect()
+ responseCode.success = True
+ except Exception as e:
+ log.error(f'Error connecting to database {self.database}, {e}!')
+ responseCode.error_message = str(e)
+ finally:
+ if responseCode.success is True and need_to_close:
+ self.disconnect()
+ if responseCode.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return responseCode
+
+
+ def native_query(self, query: str) -> StatusResponse:
+ '''Receive raw query and act upon it somehow.
+ Args:
+ query (Any): query in native format (str for sql databases,
+ dict for mongo, etc)
+ Returns:
+ HandlerResponse
+ '''
+ need_to_close = self.is_connected is False
+ conn = self.connect()
+ cur = conn.cursor()
+ try:
+ cur.execute(query)
+
+ if cur._result_set_produced :
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ if self.loging_enabled:
+ self.connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.database}')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ if self.loging_enabled:
+ self.connection.rollback()
+
+ cur.close()
+
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+
+ def query(self, query: ASTNode) -> StatusResponse:
+ '''Receive query as AST (abstract syntax tree) and act upon it somehow.
+ Args:
+ query (ASTNode): sql query represented as AST. May be any kind
+ of query: SELECT, INSERT, DELETE, etc
+ Returns:
+ HandlerResponse
+ '''
+
+
+
+ renderer = SqlalchemyRender(InformixDialect)
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+
+ def get_tables(self) -> StatusResponse:
+ ''' Return list of entities
+ Return list of entities that will be accessible as tables.
+ Returns:
+ HandlerResponse: should have the same columns as information_schema.tables
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-tables-table.html)
+ Column 'TABLE_NAME' is mandatory, the others are optional.
+ '''
+ self.connect()
+
+
+ result=self.connection.tables()
+ try:
+ if result:
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ [x['TABLE_NAME'] for x in result if x['TABLE_SCHEM']==self.schemaName],
+ columns=['TABLE_NAME']
+
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+
+ except Exception as e:
+ log.error(f'Error while listing tables: {e}')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+
+
+
+ return response
+
+
+ def get_columns(self, table_name: str) -> StatusResponse:
+ ''' Returns a list of entity columns
+ Args:
+ table_name (str): name of one of tables returned by self.get_tables()
+ Returns:
+ HandlerResponse: should have the same columns as information_schema.columns
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-columns-table.html)
+ Column 'COLUMN_NAME' is mandatory, the others are optional. It is highly
+ recommended to also define 'DATA_TYPE': it should be one of the
+ python data types (str by default).
+ '''
+
+ self.connect()
+
+
+ result=self.connection.columns(table_name=table_name)
+ try:
+ if result:
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ [result[i]['COLUMN_NAME'] for i in range(len(result)) ],
+ columns=['COLUMN_NAME']
+
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+
+ except Exception as e:
+ log.error(f'Error while getting columns for table {table_name}: {e}')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+
+
+
+ return response
+
+
+
+
+
+
+
+connection_args = OrderedDict(
+ server={
+ 'type': ARG_TYPE.STR,
+ 'description': '''
+ The name of the Informix server to connect to.
+ '''
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': '''
+ The database name to use when connecting with the Informix server.
+ '''
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the Informix server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the Informix server.'
+ },
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the Informix server/database.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'Specify the port to connect to Informix through TCP/IP'
+ },
+ schema_name={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specify the schema name for showing tables '
+ },
+ loging_enabled={
+ 'type': ARG_TYPE.BOOL,
+ 'description': '''
+ Used for COMMIT and ROLLBACK as this command works only for logging enabled database.
+ Note: Its optional.
+ Default is TRUE
+ '''
+ }
+
+)
+
+connection_args_example = OrderedDict(
+ server='server',
+ database='stores_demo',
+ user='informix',
+ password='in4mix',
+ host='127.0.0.1',
+ port='9091',
+ schema_name='Love'
+
+)
| diff --git a/mindsdb/integrations/handlers/informix_handler/tests/__init__.py b/mindsdb/integrations/handlers/informix_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/informix_handler/tests/test_informix_handler.py b/mindsdb/integrations/handlers/informix_handler/tests/test_informix_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/informix_handler/tests/test_informix_handler.py
@@ -0,0 +1,54 @@
+import unittest
+from mindsdb.integrations.handlers.informix_handler.informix_handler import InformixHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class InformixHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "server": "server",
+ "host": "127.0.0.1",
+ "port": 9093,
+ "user": "informix",
+ "password": "in4mix",
+ "database": "demo",
+ "schema_name": "love",
+ "loging_enabled": False
+ }
+ cls.handler = InformixHandler('test_informix_handler', cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS LOVE;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS LOVE (LOVER varchar(20));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert(self):
+ res = self.handler.query("INSERT INTO LOVE VALUES('Hari');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is RESPONSE_TYPE.TABLE
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM LOVE;"
+ result = self.handler.native_query(query)
+ assert result.type is RESPONSE_TYPE.TABLE
+
+ def test_6_check_connection(self):
+ self.handler.check_connection()
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration with Informix
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
Informix is a versatile database provided by IBM.
It is used by many prominent companies and also by many developers.
IBM Informix is an embeddable, high-performance database for integrating SQL, NoSQL, JSON, time-series and spatial data. Designed for analytics on the edge, in the cloud or on premises, the low-footprint database features self-management and automated administrative capabilities.
### Motivation
It would attract companies and developers who use Informix to MindsDB.
It would also be a very effective way to reach Informix's large customer base.
### Implementation
_No response_
### Anything else?
_No response_
| ### @ZoranPandovski
**What do you think about this?**
@ZoranPandovski let's talk about this integration
Sure, let's add a handler for it
**_Sure, I will do it as soon as possible_**
| 2022-08-17T09:01:19Z | [] | [] |
mindsdb/mindsdb | 2,718 | mindsdb__mindsdb-2718 | [
"2672"
] | a44a0d54364b5cb36f38cb15c34aa4453555f129 | diff --git a/mindsdb/integrations/handlers/yugabyte_handler/__about__.py b/mindsdb/integrations/handlers/yugabyte_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/yugabyte_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB YugabyteDB handler'
+__package_name__ = 'mindsdb_yugabyte_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for YugabyteDB"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/yugabyte_handler/__init__.py b/mindsdb/integrations/handlers/yugabyte_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/yugabyte_handler/__init__.py
@@ -0,0 +1,19 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .yugabyte_handler import YugabyteHandler as Handler
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'YugabyteDB'
+name = 'yugabyte'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title',
+ 'description', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/yugabyte_handler/yugabyte_handler.py b/mindsdb/integrations/handlers/yugabyte_handler/yugabyte_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/yugabyte_handler/yugabyte_handler.py
@@ -0,0 +1,177 @@
+import psycopg2
+from pandas import DataFrame
+from typing import Optional
+from collections import OrderedDict
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb_sql.parser.ast.base import ASTNode
+
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.utilities.log import log
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+
+
+class YugabyteHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of SQL (YSQL) statements on YugabyteDB.
+ """
+ name = 'yugabyte'
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ super().__init__(name)
+ self.parser = parse_sql
+ self.connection_data = connection_data
+ self.dialect = 'postgresql'
+ self.database = connection_data['database']
+ self.renderer = SqlalchemyRender('postgres')
+
+ self.connection = None
+ self.is_connected = False
+
+
+ def connect(self):
+ """
+ Handles the connection to a YugabyteDB database instance.
+ """
+ if self.is_connected is True:
+ return self.connection
+
+ args={
+ "dbname": self.database,
+ "host": self.connection_data['host'],
+ "port": self.connection_data['port'],
+ "user": self.connection_data['user'],
+ "password": self.connection_data['password'],
+ }
+
+
+
+ connection = psycopg2.connect(**args, connect_timeout=10)
+
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def check_connection(self) -> StatusResponse:
+ """
+ Check the connection to the YugabyteDB database
+ :return: success status and error message if error occurs
+ """
+ response = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ connection = self.connect()
+ with connection.cursor() as cur:
+ cur.execute('select 1;')
+ response.success = True
+ except psycopg2.Error as e:
+ log.error(f'Error connecting to YugabyteDB {self.database}, {e}!')
+ response.error_message = e
+
+ if response.success is True and need_to_close:
+ self.disconnect()
+ if response.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return response
+
+ def native_query(self, query: str) -> StatusResponse:
+ """Receive raw query and act upon it somehow.
+ Args:
+ query (Any): query in native format (str for sql databases,
+ dict for mongo, etc)
+ Returns:
+ HandlerResponse
+ """
+ need_to_close = self.is_connected is False
+ conn = self.connect()
+ with conn.cursor() as cur:
+ try:
+ cur.execute(query)
+
+ if cur.rowcount >0 and query.upper().startswith('SELECT') :
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ self.connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.database}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ self.connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+ Retrieve the data for the SQL statement, excluding rows that do not satisfy the WHERE condition
+ """
+ query_str = self.renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def get_tables(self) -> Response:
+ """
+ List all tables in YugabyteDB, excluding the system schemas information_schema and pg_catalog
+ """
+
+ query = """SELECT table_schema,table_name,table_type FROM information_schema.tables WHERE table_schema NOT IN ('information_schema', 'pg_catalog') and table_type in ('BASE TABLE', 'VIEW')"""
+ return self.query(query)
+
+ def get_columns(self, table_name):
+ query = f"""SELECT column_name as "Field", data_type as "Type" FROM information_schema.columns WHERE table_name = '{table_name}'"""
+ return self.query(query)
+
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the YugabyteDB server/database.'
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the YugabyteDB server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the YugabyteDB server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'Specify port to connect YugabyteDB server'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specify database name to connect YugabyteDB server'
+ },
+
+
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port=5433,
+ password='',
+ user='admin',
+ database='yugabyte'
+
+
+)
| diff --git a/mindsdb/integrations/handlers/yugabyte_handler/tests/__init__.py b/mindsdb/integrations/handlers/yugabyte_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/yugabyte_handler/tests/test_yugabyte_handler.py b/mindsdb/integrations/handlers/yugabyte_handler/tests/test_yugabyte_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/yugabyte_handler/tests/test_yugabyte_handler.py
@@ -0,0 +1,52 @@
+import unittest
+from mindsdb.integrations.handlers.yugabyte_handler.yugabyte_handler import YugabyteHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class YugabyteHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "localhost",
+ "port": 5433,
+ "user": "admin",
+ "password": "",
+ "database": "yugabyte"
+ }
+ cls.handler = YugabyteHandler('test_yugabyte_handler', cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS PREM;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS PREM (Premi varchar(50));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert_table(self):
+ res = self.handler.query("INSERT INTO PREM VALUES('Radha <3 Krishna');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM PREM;"
+ result = self.handler.native_query(query)
+ assert result.type in (RESPONSE_TYPE.TABLE, RESPONSE_TYPE.OK)
+
+ def test_6_check_connection(self):
+ self.handler.check_connection()
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration with YugabyteDB
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
YugabyteDB is a high-performance, cloud-native distributed SQL database.
It is also open source and similar to PostgreSQL.
It is best suited for cloud-native OLTP (i.e. real-time, business-critical) applications that need absolute data correctness and require at least one of the following: scalability, high tolerance to failures, or globally-distributed deployments.
### Motivation
As it is compatible with PostgreSQL, it can be easily integrated, and since YugabyteDB is widely used in applications to store business data, this integration would be valuable.
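Because YugabyteDB's YSQL layer speaks the PostgreSQL wire protocol, the ordinary `psycopg2` driver can be reused as-is; a minimal sketch with placeholder credentials (YSQL listens on port 5433 by default):

```python
# Minimal sketch: connect to YugabyteDB (YSQL) with the stock PostgreSQL
# driver. Host and credentials are placeholders.
import psycopg2

conn = psycopg2.connect(
    host="127.0.0.1",
    port=5433,
    user="admin",
    password="",
    dbname="yugabyte",
)
with conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone())
conn.close()
```

This is essentially what a handler does, just wrapped in MindsDB's handler interface.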
### Implementation
_No response_
### Anything else?
_No response_
| I want to work on this @ZoranPandovski .
If you want me to work on this, please assign it to me. | 2022-08-18T06:49:03Z | [] | []
mindsdb/mindsdb | 2,743 | mindsdb__mindsdb-2743 | [
"2729"
] | 769423f91754c9be7095d325cc47aedae81b93ba | diff --git a/mindsdb/integrations/handlers/monetdb_handler/__about__.py b/mindsdb/integrations/handlers/monetdb_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/monetdb_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB MonetDB handler'
+__package_name__ = 'mindsdb_monetdb_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for MonetDB"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/monetdb_handler/__init__.py b/mindsdb/integrations/handlers/monetdb_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/monetdb_handler/__init__.py
@@ -0,0 +1,23 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .monetdb_handler import (
+ MonetDBHandler as Handler,
+ connection_args_example,
+ connection_args
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'MonetDB'
+name = 'monetdb'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.png'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'connection_args', 'connection_args_example', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/monetdb_handler/monetdb_handler.py b/mindsdb/integrations/handlers/monetdb_handler/monetdb_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/monetdb_handler/monetdb_handler.py
@@ -0,0 +1,270 @@
+from collections import OrderedDict
+from typing import Optional
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.utilities.log import log
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+
+import pandas as pd
+import pymonetdb as mdb
+from .utils.monet_get_id import *
+from sqlalchemy_monetdb.dialect import MonetDialect
+
+
+
+
+class MonetDBHandler(DatabaseHandler):
+
+
+ name= 'monetdb'
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ """ Initialize the handler
+ Args:
+ name (str): name of particular handler instance
+ connection_data (dict): parameters for connecting to the database
+ **kwargs: arbitrary keyword arguments.
+ """
+ super().__init__(name)
+
+ self.kwargs = kwargs
+ self.parser = parse_sql
+ self.database = connection_data['database']
+ self.user = connection_data['user']
+ self.password = connection_data['password']
+ self.schemaName = connection_data['schema_name'] if 'schema_name' in connection_data else None
+ self.host = connection_data['host']
+ self.port = connection_data['port']
+
+
+ self.connection = None
+ self.is_connected = False
+
+
+ def connect(self):
+ """ Set up any connections required by the handler
+ Should return output of check_connection() method after attempting
+ connection. Should switch self.is_connected.
+ Returns:
+ Connection Object
+ """
+ if self.is_connected is True:
+ return self.connection
+
+ try:
+ self.connection = mdb.connect(
+ database=self.database,
+ hostname=self.host,
+ port=self.port,
+ username=self.user,
+ password=self.password,
+ )
+
+
+ self.is_connected= True
+ except Exception as e:
+ log.error(f"Error while connecting to {self.database}, {e}")
+
+
+ return self.connection
+
+
+ def disconnect(self):
+ """ Close any existing connections
+ Should switch self.is_connected.
+ """
+ if self.is_connected is False:
+ return
+ try:
+ self.connection.close()
+ self.is_connected=False
+ except Exception as e:
+ log.error(f"Error while disconnecting to {self.database}, {e}")
+
+ return
+
+
+ def check_connection(self) -> StatusResponse:
+ """ Check connection to the handler
+ Returns:
+ HandlerStatusResponse
+ """
+ responseCode = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ self.connect()
+ responseCode.success = True
+ except Exception as e:
+ log.error(f'Error connecting to database {self.database}, {e}!')
+ responseCode.error_message = str(e)
+ finally:
+ if responseCode.success is True and need_to_close:
+ self.disconnect()
+ if responseCode.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return responseCode
+
+
+ def native_query(self, query: str) -> StatusResponse:
+ """Receive raw query and act upon it somehow.
+ Args:
+ query (Any): query in native format (str for sql databases,
+ dict for mongo, etc)
+ Returns:
+ HandlerResponse
+ """
+ need_to_close = self.is_connected is False
+ conn = self.connect()
+ cur=conn.cursor()
+ try:
+ cur.execute(query)
+
+ if cur.rowcount>0 and query.strip().upper().startswith('SELECT') :
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ data_frame=pd.DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ self.connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.database}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ self.connection.rollback()
+
+ cur.close()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+
+ def query(self, query: ASTNode) -> StatusResponse:
+ """Receive query as AST (abstract syntax tree) and act upon it somehow.
+ Args:
+ query (ASTNode): sql query represented as AST. May be any kind
+ of query: SELECT, INSERT, DELETE, etc
+ Returns: HandlerResponse
+ """
+
+
+
+ renderer = SqlalchemyRender(MonetDialect)
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+
+ def get_tables(self) -> StatusResponse:
+ """ Return list of entities
+ Return list of entities that will be accessible as tables.
+ Returns: HandlerResponse: should have the same columns as information_schema.tables
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-tables-table.html)
+ Column 'TABLE_NAME' is mandatory, the others are optional.
+ """
+ self.connect()
+ schema = schema_id(connection=self.connection,schema_name=self.schemaName)
+
+
+
+ q = f"""
+ SELECT name as TABLE_NAME
+ FROM sys.tables
+ WHERE system = False
+ AND type = 0
+ AND schema_id = {schema}
+ """
+
+
+ return self.query(q)
+
+
+ def get_columns(self, table_name: str) -> StatusResponse:
+ """ Returns a list of entity columns
+ Args:
+ table_name (str): name of one of tables returned by self.get_tables()
+ Returns:
+ HandlerResponse: should have the same columns as information_schema.columns
+ (https://dev.mysql.com/doc/refman/8.0/en/information-schema-columns-table.html)
+ Column 'COLUMN_NAME' is mandatory, the others are optional. It is highly
+ recommended to also define 'DATA_TYPE': it should be one of the
+ python data types (str by default).
+ """
+ self.connect()
+ table=table_id(
+ connection=self.connection,
+ table_name=table_name,
+ schema_name=self.schemaName
+ )
+
+ q = f"""
+ SELECT
+ name as COLUMN_NAME,
+ type as DATA_TYPE
+ FROM sys.columns
+ WHERE table_id = {table}
+ """
+ return self.query(q)
+
+
+
+
+
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the MonetDB server/database.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': """
+ The database name to use when connecting with the MonetDB server.
+ """
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the MonetDB server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the MonetDB server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'Specify port to connect MonetDB through TCP/IP'
+ },
+ schema_name={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specify the schema name for Listing Table '
+ },
+
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port=50000,
+ password='monetdb',
+ user='monetdb',
+ schemaName="sys",
+ database="demo",
+)
diff --git a/mindsdb/integrations/handlers/monetdb_handler/utils/__init__.py b/mindsdb/integrations/handlers/monetdb_handler/utils/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/monetdb_handler/utils/monet_get_id.py b/mindsdb/integrations/handlers/monetdb_handler/utils/monet_get_id.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/monetdb_handler/utils/monet_get_id.py
@@ -0,0 +1,50 @@
+from sqlalchemy import exc
+
+def schema_id(connection, schema_name=None):
+ """Fetch the id for schema"""
+ cur=connection.cursor()
+ if schema_name is None:
+ cur.execute("SELECT current_schema")
+ schema_name=cur.fetchall()[0][0]
+
+ query = f"""
+ SELECT id
+ FROM sys.schemas
+ WHERE name = '{schema_name}'
+ """
+
+ cur.execute(query)
+
+ try:
+ schema_id = cur.fetchall()[0][0]
+ except:
+ raise exc.InvalidRequestError(schema_name)
+
+ return schema_id
+
+
+def table_id(connection, table_name, schema_name=None):
+ """Fetch the id for schema.table_name, defaulting to current schema if
+ schema is None
+ """
+
+ schema_idm = schema_id(connection=connection,schema_name=schema_name)
+
+ q = f"""
+ SELECT id
+ FROM sys.tables
+ WHERE name = '{table_name}'
+ AND schema_id = {schema_idm}
+ """
+
+ cur = connection.cursor()
+ cur.execute(q)
+
+ try:
+ table_id = cur.fetchall()[0][0]
+ except:
+ raise exc.NoSuchTableError(table_name)
+
+
+
+ return table_id
| diff --git a/mindsdb/integrations/handlers/monetdb_handler/tests/__init__.py b/mindsdb/integrations/handlers/monetdb_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/monetdb_handler/tests/test_monetdb_handler.py b/mindsdb/integrations/handlers/monetdb_handler/tests/test_monetdb_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/monetdb_handler/tests/test_monetdb_handler.py
@@ -0,0 +1,52 @@
+import unittest
+from mindsdb.integrations.handlers.monetdb_handler.monetdb_handler import MonetDBHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class MonetDBHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "127.0.0.1",
+ "port": 50000,
+ "user": "monetdb",
+ "password": "monetdb",
+ "database": "demo",
+
+ }
+ cls.handler = MonetDBHandler('test_monet_handler', cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS PREM;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS PREM (Premi varchar(50));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert_table(self):
+ res = self.handler.query("INSERT INTO PREM VALUES('Radha <3 Krishna');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM PREM;"
+ result = self.handler.native_query(query)
+ assert result.type in (RESPONSE_TYPE.TABLE, RESPONSE_TYPE.OK)
+
+ def test_6_check_connection(self):
+ self.handler.check_connection()
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration with MonetDB
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
**MonetDB** is an open-source column-oriented relational database management system. It is designed to provide high performance on complex queries against large databases, such as combining tables with hundreds of columns and millions of rows.
It is used worldwide in _education_, _research_, and _business_.
### Motivation
MonetDB is very fast at executing complex queries and it is open source as well.
It is used in education, research, and business as a data warehouse.
It would be very nice to integrate it with MindsDB's ML platform.
### Implementation
_No response_
### Anything else?
_No response_
| @ZoranPandovski can I work on this? | 2022-08-20T13:32:57Z | [] | []
mindsdb/mindsdb | 2,749 | mindsdb__mindsdb-2749 | [
"2748"
] | 769423f91754c9be7095d325cc47aedae81b93ba | diff --git a/mindsdb/integrations/handlers/matrixone_handler/__about__.py b/mindsdb/integrations/handlers/matrixone_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/matrixone_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB MatrixOne handler'
+__package_name__ = 'mindsdb_matrixone_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for Matrixone"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/matrixone_handler/__init__.py b/mindsdb/integrations/handlers/matrixone_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/matrixone_handler/__init__.py
@@ -0,0 +1,23 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .matrixone_handler import (
+ MatrixOneHandler as Handler,
+ connection_args_example,
+ connection_args
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'MatrixOne'
+name = 'matrixone'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.png'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'connection_args', 'connection_args_example', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/matrixone_handler/matrixone_handler.py b/mindsdb/integrations/handlers/matrixone_handler/matrixone_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/matrixone_handler/matrixone_handler.py
@@ -0,0 +1,215 @@
+from collections import OrderedDict
+from typing import Optional
+import pandas as pd
+import pymysql as matone
+from pymysql.cursors import DictCursor
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb_sql.parser.ast.base import ASTNode
+
+from mindsdb.utilities.log import log
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+class MatrixOneHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of the MatrixOne statements.
+ """
+
+ name = 'matrixone'
+
+    def __init__(self, name, connection_data: Optional[dict], **kwargs):
+ super().__init__(name)
+ self.mysql_url = None
+ self.parser = parse_sql
+ self.dialect = 'mysql'
+ self.connection_data = connection_data
+ self.database = self.connection_data.get('database')
+
+ self.connection = None
+ self.is_connected = False
+
+ def connect(self):
+ if self.is_connected is True:
+ return self.connection
+
+ config = {
+ 'host': self.connection_data.get('host'),
+ 'port': self.connection_data.get('port'),
+ 'user': self.connection_data.get('user'),
+ 'password': self.connection_data.get('password'),
+ 'database': self.connection_data.get('database')
+ }
+
+ ssl = self.connection_data.get('ssl')
+ if ssl is True:
+ ssl_ca = self.connection_data.get('ssl_ca')
+ ssl_cert = self.connection_data.get('ssl_cert')
+ ssl_key = self.connection_data.get('ssl_key')
+            # NOTE: pymysql has no ClientFlag constant (that is mysql-connector API); with pymysql >= 1.0 TLS is enabled via the ssl_* keys set below
+ if ssl_ca is not None:
+ config["ssl_ca"] = ssl_ca
+ if ssl_cert is not None:
+ config["ssl_cert"] = ssl_cert
+ if ssl_key is not None:
+ config["ssl_key"] = ssl_key
+
+ connection = matone.connect(**config)
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def disconnect(self):
+ if self.is_connected is False:
+ return
+ self.connection.close()
+ self.is_connected = False
+ return
+
+ def check_connection(self) -> StatusResponse:
+ """
+ Check the connection of the MatrixOne database
+ :return: success status and error message if error occurs
+ """
+
+ result = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ connection = self.connect()
+ result.success = connection.open
+ except Exception as e:
+ log.error(f'Error connecting to MatrixOne {self.connection_data["database"]}, {e}!')
+ result.error_message = str(e)
+
+ if result.success is True and need_to_close:
+ self.disconnect()
+ if result.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return result
+
+ def native_query(self, query: str) -> Response:
+ """
+ Receive SQL query and runs it
+ :param query: The SQL query to run in MatrixOne
+ :return: returns the records from the current recordset
+ """
+
+ need_to_close = self.is_connected is False
+
+ connection = self.connect()
+        with connection.cursor(cursor=DictCursor) as cur:
+ try:
+ cur.execute(query)
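+                # pymysql's default cursor buffers the result set in cur._rows; a non-empty value means the statement returned rows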
+ if cur._rows:
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ pd.DataFrame(
+ result,
+ # columns=[x[0] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.connection_data["database"]}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+ Retrieve the data from the SQL statement.
+ """
+ renderer = SqlalchemyRender('mysql')
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def get_tables(self) -> Response:
+ """
+        Get a list with all of the tables in MatrixOne
+ """
+ q = "SHOW TABLES;"
+ result = self.native_query(q)
+ df = result.data_frame
+ result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})
+ return result
+
+ def get_columns(self, table_name) -> Response:
+ """
+ Show details about the table
+ """
+ q = f"SHOW COLUMNS FROM {table_name};"
+ result = self.native_query(q)
+        df = result.data_frame
+        result.data_frame = df.rename(columns={
+ df.columns[0]: 'COLUMN_NAME',
+ df.columns[1]: 'DATA TYPE'
+ })
+
+
+ return result
+
+
+connection_args = OrderedDict(
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the MatrixOne server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the MatrixOne server.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The database name to use when connecting with the MatrixOne server.'
+ },
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the MatrixOne server. '
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'The TCP/IP port of the MatrixOne server. Must be an integer.'
+ },
+ ssl={
+ 'type': ARG_TYPE.BOOL,
+ 'description': 'Set it to False to disable ssl.'
+ },
+ ssl_ca={
+ 'type': ARG_TYPE.PATH,
+ 'description': 'Path or URL of the Certificate Authority (CA) certificate file'
+ },
+ ssl_cert={
+ 'type': ARG_TYPE.PATH,
+ 'description': 'Path name or URL of the server public key certificate file'
+ },
+ ssl_key={
+ 'type': ARG_TYPE.PATH,
+ 'description': 'The path name or URL of the server private key file'
+ }
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port=6001,
+ user='dump',
+ password='111',
+ database='mo_catalog'
+)
| diff --git a/mindsdb/integrations/handlers/matrixone_handler/tests/__init__.py b/mindsdb/integrations/handlers/matrixone_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/matrixone_handler/tests/test_matrixone_handler.py b/mindsdb/integrations/handlers/matrixone_handler/tests/test_matrixone_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/matrixone_handler/tests/test_matrixone_handler.py
@@ -0,0 +1,59 @@
+import unittest
+
+from mindsdb.integrations.handlers.matrixone_handler.matrixone_handler import MatrixOneHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class MatrixOneHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "localhost",
+ "port": 6001,
+ "user": "dump",
+ "password": "111",
+ "database": "mo_catalog",
+ "ssl": False
+ }
+ cls.handler = MatrixOneHandler('test_mysql_handler', cls.kwargs)
+
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS PREM;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS PREM (Premi varchar(50));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert_table(self):
+ res = self.handler.query("INSERT INTO PREM VALUES('Radha <3 Krishna');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM PREM;"
+ result = self.handler.native_query(query)
+ assert result.type is not RESPONSE_TYPE.ERROR
+
+ def test_6_get_columns(self):
+
+ result = self.handler.get_columns('PREM')
+ assert result.type is not RESPONSE_TYPE.ERROR
+
+ def test_7_check_connection(self):
+ self.handler.check_connection()
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration with Matrixone
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
**MatrixOne is a future-oriented hyper-converged cloud and edge native DBMS that supports transactional, analytical, and streaming workloads with a simplified and distributed database engine, across multiple data centers, clouds, edges and other heterogeneous infrastructures.**
### Motivation
- Below all are the amazing application of matrixone.
- For more [info ](https://github.com/matrixorigin/matrixone)
![Matrixone](https://raw.githubusercontent.com/matrixorigin/artwork/main/docs/overview/all-in-one.png)
### Implementation
_No response_
### Anything else?
_No response_
| 2022-08-22T10:26:32Z | [] | [] |
|
mindsdb/mindsdb | 2,774 | mindsdb__mindsdb-2774 | [
"2750"
] | 2cf9e1c88cefe74c3aac0ae35721e7dae278b3da | diff --git a/mindsdb/integrations/handlers/d0lt_handler/__about__.py b/mindsdb/integrations/handlers/d0lt_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/d0lt_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB D0lt handler'
+__package_name__ = 'mindsdb_d0lt_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for D0lt"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/d0lt_handler/__init__.py b/mindsdb/integrations/handlers/d0lt_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/d0lt_handler/__init__.py
@@ -0,0 +1,22 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .d0lt_handler import (
+ D0ltHandler as Handler,
+
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'D0lt'
+name = 'd0lt'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/d0lt_handler/d0lt_handler.py b/mindsdb/integrations/handlers/d0lt_handler/d0lt_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/d0lt_handler/d0lt_handler.py
@@ -0,0 +1,11 @@
+from ..matrixone_handler import Handler as MatrixOneHandler
+
+
+class D0ltHandler(MatrixOneHandler):
+ """
+    This handler handles connection and execution of Dolt SQL statements.
+    Dolt is MySQL wire-compatible, so the pymysql-based MatrixOne handler is reused as-is.
+ """
+ name = 'd0lt'
+
+ def __init__(self, name, **kwargs):
+ super().__init__(name, **kwargs)
| diff --git a/mindsdb/integrations/handlers/d0lt_handler/tests/__init__.py b/mindsdb/integrations/handlers/d0lt_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/d0lt_handler/tests/test_d0lt_handler.py b/mindsdb/integrations/handlers/d0lt_handler/tests/test_d0lt_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/d0lt_handler/tests/test_d0lt_handler.py
@@ -0,0 +1,62 @@
+import unittest
+
+from mindsdb.integrations.handlers.d0lt_handler.d0lt_handler import D0ltHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class D0ltHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "localhost",
+ "port": 3306,
+ "user": "root",
+ "password": "",
+ "database": "getting_started",
+ "ssl": False
+ }
+ cls.handler = D0ltHandler('test_mysql_handler', connection_data=cls.kwargs)
+
+    def test_0_connect(self):
+        self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS PREM;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS PREM (Premi varchar(50));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert_table(self):
+ res = self.handler.query("INSERT INTO PREM VALUES('Radha <3 Krishna');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM PREM;"
+ result = self.handler.native_query(query)
+ assert result.type is not RESPONSE_TYPE.ERROR
+
+ def test_6_get_columns(self):
+
+ result = self.handler.get_columns('PREM')
+ assert result.type is not RESPONSE_TYPE.ERROR
+
+ def test_7_check_connection(self):
+ self.handler.check_connection()
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration with D0lt
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
**Using Dolt to manage and share your Machine Learning data amongst your data analysts, engineers, and scientists make collaboration easy. Dolt gives you human and machine readable diffs. Diffs are useful providing data oriented insights into your ML models. Why is this model performing better than this one? What in the data changed? Dolt provides data lineage as a first class entity in your Machine Learning pipelines. Dolt provides model reproducibility by storing each version of the data you use to train a model. Dolt is especially useful in Natural Language Processing (NLP) where the data is mostly text.**
For more UseCases you can refer [here](https://docs.dolthub.com/introduction/use-cases)
### Motivation
Dolt is best described as "what you get if Git and MySQL had a baby": a MySQL-compatible database with Git-style versioning.
It has powerful features and compelling use cases, as described above.
That makes it a natural fit for MindsDB.
### Implementation
_No response_
### Anything else?
_No response_
| @ZoranPandovski **_Last assigned issue is completed and the PR is made._**
_Assign this to me._ | 2022-08-25T07:58:33Z | [] | [] |
mindsdb/mindsdb | 2,843 | mindsdb__mindsdb-2843 | [
"2842"
] | c68d5363ed8dad479c29f1c9c3b14b5fa49d81a2 | diff --git a/mindsdb/integrations/handlers/timescaledb_handler/__about__.py b/mindsdb/integrations/handlers/timescaledb_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/timescaledb_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB TimeScaleDB handler'
+__package_name__ = 'mindsdb_timescaledb_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for TimeScaleDB"
+__author__ = 'Parthiv Makwana'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/timescaledb_handler/__init__.py b/mindsdb/integrations/handlers/timescaledb_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/timescaledb_handler/__init__.py
@@ -0,0 +1,19 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+    from .timescaledb_handler import TimeScaleDBHandler as Handler
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'TimeScaleDB'
+name = 'timescaledb'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title',
+ 'description', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/timescaledb_handler/timescaledb_handler.py b/mindsdb/integrations/handlers/timescaledb_handler/timescaledb_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/timescaledb_handler/timescaledb_handler.py
@@ -0,0 +1,46 @@
+from ..postgres_handler import Handler as PostgresHandler
+from collections import OrderedDict
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+class TimeScaleDBHandler(PostgresHandler):
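+    # TimescaleDB is a PostgreSQL extension and speaks the PostgreSQL wire protocol,
+    # so the existing Postgres handler logic is reused unchanged.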
+ name = 'timescaledb'
+
+ def __init__(self, name, **kwargs):
+ super().__init__(name, **kwargs)
+
+
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the TimeScaleDB server/database.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': """
+ The database name to use when connecting with the TimeScaleDB server.
+ """
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the TimeScaleDB server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the TimeScaleDB server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'Specify port to connect TimeScaleDB '
+ }
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port=36806,
+ password='P455W0rD',
+ user='tsdbadmin',
+ database="tsdb"
+)
| diff --git a/mindsdb/integrations/handlers/timescaledb_handler/tests/__init__.py b/mindsdb/integrations/handlers/timescaledb_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/timescaledb_handler/tests/test_timescaledb_handler.py b/mindsdb/integrations/handlers/timescaledb_handler/tests/test_timescaledb_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/timescaledb_handler/tests/test_timescaledb_handler.py
@@ -0,0 +1,51 @@
+import unittest
+from mindsdb.integrations.handlers.timescaledb_handler.timescaledb_handler import TimeScaleDBHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class TimeScaleDBHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "localhost",
+ "port": 36806,
+ "user": "USERNAME",
+ "password": "P455W0rd",
+ "database": "tsdb"
+ }
+ cls.handler = TimeScaleDBHandler('test_timescaledb_handler', connection_data=cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.connect()
+
+
+
+ def test_1_drop_table(self):
+ res = self.handler.query("DROP TABLE IF EXISTS LOVE;")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+
+ def test_2_create_table(self):
+ res = self.handler.query("CREATE TABLE IF NOT EXISTS LOVE (LOVER varchar(20));")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_3_insert(self):
+ res = self.handler.query("INSERT INTO LOVE VALUES('Hari');")
+ assert res.type is not RESPONSE_TYPE.ERROR
+
+ def test_4_get_tables(self):
+ tables = self.handler.get_tables()
+ assert tables.type is RESPONSE_TYPE.TABLE
+
+
+ def test_5_select_query(self):
+ query = "SELECT * FROM LOVE;"
+ result = self.handler.native_query(query)
+ assert result.type is RESPONSE_TYPE.TABLE
+
+ def test_5_check_connection(self):
+ self.handler.check_connection()
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| [New Integration]: Integration With TimeScaleDB
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
TimescaleDB is an open-source database developed by Timescale Inc. It is written in C and extends PostgreSQL. TimescaleDB is a relational database that supports standard SQL queries. Additional SQL functions and table structures provide support for time-series data, oriented towards storage, performance, and analysis facilities for data at scale. Its performance characteristics have been compared to InfluxDB. Time-based data partitioning provides improved query execution and performance for time-oriented applications.
### Motivation
PostgreSQL already works with MindsDB.
TimescaleDB is a time-series extension of PostgreSQL and is faster than plain PostgreSQL for time-series workloads, so supporting it is a natural next step.
### Implementation
_No response_
### Anything else?
_No response_
| 2022-08-31T12:33:18Z | [] | [] |
|
mindsdb/mindsdb | 2,895 | mindsdb__mindsdb-2895 | [
"2360"
] | 78ed5abe15d9c435a6e66e21bcfddc1085d48eab | diff --git a/mindsdb/integrations/handlers/hive_handler/__about__.py b/mindsdb/integrations/handlers/hive_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hive_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB Hive handler'
+__package_name__ = 'mindsdb_hive_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for Hive2"
+__author__ = 'Biswadip Paul'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022- mindsdb'
diff --git a/mindsdb/integrations/handlers/hive_handler/__init__.py b/mindsdb/integrations/handlers/hive_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hive_handler/__init__.py
@@ -0,0 +1,23 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .hive_handler import (
+ HiveHandler as Handler,
+ connection_args_example,
+ connection_args
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'Hive'
+name = 'hive'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'connection_args', 'connection_args_example', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/hive_handler/hive_handler.py b/mindsdb/integrations/handlers/hive_handler/hive_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hive_handler/hive_handler.py
@@ -0,0 +1,191 @@
+from typing import Optional
+from collections import OrderedDict
+import pandas as pd
+from pyhive import (hive, sqlalchemy_hive)
+from sqlalchemy import create_engine
+
+from mindsdb_sql import parse_sql
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+from mindsdb_sql.parser.ast.base import ASTNode
+
+from mindsdb.utilities.log import log
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+class HiveHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of the Hive SQL statements.
+ """
+
+ name = 'hive'
+
+ def __init__(self, name: str, connection_data: Optional[dict], **kwargs):
+ super().__init__(name)
+ self.parser = parse_sql
+ self.dialect = 'hive'
+
+ self.connection_data = connection_data
+ self.kwargs = kwargs
+
+ self.connection = None
+ self.is_connected = False
+
+ def __del__(self):
+ if self.is_connected is True:
+ self.disconnect()
+
+ def connect(self):
+ if self.is_connected is True:
+ return self.connection
+
+ config = {
+ 'host': self.connection_data.get('host'),
+ 'port': self.connection_data.get('port'),
+ 'auth': self.connection_data.get('auth', 'CUSTOM'),
+ 'username': self.connection_data.get('user'),
+ 'password': self.connection_data.get('password'),
+ 'database': self.connection_data.get('database')
+ }
+
+ connection = hive.Connection(**config)
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def disconnect(self):
+ if self.is_connected is False:
+ return
+ self.connection.close()
+ self.is_connected = False
+ return
+
+ def check_connection(self) -> StatusResponse:
+ """
+ Check the connection of the Hive database
+ :return: success status and error message if error occurs
+ """
+
+ response = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ self.connect()
+ response.success = True
+ except Exception as e:
+ log.error(f'Error connecting to Hive {self.connection_data["database"]}, {e}!')
+ response.error_message = str(e)
+
+ if response.success is True and need_to_close:
+ self.disconnect()
+ if response.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return response
+
+ def native_query(self, query: str) -> Response:
+ """
+ Receive SQL query and runs it
+ :param query: The SQL query to run in Hive
+ :return: returns the records from the current recordset
+ """
+
+ need_to_close = self.is_connected is False
+
+ connection = self.connect()
+ with connection.cursor() as cur:
+ try:
+ cur.execute(query)
+ result = cur.fetchall()
+ if result:
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ pd.DataFrame(
+ result,
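+                            # HiveServer2 reports result columns as "table.column"; keep only the bare column name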
+ columns=[x[0].split('.')[-1] for x in cur.description]
+ )
+ )
+ else:
+ response = Response(RESPONSE_TYPE.OK)
+ connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.connection_data["database"]}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_message=str(e)
+ )
+ connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+ Retrieve the data from the SQL statement.
+ """
+ renderer = SqlalchemyRender(sqlalchemy_hive.HiveDialect)
+ query_str = renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+ # return self.native_query(query.to_string())
+
+ def get_tables(self) -> Response:
+ """
+ Get a list with all of the tables in Hive
+ """
+ q = "SHOW TABLES"
+ result = self.native_query(q)
+ df = result.data_frame
+ result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})
+ return result
+
+ def get_columns(self, table_name) -> Response:
+ """
+ Show details about the table
+ """
+ q = f"DESCRIBE {table_name}"
+ result = self.native_query(q)
+ return result
+
+
+connection_args = OrderedDict(
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The user name used to authenticate with the Hive server.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The password to authenticate the user with the Hive server.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The database name to use when connecting with the Hive server.'
+ },
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The host name or IP address of the Hive server. NOTE: use \'127.0.0.1\' instead of \'localhost\' to connect to local server.'
+ },
+ port={
+ 'type': ARG_TYPE.INT,
+ 'description': 'The TCP/IP port of the Hive server. Must be an integer.'
+ },
+ auth={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The Auth type of the Hive server.'
+ }
+)
+
+connection_args_example = OrderedDict(
+ host='127.0.0.1',
+ port='10000',
+ auth='CUSTOM',
+ user='root',
+ password='password',
+ database='database'
+)
| diff --git a/mindsdb/integrations/handlers/hive_handler/tests/__init__.py b/mindsdb/integrations/handlers/hive_handler/tests/__init__.py
new file mode 100644
diff --git a/mindsdb/integrations/handlers/hive_handler/tests/test_hive_handler.py b/mindsdb/integrations/handlers/hive_handler/tests/test_hive_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hive_handler/tests/test_hive_handler.py
@@ -0,0 +1,53 @@
+import unittest
+
+from mindsdb.integrations.handlers.hive_handler.hive_handler import HiveHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+class HiveHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": "localhost",
+ "port": "10000",
+ "user": "admin",
+ "password": "password",
+ "database": "default",
+ "auth": "CUSTOM"
+ }
+        cls.handler = HiveHandler('test_hive_handler', connection_data=cls.kwargs)
+
+ def test_0_connect(self):
+ self.handler.check_connection()
+
+    def test_1_native_query_show_dbs(self):
+        dbs = self.handler.native_query("SHOW DATABASES;")
+        assert dbs.type is not RESPONSE_TYPE.ERROR
+
+    def test_2_get_tables(self):
+        tbls = self.handler.get_tables()
+        assert tbls.type is not RESPONSE_TYPE.ERROR
+
+    def test_3_get_views(self):
+        views = self.handler.get_views()
+        assert views.type is not RESPONSE_TYPE.ERROR
+
+    def test_5_drop_table(self):
+        res = self.handler.native_query("DROP TABLE IF EXISTS test_hdb")
+        assert res.type is not RESPONSE_TYPE.ERROR
+
+    def test_4_create_table(self):
+        res = self.handler.native_query("CREATE TABLE IF NOT EXISTS test_hdb (test_col INT)")
+        assert res.type is not RESPONSE_TYPE.ERROR
+
+    def test_6_describe_table(self):
+        described = self.handler.get_columns("test_hdb")
+        assert described.type is RESPONSE_TYPE.TABLE
+
+    def test_7_select_query(self):
+        query = "SELECT * FROM test_mdb WHERE foo=238"
+        result = self.handler.query(query)
+        assert result.type is RESPONSE_TYPE.TABLE
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| Hive (Hive Server2?) as a Datasource
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Is your feature request related to a problem? Please describe.
A user would like to use Hive as a datasource.
### Describe the solution you'd like.
Be able to train ML models in MindsDB from Hive (server2?)
### Describe an alternate solution.
_No response_
### Anything else? (Additional Context)
This is a feature that a potential paid customer would like.
_We use airbyte to sync data between a Postgres database and a Databricks Spark cluster. We then use DBT to transform those tables into useful data products within Databricks._
| Hi @ZoranPandovski. Can I take this up?
Hey sure @bispaul
Hi @bispaul, did you start on this?
Hi @ZoranPandovski , I have started but couldn't work much due to my workload. I will try to complete this before the end of this month. Thank You. | 2022-09-10T21:11:45Z | [] | [] |
mindsdb/mindsdb | 3,113 | mindsdb__mindsdb-3113 | [
"2744"
] | 909adb267a973a7d6a2848ec6e188ac9e4cf0f33 | diff --git a/mindsdb/integrations/handlers/hana_handler/__about__.py b/mindsdb/integrations/handlers/hana_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hana_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB SAP HANA handler'
+__package_name__ = 'mindsdb_hana_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for SAP HANA"
+__author__ = 'Sudipto Ghosh'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022 - mindsdb'
diff --git a/mindsdb/integrations/handlers/hana_handler/__init__.py b/mindsdb/integrations/handlers/hana_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hana_handler/__init__.py
@@ -0,0 +1,23 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .hana_handler import (
+ HanaHandler as Handler,
+ connection_args_example,
+ connection_args
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'SAP HANA'
+name = 'hana'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'connection_args', 'connection_args_example', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/hana_handler/hana_handler.py b/mindsdb/integrations/handlers/hana_handler/hana_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hana_handler/hana_handler.py
@@ -0,0 +1,313 @@
+from textwrap import dedent
+from collections import OrderedDict
+
+from pandas import DataFrame
+
+from hdbcli import dbapi
+import sqlalchemy_hana.dialect as hana_dialect
+
+from mindsdb_sql import parse_sql
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+
+from mindsdb.utilities.log import log
+
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+class HanaHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of the SAP Hana statements.
+ """
+
+ name = 'hana'
+
+ def __init__(self, name: str, connection_data: dict, **kwargs):
+ super().__init__(name)
+
+ self.dialect = 'hana'
+ self.parser = parse_sql
+ self.connection_data = connection_data
+ self.renderer = SqlalchemyRender(hana_dialect.HANAHDBCLIDialect)
+
+ self.address = self.connection_data.get('host')
+ self.port = self.connection_data.get('port')
+ self.user = self.connection_data.get('user')
+ self.password = self.connection_data.get('password')
+ self.autocommit = self.connection_data.get('autocommit', True)
+ self.properties = self.connection_data.get('properties')
+ self.currentSchema = self.connection_data.get('schema', 'CURRENTUSER')
+ self.databaseName = self.connection_data.get('database')
+ self.encrypt = self.connection_data.get('encrypt', False)
+ self.sslHostNameInCertificate = self.connection_data.get('sslHostNameInCertificate')
+ self.sslValidateCertificate = self.connection_data.get('sslValidateCertificate', False)
+ self.sslCryptoProvider = self.connection_data.get('sslCryptoProvider')
+ self.sslTrustStore = self.connection_data.get('sslTrustStore')
+ self.sslKeyStore = self.connection_data.get('sslKeyStore')
+ self.cseKeyStorePassword = self.connection_data.get('cseKeyStorePassword')
+ self.sslSNIHostname = self.connection_data.get('sslSNIHostname')
+ self.sslSNIRequest = self.connection_data.get('sslSNIRequest', True)
+ self.siteType = self.connection_data.get('siteType')
+ self.splitBatchCommands = self.connection_data.get('splitBatchCommands', True)
+ self.routeDirectExecute = self.connection_data.get('routeDirectExecute', False)
+ self.secondarySessionFallback = self.connection_data.get('secondarySessionFallback', True)
+
+ self.connection = None
+ self.is_connected = False
+
+ def __del__(self):
+ if self.is_connected is True:
+ self.disconnect()
+
+ def connect(self):
+ """
+        Handles the connection to an SAP HANA database instance.
+ """
+
+ if self.is_connected is True:
+ return self.connection
+
+ connection = dbapi.connect(
+ address=self.address,
+ port=self.port,
+ user=self.user,
+ password=self.password,
+ autocommit=self.autocommit,
+ properties=self.properties,
+ currentSchema=self.currentSchema,
+ databaseName=self.databaseName,
+ encrypt=self.encrypt,
+ sslHostNameInCertificate=self.sslHostNameInCertificate,
+ sslValidateCertificate=self.sslValidateCertificate,
+ sslCryptoProvider=self.sslCryptoProvider,
+ sslTrustStore=self.sslTrustStore,
+ sslKeyStore=self.sslKeyStore,
+ cseKeyStorePassword=self.cseKeyStorePassword,
+ sslSNIHostname=self.sslSNIHostname,
+ sslSNIRequest=self.sslSNIRequest,
+ siteType=self.siteType,
+ splitBatchCommands=self.splitBatchCommands,
+ routeDirectExecute=self.routeDirectExecute,
+ secondarySessionFallback=self.secondarySessionFallback
+ )
+
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def disconnect(self):
+ """
+ Disconnects from the SAP HANA database
+ """
+
+ if self.is_connected is True:
+ self.connection.close()
+ self.is_connected = False
+
+ def check_connection(self) -> StatusResponse:
+ """
+ Check the connection of the SAP HANA database
+ :return: success status and error message if error occurs
+ """
+
+ response = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ connection = self.connect()
+ with connection.cursor() as cur:
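+                # SYS.DUMMY is HANA's built-in single-row table (analogous to Oracle's DUAL), so this is a cheap connectivity probe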
+ cur.execute('SELECT 1 FROM SYS.DUMMY')
+ response.success = True
+ except dbapi.Error as e:
+ log.error(f'Error connecting to SAP HANA {self.address}, {e}!')
+            response.error_message = str(e)
+
+ if response.success is True and need_to_close:
+ self.disconnect()
+ if response.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return response
+
+ def native_query(self, query: str) -> Response:
+ """
+ Receive SQL query and runs it
+ :param query: The SQL query to run in SAP HANA
+ :return: returns the records from the current recordset
+ """
+
+ need_to_close = self.is_connected is False
+
+ connection = self.connect()
+ with connection.cursor() as cur:
+ try:
+ cur.execute(query)
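+                # Statements that return no result set (e.g. DDL/DML) leave cur.description unset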
+ if not cur.description:
+ response = Response(RESPONSE_TYPE.OK)
+ else:
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.address}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_code=0,
+ error_message=str(e)
+ )
+ connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+        Retrieve the data for the SQL statement, excluding rows that don't satisfy the WHERE condition
+ """
+
+ query_str = self.renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def get_tables(self) -> Response:
+ """
+        List all tables in SAP HANA in the current schema
+ """
+
+ return self.native_query(f"""
+ SELECT SCHEMA_NAME,
+ TABLE_NAME,
+ TABLE_TYPE
+ FROM
+ SYS.TABLES
+ WHERE IS_SYSTEM_TABLE = 'FALSE'
+ AND IS_USER_DEFINED_TYPE = 'FALSE'
+ AND IS_TEMPORARY = 'FALSE'
+ """)
+
+ def get_columns(self, table_name: str) -> Response:
+ """
+ List all columns in a table in SAP HANA in the current schema
+ :param table_name: the table name for which to list the columns
+ :return: returns the columns in the table
+ """
+
+ return self.native_query(f"""
+ SELECT COLUMN_NAME AS "Field",
+ DATA_TYPE_NAME AS "Type"
+ FROM SYS.TABLE_COLUMNS
+ WHERE TABLE_NAME = '{table_name}'
+ """)
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The IP address/host name of the SAP HANA instance host.'
+ },
+ port={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The port number of the SAP HANA instance.'
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the user name.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the password for the user.'
+ },
+ schema={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Sets the current schema, which is used for identifiers without a schema.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the name of the database to connect to. (Not used for SAP HANA Cloud)'
+ },
+ autocommit={
+ 'type': ARG_TYPE.BOOL,
+ 'description': 'Sets the autocommit mode for the connection.'
+ },
+ properties={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Additional dictionary with special properties of the connection.'
+ },
+ encrypt={
+ 'type': ARG_TYPE.BOOL,
+ 'description': 'Enables or disables TLS encryption.'
+ },
+ sslHostNameInCertificate={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the host name used to verify server\'s identity.'
+ },
+ sslValidateCertificate={
+ 'type': ARG_TYPE.BOOL,
+ 'description': 'Specifies whether to validate the server\'s certificate.'
+ },
+ sslCryptoProvider={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the cryptographic library provider used for TLS communication.'
+ },
+ sslTrustStore={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the path to a trust store file that contains the server\'s public certificates.'
+ },
+ sslKeyStore={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the path to the keystore file that contains the client\'s identity.'
+ },
+ cseKeyStorePassword={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Provides the password for the local key store.'
+ },
+ sslSNIHostname={
+ 'type': ARG_TYPE.STR,
+ 'description': dedent("""Specifies the name of the host that is attempting to connect at the start of
+ the TLS handshaking process.""")
+ },
+ sslSNIRequest={
+ 'type': ARG_TYPE.BOOL,
+ 'description': 'Specifies whether SNI requests are enabled for TLS connections: TRUE/FALSE.'
+ },
+ siteType={
+ 'type': ARG_TYPE.STR,
+ 'description': dedent("""Specifies whether the connection is made to either the PRIMARY or SECONDARY
+ site in an Active/Active (read enabled) system.""")
+ },
+ splitBatchCommands={
+ 'type': ARG_TYPE.BOOL,
+ 'description': 'Allows split and parallel execution of batch commands on partitioned tables.'
+ },
+ routeDirectExecute={
+ 'type': ARG_TYPE.BOOL,
+ 'description': dedent("""Converts direct execute into prepare and execute (routed execute) if the
+ number of index servers is more than one and if statement routing is enabled.""")
+ },
+ secondarySessionFallback={
+ 'type': ARG_TYPE.BOOL,
+ 'description': dedent("""Forces the ongoing transaction on a non-anchor connection to fall back
+ to the anchor/primary connection if this connection is dropped by the network or server.""")
+ }
+)
+
+connection_args_example = OrderedDict(
+ host='<uuid>.hana.trial-us10.hanacloud.ondemand.com',
+ port=30013,
+ user='DBADMIN',
+ password='password',
+ schema='MINDSDB',
+)
| diff --git a/mindsdb/integrations/handlers/hana_handler/tests/test_hana_handler.py b/mindsdb/integrations/handlers/hana_handler/tests/test_hana_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/hana_handler/tests/test_hana_handler.py
@@ -0,0 +1,64 @@
+import os
+import unittest
+
+from mindsdb.integrations.handlers.hana_handler.hana_handler import HanaHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+"""
+create schema MINDSDB;
+
+create table MINDSDB.TEST
+(
+ ID INTEGER not null,
+ NAME NVARCHAR(1),
+ DESCRIPTION NVARCHAR(1)
+);
+
+create unique index MINDSDB.TEST_ID_INDEX
+ on MINDSDB.TEST (ID);
+
+alter table MINDSDB.TEST
+ add constraint TEST_PK
+ primary key (ID);
+
+insert into MINDSDB.TEST
+values (1, 'h', 'w');
+"""
+
+
+class HanaHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": os.environ.get('HANA_HOST', 'localhost'),
+ "port": os.environ.get('HANA_PORT', 30015),
+ "user": "DBADMIN",
+ "password": os.environ.get('HANA_PASSWORD'),
+ "schema": "MINDSDB",
+ }
+ cls.handler = HanaHandler('test_hana_handler', cls.kwargs)
+
+ def test_0_connect(self):
+ assert self.handler.connect()
+
+ def test_1_check_connection(self):
+ assert self.handler.check_connection().success is True
+
+ def test_2_get_columns(self):
+ assert self.handler.get_columns('TEST').resp_type is not RESPONSE_TYPE.ERROR
+
+ def test_3_get_tables(self):
+ assert self.handler.get_tables().resp_type is not RESPONSE_TYPE.ERROR
+
+ def test_4_select_query(self):
+ query = 'SELECT * FROM MINDSDB.TEST WHERE ID=2'
+ assert self.handler.query(query).resp_type is RESPONSE_TYPE.TABLE
+
+ def test_5_update_query(self):
+ query = 'UPDATE MINDSDB.TEST SET NAME=\'s\' WHERE ID=1'
+ assert self.handler.query(query).resp_type is RESPONSE_TYPE.OK
+
+
+if __name__ == "__main__":
+ unittest.main(failfast=True)
| [New Integration]: Integration with SAP HANA
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
**_SAP HANA (High-performance ANalytic Appliance) is a multi-model database that stores data in its memory instead of keeping it on a disk. The column-oriented in-memory database design allows you to run advanced analytics alongside high-speed transactions – in a single system._**
### Motivation
**SAP HANA is widely used; this integration could help organizations solve their ML problems using MindsDB.**
### Implementation
For implementation this could help
- [hdbcli - SAP HANA Python Driver](https://pypi.org/project/hdbcli/) (a minimal connection sketch is shown after this list)
- [sqlalchemy-hana - Dialect FOR HANA](https://github.com/SAP/sqlalchemy-hana)
- For TESTING [SAP HANA Cloud Trial](https://www.sap.com/cmp/td/sap-hana-cloud-trial.html) OR [express edition Trial](https://www.sap.com/products/technology-platform/hana/express-trial.html)
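
A minimal, hypothetical connection sketch with the hdbcli driver listed above (host, port, and credentials below are placeholders, not real endpoints); the handler patch above passes these same parameters to `dbapi.connect`:

```python
from hdbcli import dbapi  # SAP HANA Python driver (DB-API 2.0)

# Placeholder connection details for illustration only
conn = dbapi.connect(
    address='<instance>.hana.trial-us10.hanacloud.ondemand.com',
    port=443,               # SQL port of the instance (443 for HANA Cloud)
    user='DBADMIN',
    password='<password>',
    encrypt=True            # HANA Cloud only accepts TLS connections
)
with conn.cursor() as cur:
    cur.execute('SELECT 1 FROM SYS.DUMMY')
    print(cur.fetchall())
conn.close()
```

The sqlalchemy-hana dialect is only needed for rendering SQL from parsed queries, which is what the patch does via `SqlalchemyRender`.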
### Anything else?
_No response_
| Working on this.
Awesome @sudiptog81 | 2022-10-05T07:52:28Z | [] | [] |
mindsdb/mindsdb | 3,150 | mindsdb__mindsdb-3150 | [
"2663"
] | 0537052bd6ae80331142a2df71817b1944d30bc0 | diff --git a/mindsdb/integrations/handlers/teradata_handler/__about__.py b/mindsdb/integrations/handlers/teradata_handler/__about__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/teradata_handler/__about__.py
@@ -0,0 +1,9 @@
+__title__ = 'MindsDB Teradata handler'
+__package_name__ = 'mindsdb_teradata_handler'
+__version__ = '0.0.1'
+__description__ = "MindsDB handler for Teradata"
+__author__ = 'Sudipto Ghosh'
+__github__ = 'https://github.com/mindsdb/mindsdb'
+__pypi__ = 'https://pypi.org/project/mindsdb/'
+__license__ = 'GPL-3.0'
+__copyright__ = 'Copyright 2022 - mindsdb'
diff --git a/mindsdb/integrations/handlers/teradata_handler/__init__.py b/mindsdb/integrations/handlers/teradata_handler/__init__.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/teradata_handler/__init__.py
@@ -0,0 +1,23 @@
+from mindsdb.integrations.libs.const import HANDLER_TYPE
+
+from .__about__ import __version__ as version, __description__ as description
+try:
+ from .teradata_handler import (
+ TeradataHandler as Handler,
+ connection_args_example,
+ connection_args
+ )
+ import_error = None
+except Exception as e:
+ Handler = None
+ import_error = e
+
+title = 'Teradata'
+name = 'teradata'
+type = HANDLER_TYPE.DATA
+icon_path = 'icon.svg'
+
+__all__ = [
+ 'Handler', 'version', 'name', 'type', 'title', 'description',
+ 'connection_args', 'connection_args_example', 'import_error', 'icon_path'
+]
diff --git a/mindsdb/integrations/handlers/teradata_handler/teradata_handler.py b/mindsdb/integrations/handlers/teradata_handler/teradata_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/teradata_handler/teradata_handler.py
@@ -0,0 +1,262 @@
+from textwrap import dedent
+from collections import OrderedDict
+
+from pandas import DataFrame
+from sqlalchemy import String
+
+from sqlalchemy.sql import text, bindparam
+
+import teradatasql
+import teradatasqlalchemy.dialect as teradata_dialect
+
+from mindsdb_sql import parse_sql
+from mindsdb_sql.parser.ast.base import ASTNode
+from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
+
+from mindsdb.utilities.log import log
+
+from mindsdb.integrations.libs.base_handler import DatabaseHandler
+from mindsdb.integrations.libs.response import (
+ HandlerStatusResponse as StatusResponse,
+ HandlerResponse as Response,
+ RESPONSE_TYPE
+)
+from mindsdb.integrations.libs.const import HANDLER_CONNECTION_ARG_TYPE as ARG_TYPE
+
+
+class TeradataHandler(DatabaseHandler):
+ """
+ This handler handles connection and execution of the Teradata statements.
+ """
+
+ name = 'teradata'
+
+ def __init__(self, name: str, connection_data: dict, **kwargs):
+ super().__init__(name)
+
+ self.dialect = 'teradata'
+ self.parser = parse_sql
+ self.connection_data = connection_data
+ self.renderer = SqlalchemyRender(teradata_dialect.TeradataDialect)
+
+ self.host = self.connection_data.get('host')
+ self.database = self.connection_data.get('database')
+
+ self.connection = None
+ self.is_connected = False
+
+ def __del__(self):
+ if self.is_connected is True:
+ self.disconnect()
+
+ def connect(self):
+ """
+        Handles the connection to a Teradata database instance.
+ """
+
+ if self.is_connected is True:
+ return self.connection
+
+ connection = teradatasql.connect(
+ **self.connection_data
+ )
+
+ self.is_connected = True
+ self.connection = connection
+ return self.connection
+
+ def disconnect(self):
+ """
+ Disconnects from the Teradata database
+ """
+
+ if self.is_connected is True:
+ self.connection.close()
+ self.is_connected = False
+
+ def check_connection(self) -> StatusResponse:
+ """
+ Check the connection of the Teradata database
+ :return: success status and error message if error occurs
+ """
+
+ response = StatusResponse(False)
+ need_to_close = self.is_connected is False
+
+ try:
+ connection = self.connect()
+ with connection.cursor() as cur:
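+                # Teradata has no DUAL table, so a one-row derived table serves as a cheap connectivity probe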
+ cur.execute('SELECT 1 FROM (SELECT 1 AS "dual") AS "dual"')
+ response.success = True
+ except teradatasql.Error as e:
+ log.error(f'Error connecting to Teradata {self.host}, {e}!')
+            response.error_message = str(e)
+
+ if response.success is True and need_to_close:
+ self.disconnect()
+ if response.success is False and self.is_connected is True:
+ self.is_connected = False
+
+ return response
+
+ def native_query(self, query: str) -> Response:
+ """
+ Receive SQL query and runs it
+ :param query: The SQL query to run in Teradata
+ :return: returns the records from the current recordset
+ """
+
+ need_to_close = self.is_connected is False
+
+ connection = self.connect()
+ with connection.cursor() as cur:
+ try:
+ cur.execute(query)
+ if not cur.description:
+ response = Response(RESPONSE_TYPE.OK)
+ else:
+ result = cur.fetchall()
+ response = Response(
+ RESPONSE_TYPE.TABLE,
+ DataFrame(
+ result,
+ columns=[x[0] for x in cur.description]
+ )
+ )
+ connection.commit()
+ except Exception as e:
+ log.error(f'Error running query: {query} on {self.host}!')
+ response = Response(
+ RESPONSE_TYPE.ERROR,
+ error_code=0,
+ error_message=str(e)
+ )
+ connection.rollback()
+
+ if need_to_close is True:
+ self.disconnect()
+
+ return response
+
+ def query(self, query: ASTNode) -> Response:
+ """
+        Retrieve the data for the SQL statement, excluding rows that don't satisfy the WHERE condition
+ """
+
+ query_str = self.renderer.get_string(query, with_failback=True)
+ return self.native_query(query_str)
+
+ def get_tables(self) -> Response:
+ """
+ List all tables in Teradata in the current database
+ """
+
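+        # DBC.TablesV TableKind filter: 'T' = table, 'O' = table without a primary index, 'Q' = queue table.
+        # bindparam(...).compile(literal_binds=True) inlines the database name as a quoted literal, since native_query expects a plain SQL string.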
+ return self.native_query(
+ str(text(f"""
+ SELECT DataBaseName,
+ TableName,
+ TableKind
+ FROM DBC.TablesV
+ WHERE DatabaseName = :database
+ AND (TableKind = 'T'
+ OR TableKind = 'O'
+ OR TableKind = 'Q')
+ """).bindparams(
+ bindparam('database', value=self.database, type_=String)
+ ).compile(compile_kwargs={"literal_binds": True}))
+ )
+
+ def get_columns(self, table_name: str) -> Response:
+ """
+        List all columns in a table in Teradata in the current database
+ :param table_name: the table name for which to list the columns
+ :return: returns the columns in the table
+ """
+
+ return self.native_query(
+ str(text(f"""
+ SELECT ColumnName AS "Field",
+ ColumnType AS "Type"
+ FROM DBC.ColumnsV
+ WHERE DatabaseName (NOT CASESPECIFIC) = :database
+ AND TableName (NOT CASESPECIFIC) = :table_name
+ """).bindparams(
+ bindparam('database', value=self.database, type_=String),
+ bindparam('table_name', value=table_name, type_=String)
+ ).compile(compile_kwargs={"literal_binds": True}))
+ )
+
+
+connection_args = OrderedDict(
+ host={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The IP address/host name of the Teradata instance host.'
+ },
+ user={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the user name.'
+ },
+ password={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the password for the user.'
+ },
+ database={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the initial database to use after logon, instead of the default database.'
+ },
+ dbs_port={
+ 'type': ARG_TYPE.STR,
+ 'description': 'The port number of the Teradata instance.'
+ },
+ encryptdata={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Controls encryption of data exchanged between the driver and the database.'
+ },
+ https_port={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the database port number for HTTPS/TLS connections.'
+ },
+ sslca={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the file name of a PEM file that contains Certificate Authority (CA) certificates.'
+ },
+ sslcapath={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies a directory of PEM files that contain Certificate Authority (CA) certificates.'
+ },
+ sslcipher={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the TLS cipher for HTTPS/TLS connections.'
+ },
+ sslmode={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the SSL mode for HTTPS/TLS connections.'
+ },
+ sslprotocol={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the TLS protocol for HTTPS/TLS connections.'
+ },
+ tmode={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the Teradata transaction mode.'
+ },
+ logmech={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the login authentication method.'
+ },
+ logdata={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies extra data for the chosen logon authentication method.'
+ },
+ browser={
+ 'type': ARG_TYPE.STR,
+ 'description': 'Specifies the command to open the browser for Browser Authentication.'
+ }
+)
+
+connection_args_example = OrderedDict(
+ host='192.168.0.41',
+ user='dbc',
+ password='dbc',
+ database='HR'
+)
| diff --git a/mindsdb/integrations/handlers/teradata_handler/tests/test_teradata_handler.py b/mindsdb/integrations/handlers/teradata_handler/tests/test_teradata_handler.py
new file mode 100644
--- /dev/null
+++ b/mindsdb/integrations/handlers/teradata_handler/tests/test_teradata_handler.py
@@ -0,0 +1,69 @@
+import os
+import unittest
+
+from mindsdb.integrations.handlers.teradata_handler.teradata_handler import TeradataHandler
+from mindsdb.api.mysql.mysql_proxy.libs.constants.response_type import RESPONSE_TYPE
+
+
+"""
+CREATE
+DATABASE HR
+AS PERMANENT = 60e6, -- 60MB
+ SPOOL = 120e6; -- 120MB
+
+CREATE
+SET TABLE HR.Employees (
+ GlobalID INTEGER,
+ FirstName VARCHAR(30),
+ LastName VARCHAR(30),
+ DateOfBirth DATE FORMAT 'YYYY-MM-DD',
+ JoinedDate DATE FORMAT 'YYYY-MM-DD',
+ DepartmentCode BYTEINT
+)
+UNIQUE PRIMARY INDEX ( GlobalID );
+
+INSERT INTO HR.Employees (GlobalID,
+ FirstName,
+ LastName,
+ DateOfBirth,
+ JoinedDate,
+ DepartmentCode)
+VALUES (101,
+ 'Adam',
+ 'Tworkowski',
+ '1980-01-05',
+ '2004-08-01',
+ 01);
+"""
+
+
+class TeradataHandlerTest(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.kwargs = {
+ "host": os.environ.get('TERADATA_HOST', 'localhost'),
+ "user": "dbc",
+ "password": "dbc",
+ "database": "HR"
+ }
+ cls.handler = TeradataHandler('test_teradata_handler', cls.kwargs)
+
+ def test_0_connect(self):
+ assert self.handler.connect()
+
+ def test_1_check_connection(self):
+ assert self.handler.check_connection().success is True
+
+ def test_2_get_columns(self):
+ assert self.handler.get_columns('Employees').resp_type is not RESPONSE_TYPE.ERROR
+
+ def test_3_get_tables(self):
+ assert self.handler.get_tables().resp_type is not RESPONSE_TYPE.ERROR
+
+ def test_4_select_query(self):
+ query = 'SELECT * FROM HR.Employees WHERE GlobalID=101'
+ assert self.handler.query(query).resp_type is RESPONSE_TYPE.TABLE
+
+
+if __name__ == "__main__":
+ unittest.main(failfast=True)
| [New Integration]: Integration with Teradata
### Is there an existing integration?
- [X] I have searched the existing integrations.
### Use Case
Teradata is an amazing data warehouse used by many MNCs and developers, and it has a very wide range of use cases.
### Motivation
**Teradata** is an amazing data warehouse, and this integration would help developers and companies working with Teradata to get predictions through **simple SQL queries**. It will also give MindsDB a much broader customer reach.
### Implementation
_No response_
### Anything else?
_No response_
| Adding resources for contributors:
- https://pypi.org/project/teradatasqlalchemy/
- https://quickstarts.teradata.com/
In case no one takes this up, I can work on this during the weekend. | 2022-10-06T04:06:20Z | [] | [] |
castorini/pyserini | 34 | castorini__pyserini-34 | [
"33"
] | d84c59e0f94459c5d0f666d6e25529d26c98d090 | diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -14,7 +14,7 @@
import sys
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
-
+sys.path.insert(0, os.path.abspath('../..'))
# -- Project information -----------------------------------------------------
diff --git a/pyserini/index/pyutils.py b/pyserini/index/pyutils.py
--- a/pyserini/index/pyutils.py
+++ b/pyserini/index/pyutils.py
@@ -14,9 +14,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-'''
+"""
Module for providing python interface to Anserini index reader utils
-'''
+"""
+
from ..pyclass import JIndexReaderUtils, JDocumentVectorWeight, JString
import logging
@@ -25,14 +26,14 @@
class IndexReaderUtils:
- '''
+ """
Wrapper class for Anserini's IndexReaderUtils.
Parameters
----------
index_dir : str
Path to Lucene index directory
- '''
+ """
def __init__(self, index_dir):
self.object = JIndexReaderUtils()
@@ -42,32 +43,17 @@ class DocumentVectorWeight:
NONE = JDocumentVectorWeight.NONE
TF_IDF = JDocumentVectorWeight.TF_IDF
- class Posting:
- '''
- Basic Posting class for Postings List
- '''
- def __init__(self, docid, term_freq, positions):
- self.docid = docid
- self.term_freq = term_freq
- self.positions = positions
-
- def __repr__(self):
- repr = '(' + str(self.docid) + ', ' + str(self.term_freq) + ')'
- if self.positions:
- repr += ' [' + ','.join([str(p) for p in self.positions]) + ']'
- return repr
-
class IndexTerm:
- '''
+ """
Basic IndexTerm class to represent each term in index
- '''
+ """
def __init__(self, term, doc_freq, total_term_freq):
self.term = term
self.doc_freq = doc_freq
self.total_term_freq = total_term_freq
def analyze(self, text):
- '''
+ """
Parameters
----------
term : str
@@ -75,7 +61,7 @@ def analyze(self, text):
-------
result : str
List of stemmed tokens
- '''
+ """
stemmed = self.object.analyze(JString(text))
token_list = []
for token in stemmed.toArray():
@@ -83,16 +69,16 @@ def analyze(self, text):
return token_list
def terms(self):
- '''
+ """
:return: generator over terms
- '''
+ """
term_iterator = self.object.getTerms(self.reader)
while term_iterator.hasNext():
cur_term = term_iterator.next()
yield self.IndexTerm(cur_term.getTerm(), cur_term.getDF(), cur_term.getTotalTF())
def get_term_counts(self, term):
- '''
+ """
Parameters
----------
term : str
@@ -101,12 +87,12 @@ def get_term_counts(self, term):
-------
result : long, long
Collection frequency and document frequency of term
- '''
+ """
term_map = self.object.getTermCounts(self.reader, JString(term))
return term_map.get(JString('collectionFreq')), term_map.get(JString('docFreq'))
def get_postings_list(self, term):
- '''
+ """
Parameters
----------
term : str
@@ -114,15 +100,15 @@ def get_postings_list(self, term):
-------
result : list<Posting>
Postings list for term
- '''
+ """
postings_list = self.object.getPostingsList(self.reader, JString(term))
result = []
for posting in postings_list.toArray():
- result.append(self.Posting(posting.getDocid(), posting.getTF(), posting.getPositions()))
+ result.append(Posting(posting.getDocid(), posting.getTF(), posting.getPositions()))
return result
def get_document_vector(self, docid):
- '''
+ """
Parameters
----------
docid : str
@@ -131,7 +117,7 @@ def get_document_vector(self, docid):
-------
result : dict
Terms and their respective frequencies in document
- '''
+ """
doc_vector_map = self.object.getDocumentVector(self.reader, JString(docid))
doc_vector_dict = {}
for term in doc_vector_map.keySet().toArray():
@@ -139,7 +125,7 @@ def get_document_vector(self, docid):
return doc_vector_dict
def get_raw_document(self, docid):
- '''
+ """
Parameters
----------
docid : str
@@ -147,11 +133,11 @@ def get_raw_document(self, docid):
Returns
-------
result : raw document given its collection docid
- '''
+ """
return self.object.getRawDocument(self.reader, JString(docid))
def get_bm25_term_weight(self, docid, term):
- '''
+ """
Parameters
----------
docid : str
@@ -161,16 +147,70 @@ def get_bm25_term_weight(self, docid, term):
-------
result : float
BM25 score (NaN if no documents match)
- '''
+ """
return self.object.getBM25TermWeight(self.reader, JString(docid), JString(term))
def dump_document_vectors(self, reqDocidsPath, weight):
- '''
+ """
Parameters
----------
reqDocidsPath : str
dumps the document vector for all documents in reqDocidsPath
weight : DocumentVectorWeight
the weight for dumped document vector(s)
- '''
- self.object.dumpDocumentVectors(self.reader, reqDocidsPath, weight)
\ No newline at end of file
+ """
+ self.object.dumpDocumentVectors(self.reader, reqDocidsPath, weight)
+
+ def convert_internal_docid_to_collection_docid(self, docid: int) -> str:
+ """Converts Lucene's internal ``docid`` to its external collection ``docid``.
+
+ Parameters
+ ----------
+ docid : int
+ A Lucene internal ``docid``.
+
+ Returns
+ -------
+ str
+ The external collection ``docid`` corresponding to Lucene's internal ``docid``.
+ """
+ return self.object.convertLuceneDocidToDocid(self.reader, docid)
+
+ def convert_collection_docid_to_internal_docid(self, docid: str) -> int:
+ """Converts an external collection ``docid`` to its Lucene's internal ``docid``.
+
+ Parameters
+ ----------
+ docid : str
+ An external collection ``docid``.
+
+ Returns
+ -------
+ str
+ The Lucene internal ``docid`` corresponding to the external collection ``docid``.
+ """
+ return self.object.convertDocidToLuceneDocid(self.reader, docid)
+
+
+class Posting:
+ """Class representing a posting in a postings list.
+
+ Parameters
+ ----------
+ docid : int
+ The ``docid`` associated with this posting.
+ tf : int
+ The term frequency associated with this posting.
+ positions : List[int]
+ The list of positions associated with this posting.
+ """
+ def __init__(self, docid, tf, positions):
+ self.docid = docid
+ self.tf = tf
+ self.positions = positions
+
+ def __repr__(self):
+ repr = '(' + str(self.docid) + ', ' + str(self.tf) + ')'
+ if self.positions:
+ repr += ' [' + ','.join([str(p) for p in self.positions]) + ']'
+ return repr
\ No newline at end of file
| diff --git a/tests/test_indexutils.py b/tests/test_indexutils.py
--- a/tests/test_indexutils.py
+++ b/tests/test_indexutils.py
@@ -42,10 +42,14 @@ def test_postings(self):
self.assertEqual(len(postings_list), 138)
self.assertEqual(postings_list[0].docid, 238)
- self.assertEqual(postings_list[0].term_freq, 1)
+ self.assertEqual(self.index_utils.convert_internal_docid_to_collection_docid(postings_list[0].docid), 'CACM-0239')
+ self.assertEqual(postings_list[0].tf, 1)
+ self.assertEqual(len(postings_list[0].positions), 1)
self.assertEqual(postings_list[-1].docid, 3168)
- self.assertEqual(postings_list[-1].term_freq, 1)
+ self.assertEqual(self.index_utils.convert_internal_docid_to_collection_docid(postings_list[-1].docid), 'CACM-3169')
+ self.assertEqual(postings_list[-1].tf, 1)
+ self.assertEqual(len(postings_list[-1].positions), 1)
def test_doc_vector(self):
doc_vector = self.index_utils.get_document_vector('CACM-3134')
@@ -53,6 +57,20 @@ def test_doc_vector(self):
self.assertEqual(doc_vector['inform'], 8)
self.assertEqual(doc_vector['retriev'], 7)
+ def test_doc_vector_matches_index(self):
+ # From the document vector, look up the term frequency of "information".
+ doc_vector = self.index_utils.get_document_vector('CACM-3134')
+ self.assertEqual(doc_vector['inform'], 8)
+
+ # Now look up the postings list for "information".
+ term = 'information'
+ postings_list = list(self.index_utils.get_postings_list(term))
+
+ for i in range(len(postings_list)):
+ if self.index_utils.convert_internal_docid_to_collection_docid(postings_list[i].docid) == 'CACM-3134':
+ # The tf values should match.
+ self.assertEqual(postings_list[i].tf, 8)
+
def test_raw_doc(self):
lines = self.index_utils.get_raw_document('CACM-3134').splitlines()
self.assertEqual(len(lines), 55)
@@ -63,6 +81,12 @@ def test_bm25_weight(self):
self.assertAlmostEqual(self.index_utils.get_bm25_term_weight('CACM-3134', 'inform'), 1.925014, places=5)
self.assertAlmostEqual(self.index_utils.get_bm25_term_weight('CACM-3134', 'retriev'), 2.496352, places=5)
+ def test_docid_converstion(self):
+ self.assertEqual(self.index_utils.convert_internal_docid_to_collection_docid(1), 'CACM-0002')
+ self.assertEqual(self.index_utils.convert_collection_docid_to_internal_docid('CACM-0002'), 1)
+ self.assertEqual(self.index_utils.convert_internal_docid_to_collection_docid(1000), 'CACM-1001')
+ self.assertEqual(self.index_utils.convert_collection_docid_to_internal_docid('CACM-1001'), 1000)
+
def tearDown(self):
os.remove(self.tarball_name)
shutil.rmtree(self.index_dir)
| Expose methods to convert between internal and external docid
This is a summary of the issue presented in #32
Consider this fragment:
```
>>> from pyserini.index import pyutils
>>>
>>> index_utils = pyutils.IndexReaderUtils('index-robust04-20191213/')
>>> postings_list = index_utils.get_postings_list('black')
>>>
>>> for i in range(0, 10):
... print('{}'.format(postings_list[i]))
...
(6, 2) [555,606]
(29, 1) [410]
(32, 2) [65,462]
(35, 2) [288,475]
(56, 1) [662]
(60, 1) [69]
(61, 1) [110]
(63, 1) [195]
(74, 2) [230,518]
(96, 1) [107]
```
The docids (e.g., 6 in the first posting) refer to _internal_ Lucene docids, which are different from external docids (i.e., those in the collection).
Use this hidden method `convertLuceneDocidToDocid` to convert, as in:
```
>>> for i in range(0, 10):
... print('{} {}'.format(index_utils.object.convertLuceneDocidToDocid(index_utils.reader, postings_list[i].docid), postings_list[i]))
...
LA111289-0011 (6, 2) [555,606]
LA092890-0052 (29, 1) [410]
LA022489-0041 (32, 2) [65,462]
LA051990-0051 (35, 2) [288,475]
LA092890-0077 (56, 1) [662]
LA022489-0061 (60, 1) [69]
LA021889-0073 (61, 1) [110]
LA110689-0057 (63, 1) [195]
LA080789-0088 (74, 2) [230,518]
LA021889-0117 (96, 1) [107]
```
The TODO is to explicitly expose `convertLuceneDocidToDocid`.
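As a sketch of what the exposed Python API could look like (the method name below follows the wrapper added in this PR's patch; the index path is the same illustrative one as above):

```python
from pyserini.index import pyutils

index_utils = pyutils.IndexReaderUtils('index-robust04-20191213/')

# internal Lucene docid -> external collection docid
print(index_utils.convert_internal_docid_to_collection_docid(6))   # e.g., 'LA111289-0011'

# The reverse mapping gets an analogous wrapper in the patch,
# e.g. convert_collection_docid_to_internal_docid('LA052189-0089') -> 200443 (see below).
```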
Similarly, we can use `convertDocidToLuceneDocid` to convert an external collection docid into an internal docid:
```
>>> from jnius import autoclass
>>> JString = autoclass('java.lang.String')
>>> index_utils.object.convertDocidToLuceneDocid(index_utils.reader, JString("LA052189-0089"))
200443
```
We can verify as follows:
```
>>> for i in range(len(postings_list)):
... if postings_list[i].docid == 200443:
... print('{} {}'.format(index_utils.object.convertLuceneDocidToDocid(index_utils.reader, postings_list[i].docid), postings_list[i]))
...
LA052189-0089 (200443, 64) [18,133,175,212,225,244,262,273,307,320,344,372,388,431,438,454,464,541,576,583,616,640,772,778,801,831,838,885,891,912,937,952,970,1123,1151,1165,1180,1210,1215,1231,1270,1307,1346,1431,1436,1507,1514,1542,1546,1550,1663,1676,1726,1750,1764,1769,1781,1784,1838,1847,1873,1880,1922,1971]
```
Which matches exactly what we get from `get_document_vector`:
```
>>> index_utils = pyutils.IndexReaderUtils('index-robust04-20191213/')
>>> doc_vector = index_utils.get_document_vector("LA052189-0089")
>>> doc_vector['black']
64
```
| 2020-02-14T15:06:33Z | [] | [] |
|
castorini/pyserini | 48 | castorini__pyserini-48 | [
"44"
] | fcef9d4bba1b4d1357a42d68705a82282f41524c | diff --git a/pyserini/analysis/__init__.py b/pyserini/analysis/__init__.py
new file mode 100644
--- /dev/null
+++ b/pyserini/analysis/__init__.py
@@ -0,0 +1 @@
+
diff --git a/pyserini/analysis/pyanalysis.py b/pyserini/analysis/pyanalysis.py
new file mode 100644
--- /dev/null
+++ b/pyserini/analysis/pyanalysis.py
@@ -0,0 +1,52 @@
+from ..pyclass import JArabicAnalyzer
+from ..pyclass import JBengaliAnalyzer
+from ..pyclass import JCJKAnalyzer
+from ..pyclass import JEnglishStemmingAnalyzer
+from ..pyclass import JFreebaseAnalyzer
+from ..pyclass import JFrenchAnalyzer
+from ..pyclass import JGermanAnalyzer
+from ..pyclass import JHindiAnalyzer
+from ..pyclass import JSpanishAnalyzer
+from ..pyclass import JTokenizeOnlyAnalyzer
+from ..pyclass import JTweetAnalyzer
+
+
+def get_analyzer(analyzer, stemmer='porter'):
+ """
+ Parameters
+ ----------
+ analyzer : String
+ Name of analyzer to get
+
+ stemmer : String
+ Name of stemmer that analyzer needs to use (not all analyzers allow for a stemmer to be provided)
+
+ Returns
+ -------
+ result : org.apache.lucene.document.Analyzer
+ Java Analyzer object
+ """
+ if analyzer == 'arabic':
+ return JArabicAnalyzer()
+ elif analyzer == 'bengali':
+ return JBengaliAnalyzer()
+ elif analyzer == 'cjk':
+ return JCJKAnalyzer()
+ elif analyzer == 'german':
+ return JGermanAnalyzer()
+ elif analyzer == 'spanish':
+ return JSpanishAnalyzer()
+ elif analyzer == 'french':
+ return JFrenchAnalyzer()
+ elif analyzer == 'hindi':
+ return JHindiAnalyzer()
+ elif analyzer == 'english':
+ return JEnglishStemmingAnalyzer(stemmer)
+ elif analyzer == 'freebase':
+ return JFreebaseAnalyzer()
+ elif analyzer == 'tokenize':
+ return JTokenizeOnlyAnalyzer()
+ elif analyzer == 'tweet':
+ return JTweetAnalyzer()
+ else:
+ return JEnglishStemmingAnalyzer('porter')
diff --git a/pyserini/pyclass.py b/pyserini/pyclass.py
--- a/pyserini/pyclass.py
+++ b/pyserini/pyclass.py
@@ -36,6 +36,21 @@
JList = autoclass('java.util.List')
JArrayList = autoclass('java.util.ArrayList')
+### Analysis
+
+JArabicAnalyzer = autoclass('org.apache.lucene.analysis.ar.ArabicAnalyzer')
+JBengaliAnalyzer = autoclass('org.apache.lucene.analysis.bn.BengaliAnalyzer')
+JCJKAnalyzer = autoclass('org.apache.lucene.analysis.cjk.CJKAnalyzer')
+JGermanAnalyzer = autoclass('org.apache.lucene.analysis.de.GermanAnalyzer')
+JSpanishAnalyzer = autoclass('org.apache.lucene.analysis.es.SpanishAnalyzer')
+JFrenchAnalyzer = autoclass('org.apache.lucene.analysis.fr.FrenchAnalyzer')
+JHindiAnalyzer = autoclass('org.apache.lucene.analysis.hi.HindiAnalyzer')
+
+JEnglishStemmingAnalyzer = autoclass('io.anserini.analysis.EnglishStemmingAnalyzer')
+JFreebaseAnalyzer = autoclass('io.anserini.analysis.FreebaseAnalyzer')
+JTokenizeOnlyAnalyzer = autoclass('io.anserini.analysis.TokenizeOnlyAnalyzer')
+JTweetAnalyzer = autoclass('io.anserini.analysis.TweetAnalyzer')
+
### Search
JDocument = autoclass('org.apache.lucene.document.Document')
@@ -70,7 +85,7 @@ class JGenerators(Enum):
LuceneDocumentGenerator = autoclass('io.anserini.index.generator.LuceneDocumentGenerator')
JsoupGenerator = autoclass('io.anserini.index.generator.JsoupGenerator')
TweetGenerator = autoclass('io.anserini.index.generator.TweetGenerator')
- WapoGenerator = autoclass('io.anserini.index.generator.WapoGenerator')
+ WapoGenerator = autoclass('io.anserini.index.generator.WashingtonPostGenerator')
### Collection
diff --git a/pyserini/search/pysearch.py b/pyserini/search/pysearch.py
--- a/pyserini/search/pysearch.py
+++ b/pyserini/search/pysearch.py
@@ -129,6 +129,15 @@ def search_fields(self, q, f, boost, k):
"""
return self.object.searchFields(JString(q), JString(f), float(boost), k)
+ def set_analyzer(self, analyzer):
+ """
+ Parameters
+ ----------
+ analyzer : Analyzer
+ Java analyzer object
+ """
+ self.object.setAnalyzer(analyzer)
+
def set_search_tweets(self, flag):
"""
Parameters
| diff --git a/tests/test_analysis.py b/tests/test_analysis.py
new file mode 100644
--- /dev/null
+++ b/tests/test_analysis.py
@@ -0,0 +1,49 @@
+import os
+import shutil
+import tarfile
+import unittest
+from random import randint
+from urllib.request import urlretrieve
+from pyserini.analysis import pyanalysis
+from pyserini.search import pysearch
+from pyserini.index import pyutils
+from pyserini.pyclass import JString
+
+
+class TestAnalyzers(unittest.TestCase):
+
+ def setUp(self):
+ # Download pre-built CACM index; append a random value to avoid filename clashes.
+ r = randint(0, 10000000)
+ self.collection_url = 'https://github.com/castorini/anserini-data/raw/master/CACM/lucene-index.cacm.tar.gz'
+ self.tarball_name = 'lucene-index.cacm-{}.tar.gz'.format(r)
+ self.index_dir = 'index{}/'.format(r)
+
+ filename, headers = urlretrieve(self.collection_url, self.tarball_name)
+
+ tarball = tarfile.open(self.tarball_name)
+ tarball.extractall(self.index_dir)
+ tarball.close()
+ self.searcher = pysearch.SimpleSearcher(f'{self.index_dir}lucene-index.cacm')
+ self.index_utils = pyutils.IndexReaderUtils(f'{self.index_dir}lucene-index.cacm')
+
+ def test_different_analyzers_are_different(self):
+ self.searcher.set_analyzer(pyanalysis.get_analyzer('tokenize'))
+ hits_first = self.searcher.search('information retrieval')
+ self.searcher.set_analyzer(pyanalysis.get_analyzer(''))
+ hits_second = self.searcher.search('information retrieval')
+ self.assertNotEqual(hits_first, hits_second)
+
+ def test_analyze_with_analyzer(self):
+ tokenizer = pyanalysis.get_analyzer('tokenize')
+ query = JString('information retrieval')
+ only_tokenization = self.index_utils.object.analyzeWithAnalyzer(query, tokenizer)
+ token_list = []
+ for token in only_tokenization.toArray():
+ token_list.append(token)
+ self.assertEqual(token_list, ['information', 'retrieval'])
+
+ def tearDown(self):
+ self.searcher.close()
+ os.remove(self.tarball_name)
+ shutil.rmtree(self.index_dir)
| Clean API to expose Lucene Analyzers
Related to #43 - currently `IndexReaderUtils` exposes `analyze` method:
https://github.com/castorini/pyserini/blob/master/pyserini/index/pyutils.py
This is hard-coded to a default analyzer. We should think about how to expose arbitrary Lucene analyzers in general... what would the API look like?
| BTW, on the Java end, `IndexReaderUtils` already exposes `analyzeWithAnalyzer`, so this can be done on the Python end without a new jar:
https://github.com/castorini/anserini/blob/master/src/main/java/io/anserini/index/IndexReaderUtils.java#L222
In fact, it just dispatches to `AnalyzerUtils.tokenize(analyzer, text)` so the Python end can call that method directly.
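A rough, untested sketch of that direct call from Python via pyjnius; the class paths mirror the ones used elsewhere in Pyserini's `pyclass.py`, and the `tokenize(analyzer, text)` signature is taken from the description above:

```python
from jnius import autoclass

JString = autoclass('java.lang.String')
JAnalyzerUtils = autoclass('io.anserini.analysis.AnalyzerUtils')
JEnglishStemmingAnalyzer = autoclass('io.anserini.analysis.EnglishStemmingAnalyzer')

analyzer = JEnglishStemmingAnalyzer('porter')
tokens = JAnalyzerUtils.tokenize(analyzer, JString('information retrieval'))
print([t for t in tokens.toArray()])
```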
I propose an API along the lines of:
```python
analyzer1 = Analyzer() # default
analyzer1.analyze('this is a string')
analyzer2 = Analyzer('whitespace') # specify a custom one
analyzer2.analyze('this is a string')
```
That is, each object is stateful, even though it just dispatches to static methods in Java.
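A minimal sketch of such a stateful wrapper, assuming the dispatch target is `AnalyzerUtils` as above; the string-to-analyzer lookup is left out and the names here are illustrative, not the final API:

```python
from jnius import autoclass

JString = autoclass('java.lang.String')
JAnalyzerUtils = autoclass('io.anserini.analysis.AnalyzerUtils')


class Analyzer:
    """Holds a Java analyzer instance and dispatches analyze() to the static Java helper."""

    def __init__(self, java_analyzer=None):
        # Fall back to Anserini's English stemming analyzer when none is given (assumption).
        if java_analyzer is None:
            java_analyzer = autoclass('io.anserini.analysis.EnglishStemmingAnalyzer')('porter')
        self.java_analyzer = java_analyzer

    def analyze(self, text):
        tokens = JAnalyzerUtils.tokenize(self.java_analyzer, JString(text))
        return [t for t in tokens.toArray()]
```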
@Chriskamphuis can you work on this?
| 2020-03-06T14:31:32Z | [] | [] |
castorini/pyserini | 51 | castorini__pyserini-51 | [
"47"
] | e4b5acc998d28a73fefc74db1c0925a9850caada | diff --git a/pyserini/index/pyutils.py b/pyserini/index/pyutils.py
--- a/pyserini/index/pyutils.py
+++ b/pyserini/index/pyutils.py
@@ -23,7 +23,7 @@
import logging
from typing import Dict, Iterator, List, Tuple
-from ..pyclass import JIndexReaderUtils, JString
+from ..pyclass import JIndexReaderUtils, JString, JAnalyzerUtils
logger = logging.getLogger(__name__)
@@ -87,24 +87,28 @@ def __init__(self, index_dir):
self.object = JIndexReaderUtils()
self.reader = self.object.getReader(JString(index_dir))
- def analyze(self, text: str) -> List[str]:
- """Applies Anserini's default Lucene ``Analyzer`` to process a piece of text.
+ def analyze(self, text: str, analyzer=None) -> List[str]:
+ """Analyzes a piece of text. Applies Anserini's default Lucene analyzer if analyzer not specified.
Parameters
----------
text : str
The piece of text to analyze.
-
+ analyzer : analyzer
+ The analyzer to apply.
Returns
-------
List[str]
- List of tokens corresponding to the output of the ``Analyzer``.
+ List of tokens corresponding to the output of the analyzer.
"""
- stemmed = self.object.analyze(JString(text))
- token_list = []
- for token in stemmed.toArray():
- token_list.append(token)
- return token_list
+ if analyzer is None:
+ results = JAnalyzerUtils.analyze(JString(text.encode('utf-8')))
+ else:
+ results = JAnalyzerUtils.analyze(analyzer, JString(text.encode('utf-8')))
+ tokens = []
+ for token in results.toArray():
+ tokens.append(token)
+ return tokens
def terms(self) -> Iterator[IndexTerm]:
"""Returns an iterator over (analyzed) terms in the index.
@@ -135,20 +139,28 @@ def get_term_counts(self, term: str) -> Tuple[int, int]:
term_map = self.object.getTermCounts(self.reader, JString(term.encode('utf-8')))
return term_map.get(JString('docFreq')), term_map.get(JString('collectionFreq'))
- def get_postings_list(self, term: str) -> List[Posting]:
+ def get_postings_list(self, term: str, analyze=True) -> List[Posting]:
"""Returns the postings list for a term.
Parameters
----------
term : str
- The raw (unanalyzed) term.
+ The raw term.
+ analyze : Bool
+ Whether or not analyze the term.
Returns
-------
List[Posting]
List of :class:`Posting` objects corresponding to the postings list for the term.
"""
- postings_list = self.object.getPostingsList(self.reader, JString(term.encode('utf-8')))
+ if analyze:
+ postings_list = self.object.getPostingsListForUnanalyzedTerm(self.reader, JString(term.encode('utf-8')))
+ else:
+ postings_list = self.object.getPostingsListForAnalyzedTerm(self.reader, JString(term.encode('utf-8')))
+ if postings_list is None:
+ return None
+
result = []
for posting in postings_list.toArray():
result.append(Posting(posting.getDocid(), posting.getTF(), posting.getPositions()))
diff --git a/pyserini/pyclass.py b/pyserini/pyclass.py
--- a/pyserini/pyclass.py
+++ b/pyserini/pyclass.py
@@ -66,6 +66,8 @@
JIndexReaderUtils = autoclass('io.anserini.index.IndexReaderUtils')
JDocumentVectorWeight = autoclass('io.anserini.index.IndexReaderUtils$DocumentVectorWeight')
+JAnalyzerUtils = autoclass('io.anserini.analysis.AnalyzerUtils')
+
### Generator
class JIndexHelpers:
| diff --git a/tests/test_analysis.py b/tests/test_analysis.py
--- a/tests/test_analysis.py
+++ b/tests/test_analysis.py
@@ -7,7 +7,7 @@
from pyserini.analysis import pyanalysis
from pyserini.search import pysearch
from pyserini.index import pyutils
-from pyserini.pyclass import JString
+from pyserini.pyclass import JString, JAnalyzerUtils
class TestAnalyzers(unittest.TestCase):
@@ -37,7 +37,7 @@ def test_different_analyzers_are_different(self):
def test_analyze_with_analyzer(self):
tokenizer = pyanalysis.get_analyzer('tokenize')
query = JString('information retrieval')
- only_tokenization = self.index_utils.object.analyzeWithAnalyzer(query, tokenizer)
+ only_tokenization = JAnalyzerUtils.analyze(tokenizer, query)
token_list = []
for token in only_tokenization.toArray():
token_list.append(token)
diff --git a/tests/test_indexutils.py b/tests/test_indexutils.py
--- a/tests/test_indexutils.py
+++ b/tests/test_indexutils.py
@@ -21,6 +21,7 @@
from random import randint
from urllib.request import urlretrieve
+from pyserini.analysis import pyanalysis
from pyserini.index import pyutils
from pyserini.pyclass import JString
@@ -62,13 +63,20 @@ def test_analyze(self):
self.assertEqual(' '.join(self.index_utils.analyze('retrieval')), 'retriev')
self.assertEqual(' '.join(self.index_utils.analyze('rapid retrieval, space economy')),
'rapid retriev space economi')
+ tokenizer = pyanalysis.get_analyzer('tokenize')
+ self.assertEqual(' '.join(self.index_utils.analyze('retrieval', analyzer=tokenizer)), 'retrieval')
+ self.assertEqual(' '.join(self.index_utils.analyze('rapid retrieval, space economy', analyzer=tokenizer)),
+ 'rapid retrieval space economy')
+ # Test utf encoding:
+ self.assertEqual(self.index_utils.analyze('zoölogy')[0], 'zoölog')
+ self.assertEqual(self.index_utils.analyze('zoölogy', analyzer=tokenizer)[0], 'zoölogy')
def test_term_stats(self):
df, cf = self.index_utils.get_term_counts('retrieval')
self.assertEqual(df, 138)
self.assertEqual(cf, 275)
- def test_postings(self):
+ def test_postings1(self):
term = 'retrieval'
postings = list(self.index_utils.get_postings_list(term))
self.assertEqual(len(postings), 138)
@@ -83,6 +91,26 @@ def test_postings(self):
self.assertEqual(postings[-1].tf, 1)
self.assertEqual(len(postings[-1].positions), 1)
+ def test_postings2(self):
+ self.assertIsNone(self.index_utils.get_postings_list('asdf'))
+
+ postings = list(self.index_utils.get_postings_list('retrieval', analyze=True))
+ self.assertEqual(len(postings), 138)
+
+ # If we don't analyze, then we can't find the postings list:
+ self.assertIsNone(self.index_utils.get_postings_list('retrieval', analyze=False))
+
+ # Supply the analyzed form directly, and we're good:
+ postings = list(self.index_utils.get_postings_list('retriev', analyze=False))
+ self.assertEqual(len(postings), 138)
+ postings = list(self.index_utils.get_postings_list(self.index_utils.analyze('retrieval')[0], analyze=False))
+ self.assertEqual(len(postings), 138)
+
+ # Test utf encoding:
+ self.assertEqual(self.index_utils.get_postings_list('zoölogy'), None)
+ self.assertEqual(self.index_utils.get_postings_list('zoölogy', analyze=False), None)
+ self.assertEqual(self.index_utils.get_postings_list('zoölogy', analyze=True), None)
+
def test_doc_vector(self):
doc_vector = self.index_utils.get_document_vector('CACM-3134')
self.assertEqual(len(doc_vector), 94)
@@ -123,11 +151,10 @@ def test_docid_converstion(self):
self.assertEqual(self.index_utils.convert_collection_docid_to_internal_docid('CACM-1001'), 1000)
def test_jstring_term(self):
- self.assertEqual(self.index_utils.get_term_counts("zoölogy"), (0, 0))
+ self.assertEqual(self.index_utils.get_term_counts('zoölogy'), (0, 0))
with self.assertRaises(ValueError):
# Should fail when pyjnius has solved this internally.
JString('zoölogy')
-
def tearDown(self):
os.remove(self.tarball_name)
| get_document_vector() and get_postings_list() Stemming ?
Hi @lintool!
I have a new issue:
I created a new index over the "DUC-2001" dataset by means of this command:
```
sh anserini/target/appassembler/bin/IndexCollection \
-collection TrecCollection \
-generator JsoupGenerator \
-threads 2 \
-input ${EXP}/ \
-index indexes/lucene-index.XXX \
-storePositions -storeDocvectors -storeRawDocs
```
I also installed the Luke toolbox to inspect the index and understand how it works.
When I run this code:
```
for id_ in docid:
doc_vector = index_utils.get_document_vector(id_)
bm25_score_one_doc = {}
for term_ in doc_vector:
postings_list = index_utils.get_postings_list(term_)
```
It works for some terms, but not for all of them:
```
Traceback (most recent call last):
File "doc2index_2.py", line 50, in <module>
postings_list = index_utils.get_postings_list(term_)
File "/home/poulain/.local/lib/python3.6/site-packages/pyserini/index/pyutils.py", line 118, in get_postings_list
postings_list = self.object.getPostingsList(self.reader, JString(term))
File "jnius/jnius_export_class.pxi", line 768, in jnius.JavaMethod.__call__
File "jnius/jnius_export_class.pxi", line 934, in jnius.JavaMethod.call_staticmethod
File "jnius/jnius_utils.pxi", line 91, in jnius.check_exception
jnius.JavaException: JVM exception occurred: java.lang.NullPointerException
```
I think there are two different indexes: the first one applies stemming (the word "Cherokee" becomes "cheroke"), while the second keeps the word without stemming.
So, how can I handle stemming when looking up terms in the postings index?
Best regards
| hi @Oulaolay - welcome!
To be clear, you'd want a variant of `get_postings_list` that takes an already analyzed term, right?
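For what it's worth, here is a sketch of how such a variant could look in use, mirroring the `analyze` flag that this PR's patch ends up adding to `get_postings_list` (the index path and docid are placeholders):

```python
from pyserini.index import pyutils

index_utils = pyutils.IndexReaderUtils('indexes/lucene-index.XXX')  # index path from the question above
docid = 'SOME-DOCID'  # placeholder collection docid

doc_vector = index_utils.get_document_vector(docid)  # terms here are already analyzed (stemmed)
for term in doc_vector:
    # Skip re-analysis, since the terms are already stemmed.
    postings = index_utils.get_postings_list(term, analyze=False)
    if postings is None:
        continue  # the patched method returns None when a term is missing from the index
```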
There's actually already an outstanding issue:
https://github.com/castorini/anserini/issues/990
I'm not sure when we'll get to it... but you're welcome to send a pull request... | 2020-03-06T20:45:20Z | [] | [] |
castorini/pyserini | 110 | castorini__pyserini-110 | [
"106"
] | fc68aef057df96ebfd28ebc1694a0cf5ba1225b3 | diff --git a/pyserini/index/pyutils.py b/pyserini/index/pyutils.py
--- a/pyserini/index/pyutils.py
+++ b/pyserini/index/pyutils.py
@@ -258,7 +258,7 @@ def doc_contents(self, docid: str) -> str:
"""
return self.object.documentContents(self.reader, JString(docid))
- def compute_bm25_term_weight(self, docid: str, term: str, k1=0.9, b=0.4) -> float:
+ def compute_bm25_term_weight(self, docid: str, term: str, analyzer=get_lucene_analyzer(), k1=0.9, b=0.4) -> float:
"""Computes the BM25 weight of an (analyzed) term in a document. Note that this method takes the analyzed
(i.e., stemmed) form because the most common use case is to take the term from the output of
:func:`get_document_vector`.
@@ -279,8 +279,20 @@ def compute_bm25_term_weight(self, docid: str, term: str, k1=0.9, b=0.4) -> floa
float
The BM25 weight of the term in the document, or 0 if the term does not exist in the document.
"""
- return self.object.getBM25TermWeightWithParameters(self.reader, JString(docid), JString(term.encode('utf-8')),
- float(k1), float(b))
+ if analyzer is None:
+ return self.object.getBM25AnalyzedTermWeightWithParameters(self.reader, JString(docid),
+ JString(term.encode('utf-8')),
+ float(k1), float(b))
+ else:
+ return self.object.getBM25UnanalyzedTermWeightWithParameters(self.reader, JString(docid),
+ JString(term.encode('utf-8')), analyzer,
+ float(k1), float(b))
+
+ def compute_query_document_score(self, docid: str, query: str, similarity=None):
+ if similarity is None:
+ return self.object.computeQueryDocumentScore(self.reader, docid, query)
+ else:
+ return self.object.computeQueryDocumentScoreWithSimilarity(self.reader, docid, query, similarity)
def convert_internal_docid_to_collection_docid(self, docid: int) -> str:
"""Converts Lucene's internal ``docid`` to its external collection ``docid``.
diff --git a/pyserini/pyclass.py b/pyserini/pyclass.py
--- a/pyserini/pyclass.py
+++ b/pyserini/pyclass.py
@@ -121,4 +121,3 @@ class JCollections(Enum):
TweetCollection = autoclass('io.anserini.collection.TweetCollection')
WashingtonPostCollection = autoclass('io.anserini.collection.WashingtonPostCollection')
WikipediaCollection = autoclass('io.anserini.collection.WikipediaCollection')
-
diff --git a/pyserini/search/pysearch.py b/pyserini/search/pysearch.py
--- a/pyserini/search/pysearch.py
+++ b/pyserini/search/pysearch.py
@@ -23,7 +23,7 @@
from typing import Dict, List, Union
from ..pyclass import JSimpleSearcher, JSimpleSearcherResult, JDocument, JString, JArrayList, JTopics, JTopicReader, \
- JQueryGenerator, JSimpleNearestNeighborSearcherResult, JSimpleNearestNeighborSearcher, JQuery
+ JQueryGenerator, JSimpleNearestNeighborSearcherResult, JSimpleNearestNeighborSearcher, JQuery, autoclass
logger = logging.getLogger(__name__)
@@ -80,7 +80,7 @@ def search(self, q: Union[str, JQuery], k: int = 10,
Parameters
----------
q : Union[str, JQuery]
- The query string / The JQuery.
+ The query string or the ``JQuery`` objected.
k : int
The number of hits to return.
query_generator : JQueryGenerator
@@ -94,6 +94,13 @@ def search(self, q: Union[str, JQuery], k: int = 10,
if query_generator:
return self.object.search(query_generator, JString(q), k)
elif isinstance(q, JQuery):
+ # Note that RM3 requires the notion of a query (string) to estimate the appropriate models. If we're just
+ # given a Lucene query, it's unclear what the "query" is for this estimation. One possibility is to extract
+ # all the query terms from the Lucene query, although this might yield unexpected behavior from the user's
+ # perspective. Until we think through what exactly is the "right thing to do", we'll raise an exception
+ # here explicitly.
+ if self.is_using_rm3():
+ raise NotImplementedError('RM3 incompatible with search using a Lucene query.')
return self.object.search(q, k)
else:
return self.object.search(JString(q.encode('utf8')), k)
@@ -133,11 +140,12 @@ def batch_search(self, queries: List[str], qids: List[str], k: int = 10,
return {r.getKey(): r.getValue() for r in results}
def search_fields(self, q, f, boost, k):
- """
+ """Searches the collection, scoring a separate field with a boost weight.
+
Parameters
----------
q : str
- Query string
+ Query string.
f : str
Name of additional field to search over
boost : float
@@ -161,59 +169,58 @@ def set_analyzer(self, analyzer):
"""
self.object.setAnalyzer(analyzer)
- def set_search_tweets(self, flag):
- """
- Parameters
- ----------
- flag : bool
- True if searching over tweets
- """
- self.object.setSearchTweets(flag)
+ def set_rm3(self, fb_terms=10, fb_docs=10, original_query_weight=float(0.5), rm3_output_query=False):
+ """Configures RM3 query expansion.
- def set_rm3_reranker(self, fb_terms=10, fb_docs=10,
- original_query_weight=float(0.5),
- rm3_output_query=False):
- """
Parameters
----------
fb_terms : int
- RM3 parameter for number of expansion terms
+ RM3 parameter for number of expansion terms.
fb_docs : int
- RM3 parameter for number of documents
+ RM3 parameter for number of expansion documents.
original_query_weight : float
- RM3 parameter for weight to assign to the original query
+ RM3 parameter for weight to assign to the original query.
rm3_output_query : bool
- True if we want to print original and expanded queries for RM3
+ Whether we want to print the original and expanded as debug output.
"""
- self.object.setRM3Reranker(fb_terms, fb_docs,
- original_query_weight, rm3_output_query)
+ self.object.setRM3(fb_terms, fb_docs, original_query_weight, rm3_output_query)
- def unset_rm3_reranker(self):
- """
- Parameters
- ----------
+ def unset_rm3(self):
+ """Turns off use of RM3 query expansion.
"""
- self.object.unsetRM3Reranker()
+ self.object.unsetRM3()
- def set_lm_dirichlet_similarity(self, mu):
+ def is_using_rm3(self) -> bool:
+ """Returns whether or not RM3 query expansion is being performed.
"""
+ return self.object.useRM3()
+
+ def set_qld(self, mu=float(1000)):
+ """Configures query likelihood with Dirichlet smoothing as the scoring function.
+
Parameters
----------
mu : float
- Dirichlet smoothing parameter
+ Dirichlet smoothing parameter mu.
"""
- self.object.setLMDirichletSimilarity(float(mu))
+ self.object.setQLD(float(mu))
+
+ def set_bm25(self, k1=float(0.9), b=float(0.4)):
+ """Configures BM25 as the scoring function.
- def set_bm25_similarity(self, k1, b):
- """
Parameters
----------
k1 : float
- BM25 k1 parameter
+ BM25 k1 parameter.
b : float
- BM25 b parameter
+ BM25 b parameter.
"""
- self.object.setBM25Similarity(float(k1), float(b))
+ self.object.setBM25(float(k1), float(b))
+
+ def get_similarity(self):
+ """Returns the Lucene ``Similarity`` used as the scoring function.
+ """
+ return self.object.getSimilarity()
def doc(self, docid: Union[str, int]) -> Document:
"""Returns the :class:`Document` corresponding to ``docid``. The ``docid`` is overloaded: if it is of type
@@ -305,6 +312,16 @@ def get_topics(collection_name):
return t
+class LuceneSimilarities:
+ @staticmethod
+ def bm25(k1=0.9, b=0.4):
+ return autoclass('org.apache.lucene.search.similarities.BM25Similarity')(k1, b)
+
+ @staticmethod
+ def qld(mu=1000):
+ return autoclass('org.apache.lucene.search.similarities.LMDirichletSimilarity')(mu)
+
+
class SimpleNearestNeighborSearcher:
def __init__(self, index_dir: str):
| diff --git a/tests/test_indexutils.py b/tests/test_indexutils.py
--- a/tests/test_indexutils.py
+++ b/tests/test_indexutils.py
@@ -24,6 +24,7 @@
from pyserini.analysis import pyanalysis
from pyserini.index import pyutils
from pyserini.pyclass import JString
+from pyserini.search import pysearch
class TestIndexUtils(unittest.TestCase):
@@ -40,7 +41,8 @@ def setUp(self):
tarball.extractall(self.index_dir)
tarball.close()
- self.index_utils = pyutils.IndexReaderUtils('{}lucene-index.cacm'.format(self.index_dir))
+ self.searcher = pysearch.SimpleSearcher(f'{self.index_dir}lucene-index.cacm')
+ self.index_utils = pyutils.IndexReaderUtils(f'{self.index_dir}lucene-index.cacm')
def test_terms_count(self):
# We're going to iterate through the index and make sure we have the correct number of terms.
@@ -180,14 +182,34 @@ def test_doc_by_field(self):
self.index_utils.doc_by_field('id', 'CACM-3134').docid())
def test_bm25_weight(self):
- self.assertAlmostEqual(self.index_utils.compute_bm25_term_weight('CACM-3134', 'inform', k1=1.2, b=0.75),
- 1.925014, places=5)
- self.assertAlmostEqual(self.index_utils.compute_bm25_term_weight('CACM-3134', 'retriev', k1=1.2, b=0.75),
- 2.496352, places=5)
-
- self.assertAlmostEqual(self.index_utils.compute_bm25_term_weight('CACM-3134', 'inform'), 2.06514, places=5)
- self.assertAlmostEqual(self.index_utils.compute_bm25_term_weight('CACM-3134', 'retriev'), 2.70038, places=5)
-
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'inform', analyzer=None, k1=1.2, b=0.75),
+ 1.925014, places=5)
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'information', k1=1.2, b=0.75),
+ 1.925014, places=5)
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'retriev', analyzer=None, k1=1.2, b=0.75),
+ 2.496352, places=5)
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'retrieval', k1=1.2, b=0.75),
+ 2.496352, places=5)
+
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'inform', analyzer=None),
+ 2.06514, places=5)
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'information'),
+ 2.06514, places=5)
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'retriev', analyzer=None),
+ 2.70038, places=5)
+ self.assertAlmostEqual(
+ self.index_utils.compute_bm25_term_weight('CACM-3134', 'retrieval'),
+ 2.70038, places=5)
+
+ self.assertAlmostEqual(self.index_utils.compute_bm25_term_weight('CACM-3134', 'fox', analyzer=None),
+ 0., places=5)
self.assertAlmostEqual(self.index_utils.compute_bm25_term_weight('CACM-3134', 'fox'), 0., places=5)
def test_docid_converstion(self):
@@ -202,6 +224,46 @@ def test_jstring_term(self):
# Should fail when pyjnius has solved this internally.
JString('zoölogy')
+ def test_query_doc_score_default(self):
+ queries = ['information retrieval', 'databases']
+
+ for query in queries:
+ hits = self.searcher.search(query)
+
+ # We're going to verify that the score of each hit is about the same as the output of
+ # compute_query_document_score
+ for i in range(0, len(hits)):
+ self.assertAlmostEqual(hits[i].score,
+ self.index_utils.compute_query_document_score(hits[i].docid, query), places=4)
+
+ def test_query_doc_score_custom_similarity(self):
+ custom_bm25 = pysearch.LuceneSimilarities.bm25(0.8, 0.2)
+ queries = ['information retrieval', 'databases']
+ self.searcher.set_bm25(0.8, 0.2)
+
+ for query in queries:
+ hits = self.searcher.search(query)
+
+ # We're going to verify that the score of each hit is about the same as the output of
+ # compute_query_document_score
+ for i in range(0, len(hits)):
+ self.assertAlmostEqual(hits[i].score,
+ self.index_utils.compute_query_document_score(
+ hits[i].docid, query, similarity=custom_bm25), places=4)
+
+ custom_qld = pysearch.LuceneSimilarities.qld(500)
+ self.searcher.set_qld(500)
+
+ for query in queries:
+ hits = self.searcher.search(query)
+
+ # We're going to verify that the score of each hit is about the same as the output of
+ # compute_query_document_score
+ for i in range(0, len(hits)):
+ self.assertAlmostEqual(hits[i].score,
+ self.index_utils.compute_query_document_score(
+ hits[i].docid, query, similarity=custom_qld), places=4)
+
def tearDown(self):
os.remove(self.tarball_name)
shutil.rmtree(self.index_dir)
diff --git a/tests/test_querybuilding.py b/tests/test_querybuilding.py
--- a/tests/test_querybuilding.py
+++ b/tests/test_querybuilding.py
@@ -82,23 +82,35 @@ def testBuildBoostedQuery(self):
self.assertNotEqual(h1.score, h3.score)
def testTermQuery(self):
- term_query1 = pyquerybuilder.get_term_query('information')
- term_query2 = pyquerybuilder.get_term_query('retrieval')
-
should = pyquerybuilder.JBooleanClauseOccur['should'].value
+ query_builder = pyquerybuilder.get_boolean_query_builder()
+ query_builder.add(pyquerybuilder.get_term_query('information'), should)
+ query_builder.add(pyquerybuilder.get_term_query('retrieval'), should)
- boolean_query1 = pyquerybuilder.get_boolean_query_builder()
- boolean_query1.add(term_query1, should)
- boolean_query1.add(term_query2, should)
-
- bq1 = boolean_query1.build()
- hits1 = self.searcher.search(bq1)
+ query = query_builder.build()
+ hits1 = self.searcher.search(query)
hits2 = self.searcher.search('information retrieval')
for h1, h2 in zip(hits1, hits2):
self.assertEqual(h1.docid, h2.docid)
self.assertEqual(h1.score, h2.score)
+ def testIncompatabilityWithRM3(self):
+ should = pyquerybuilder.JBooleanClauseOccur['should'].value
+ query_builder = pyquerybuilder.get_boolean_query_builder()
+ query_builder.add(pyquerybuilder.get_term_query('information'), should)
+ query_builder.add(pyquerybuilder.get_term_query('retrieval'), should)
+
+ query = query_builder.build()
+ hits = self.searcher.search(query)
+ self.assertEqual(10, len(hits))
+
+ self.searcher.set_rm3()
+ self.assertTrue(self.searcher.is_using_rm3())
+
+ with self.assertRaises(NotImplementedError):
+ self.searcher.search(query)
+
def testTermQuery2(self):
term_query1 = pyquerybuilder.get_term_query('inform', analyzer=get_lucene_analyzer(stemming=False))
term_query2 = pyquerybuilder.get_term_query('retriev', analyzer=get_lucene_analyzer(stemming=False))
diff --git a/tests/test_search.py b/tests/test_search.py
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -22,7 +22,7 @@
from typing import List, Dict
from urllib.request import urlretrieve
-from pyserini.pyclass import JSimpleSearcherResult
+from pyserini.pyclass import JSimpleSearcherResult, autoclass
from pyserini.search import pysearch
@@ -43,6 +43,8 @@ def setUp(self):
self.searcher = pysearch.SimpleSearcher(f'{self.index_dir}lucene-index.cacm')
def test_basic(self):
+ self.assertTrue(self.searcher.get_similarity().toString().startswith('BM25'))
+
hits = self.searcher.search('information retrieval')
self.assertEqual(3204, self.searcher.num_docs)
@@ -119,6 +121,82 @@ def test_batch_k(self):
self.assertTrue(isinstance(results['q2'][0], JSimpleSearcherResult))
self.assertEqual(len(results['q2']), 100)
+ def test_different_similarity(self):
+ # qld, default mu
+ self.searcher.set_qld()
+ self.assertTrue(self.searcher.get_similarity().toString().startswith('LM Dirichlet'))
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 3.68030, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-1927')
+ self.assertAlmostEqual(hits[9].score, 2.53240, places=5)
+
+ # bm25, default parameters
+ self.searcher.set_bm25()
+ self.assertTrue(self.searcher.get_similarity().toString().startswith('BM25'))
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 4.76550, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-2516')
+ self.assertAlmostEqual(hits[9].score, 4.21740, places=5)
+
+ # qld, custom mu
+ self.searcher.set_qld(100)
+ self.assertTrue(self.searcher.get_similarity().toString().startswith('LM Dirichlet'))
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 6.35580, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-2631')
+ self.assertAlmostEqual(hits[9].score, 5.18960, places=5)
+
+ # bm25, custom parameters
+ self.searcher.set_bm25(0.8, 0.3)
+ self.assertTrue(self.searcher.get_similarity().toString().startswith('BM25'))
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 4.86880, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-2516')
+ self.assertAlmostEqual(hits[9].score, 4.33320, places=5)
+
+ def test_rm3(self):
+ self.searcher.set_rm3()
+ self.assertTrue(self.searcher.is_using_rm3())
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 2.18010, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-2516')
+ self.assertAlmostEqual(hits[9].score, 1.70330, places=5)
+
+ self.searcher.unset_rm3()
+ self.assertFalse(self.searcher.is_using_rm3())
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 4.76550, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-2516')
+ self.assertAlmostEqual(hits[9].score, 4.21740, places=5)
+
+ self.searcher.set_rm3(fb_docs=4, fb_terms=6, original_query_weight=0.3)
+ self.assertTrue(self.searcher.is_using_rm3())
+
+ hits = self.searcher.search('information retrieval')
+
+ self.assertEqual(hits[0].docid, 'CACM-3134')
+ self.assertAlmostEqual(hits[0].score, 2.17190, places=5)
+ self.assertEqual(hits[9].docid, 'CACM-1457')
+ self.assertAlmostEqual(hits[9].score, 1.43700, places=5)
+
def test_doc_int(self):
# The doc method is overloaded: if input is int, it's assumed to be a Lucene internal docid.
doc = self.searcher.doc(1)
| searcher can't take custom constructed query with RM3 set
With RM3 enabled, we get an NPE when we try to search with a query built using the querybuilder.
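A minimal sketch of the failing pattern, following the regression test added in this PR (the index path is illustrative and the `pyquerybuilder` import path is an assumption):

```python
from pyserini.search import pysearch, pyquerybuilder  # pyquerybuilder path is an assumption

searcher = pysearch.SimpleSearcher('indexes/lucene-index.cacm')  # illustrative index path

should = pyquerybuilder.JBooleanClauseOccur['should'].value
builder = pyquerybuilder.get_boolean_query_builder()
builder.add(pyquerybuilder.get_term_query('information'), should)
builder.add(pyquerybuilder.get_term_query('retrieval'), should)
query = builder.build()

searcher.search(query)   # fine: plain Lucene query, no RM3

searcher.set_rm3()       # enable RM3 query expansion
searcher.search(query)   # previously hit an NPE; with this patch it raises NotImplementedError
```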
| 2020-05-17T13:36:52Z | [] | [] |
|
castorini/pyserini | 614 | castorini__pyserini-614 | [
"609"
] | f97b4675e3ce36cb517fae1c28aa11143b14e4fe | diff --git a/integrations/run_simplesearcher.py b/integrations/run_simplesearcher.py
--- a/integrations/run_simplesearcher.py
+++ b/integrations/run_simplesearcher.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,11 +14,10 @@
# limitations under the License.
#
+import hashlib
import os
from typing import List
-import hashlib
-
class RunSimpleSearcher:
def __init__(self, index: str, topics: str):
diff --git a/integrations/simplesearcher_anserini_checker.py b/integrations/simplesearcher_anserini_checker.py
--- a/integrations/simplesearcher_anserini_checker.py
+++ b/integrations/simplesearcher_anserini_checker.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/simplesearcher_score_checker.py b/integrations/simplesearcher_score_checker.py
--- a/integrations/simplesearcher_score_checker.py
+++ b/integrations/simplesearcher_score_checker.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,9 @@
# limitations under the License.
#
-import filecmp
import os
from typing import List
+
from integrations.utils import run_command, parse_score
diff --git a/integrations/utils.py b/integrations/utils.py
--- a/integrations/utils.py
+++ b/integrations/utils.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import os
import subprocess
diff --git a/pyserini/analysis/__init__.py b/pyserini/analysis/__init__.py
--- a/pyserini/analysis/__init__.py
+++ b/pyserini/analysis/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/analysis/_base.py b/pyserini/analysis/_base.py
--- a/pyserini/analysis/_base.py
+++ b/pyserini/analysis/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/collection/__init__.py b/pyserini/collection/__init__.py
--- a/pyserini/collection/__init__.py
+++ b/pyserini/collection/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/collection/_base.py b/pyserini/collection/_base.py
--- a/pyserini/collection/_base.py
+++ b/pyserini/collection/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/collection/_collection_support.py b/pyserini/collection/_collection_support.py
--- a/pyserini/collection/_collection_support.py
+++ b/pyserini/collection/_collection_support.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/demo/msmarco.py b/pyserini/demo/msmarco.py
--- a/pyserini/demo/msmarco.py
+++ b/pyserini/demo/msmarco.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/dindex/__init__.py b/pyserini/dindex/__init__.py
--- a/pyserini/dindex/__init__.py
+++ b/pyserini/dindex/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/dindex/__main__.py b/pyserini/dindex/__main__.py
--- a/pyserini/dindex/__main__.py
+++ b/pyserini/dindex/__main__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/dindex/_base.py b/pyserini/dindex/_base.py
--- a/pyserini/dindex/_base.py
+++ b/pyserini/dindex/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/dsearch/__init__.py b/pyserini/dsearch/__init__.py
--- a/pyserini/dsearch/__init__.py
+++ b/pyserini/dsearch/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/dsearch/__main__.py b/pyserini/dsearch/__main__.py
--- a/pyserini/dsearch/__main__.py
+++ b/pyserini/dsearch/__main__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/dsearch/_dsearcher.py b/pyserini/dsearch/_dsearcher.py
--- a/pyserini/dsearch/_dsearcher.py
+++ b/pyserini/dsearch/_dsearcher.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,10 +13,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
"""
This module provides Pyserini's dense search interface to FAISS index.
The main entry point is the ``SimpleDenseSearcher`` class.
"""
+
import os
from dataclasses import dataclass
from typing import Dict, List, Union
diff --git a/pyserini/dsearch/_model.py b/pyserini/dsearch/_model.py
--- a/pyserini/dsearch/_model.py
+++ b/pyserini/dsearch/_model.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
from typing import Optional
from transformers import PreTrainedModel, RobertaConfig, RobertaModel
diff --git a/pyserini/encoded_query_info.py b/pyserini/encoded_query_info.py
--- a/pyserini/encoded_query_info.py
+++ b/pyserini/encoded_query_info.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
QUERY_INFO = {
"tct_colbert-msmarco-passage-dev-subset": {
"description": "MS MARCO passage dev set queries encoded by TCT-ColBERT",
diff --git a/pyserini/eval/convert_msmarco_run_to_trec_run.py b/pyserini/eval/convert_msmarco_run_to_trec_run.py
--- a/pyserini/eval/convert_msmarco_run_to_trec_run.py
+++ b/pyserini/eval/convert_msmarco_run_to_trec_run.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/eval/convert_trec_run_to_dpr_retrieval_run.py b/pyserini/eval/convert_trec_run_to_dpr_retrieval_run.py
--- a/pyserini/eval/convert_trec_run_to_dpr_retrieval_run.py
+++ b/pyserini/eval/convert_trec_run_to_dpr_retrieval_run.py
@@ -1,7 +1,22 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
-import sys
from tqdm import tqdm
from pyserini.search import SimpleSearcher, get_topics
diff --git a/pyserini/eval/evaluate_dpr_retrieval.py b/pyserini/eval/evaluate_dpr_retrieval.py
--- a/pyserini/eval/evaluate_dpr_retrieval.py
+++ b/pyserini/eval/evaluate_dpr_retrieval.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/eval/msmarco_doc_eval.py b/pyserini/eval/msmarco_doc_eval.py
--- a/pyserini/eval/msmarco_doc_eval.py
+++ b/pyserini/eval/msmarco_doc_eval.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import os
import subprocess
import sys
diff --git a/pyserini/eval/msmarco_passage_eval.py b/pyserini/eval/msmarco_passage_eval.py
--- a/pyserini/eval/msmarco_passage_eval.py
+++ b/pyserini/eval/msmarco_passage_eval.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import os
import subprocess
import sys
diff --git a/pyserini/eval/trec_eval.py b/pyserini/eval/trec_eval.py
--- a/pyserini/eval/trec_eval.py
+++ b/pyserini/eval/trec_eval.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import os
import subprocess
import sys
diff --git a/pyserini/evaluate_script_info.py b/pyserini/evaluate_script_info.py
--- a/pyserini/evaluate_script_info.py
+++ b/pyserini/evaluate_script_info.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
EVALUATION_INFO = {
"trec_eval": {
"description": "TREC evaluation script",
diff --git a/pyserini/external_query_info.py b/pyserini/external_query_info.py
--- a/pyserini/external_query_info.py
+++ b/pyserini/external_query_info.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
KILT_QUERY_INFO = {
"fever-dev-kilt": {
"description": "KILT FEVER dev set",
diff --git a/pyserini/fusion/__init__.py b/pyserini/fusion/__init__.py
--- a/pyserini/fusion/__init__.py
+++ b/pyserini/fusion/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/fusion/__main__.py b/pyserini/fusion/__main__.py
--- a/pyserini/fusion/__main__.py
+++ b/pyserini/fusion/__main__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/fusion/_base.py b/pyserini/fusion/_base.py
--- a/pyserini/fusion/_base.py
+++ b/pyserini/fusion/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/hsearch/__init__.py b/pyserini/hsearch/__init__.py
--- a/pyserini/hsearch/__init__.py
+++ b/pyserini/hsearch/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/hsearch/__main__.py b/pyserini/hsearch/__main__.py
--- a/pyserini/hsearch/__main__.py
+++ b/pyserini/hsearch/__main__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/hsearch/_hybrid.py b/pyserini/hsearch/_hybrid.py
--- a/pyserini/hsearch/_hybrid.py
+++ b/pyserini/hsearch/_hybrid.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,9 +13,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
"""
This module provides Pyserini's hybrid searcher by Dense + Sparse
"""
+
from typing import List, Dict
from pyserini.search import SimpleSearcher
from pyserini.dsearch import SimpleDenseSearcher, DenseSearchResult
diff --git a/pyserini/index/__init__.py b/pyserini/index/__init__.py
--- a/pyserini/index/__init__.py
+++ b/pyserini/index/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/index/__main__.py b/pyserini/index/__main__.py
--- a/pyserini/index/__main__.py
+++ b/pyserini/index/__main__.py
@@ -1,6 +1,5 @@
-# -*- coding: utf-8 -*-
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
from jnius import autoclass
import sys
diff --git a/pyserini/index/_base.py b/pyserini/index/_base.py
--- a/pyserini/index/_base.py
+++ b/pyserini/index/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/ltr/__init__.py b/pyserini/ltr/__init__.py
--- a/pyserini/ltr/__init__.py
+++ b/pyserini/ltr/__init__.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
from ._base import FeatureExtractor, BM25Stat, LmDirStat, DfrGl2Stat, DfrInExpB2Stat, DphStat, Proximity, TpScore, TpDist,\
DocSize, MatchingTermCount, QueryLength, SCS, SumMatchingTF, UniqueTermCount, QueryCoverageRatio, \
UnorderedSequentialPairs, OrderedSequentialPairs, UnorderedQueryPairs, OrderedQueryPairs, \
diff --git a/pyserini/ltr/_base.py b/pyserini/ltr/_base.py
--- a/pyserini/ltr/_base.py
+++ b/pyserini/ltr/_base.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
from ..pyclass import autoclass, JString, JArrayList
import json
import numpy as np
diff --git a/pyserini/multithreading.py b/pyserini/multithreading.py
--- a/pyserini/multithreading.py
+++ b/pyserini/multithreading.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import threading
diff --git a/pyserini/output_writer.py b/pyserini/output_writer.py
--- a/pyserini/output_writer.py
+++ b/pyserini/output_writer.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/prebuilt_index_info.py b/pyserini/prebuilt_index_info.py
--- a/pyserini/prebuilt_index_info.py
+++ b/pyserini/prebuilt_index_info.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
INDEX_INFO = {
"cacm": {
"description": "CACM corpus",
diff --git a/pyserini/pyclass.py b/pyserini/pyclass.py
--- a/pyserini/pyclass.py
+++ b/pyserini/pyclass.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/query_iterator.py b/pyserini/query_iterator.py
--- a/pyserini/query_iterator.py
+++ b/pyserini/query_iterator.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import os
import json
diff --git a/pyserini/query_iterator_order_info.py b/pyserini/query_iterator_order_info.py
--- a/pyserini/query_iterator_order_info.py
+++ b/pyserini/query_iterator_order_info.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
QUERY_IDS = {
'msmarco-doc-dev': [174249, 320792, 1090270, 1101279, 201376, 54544, 118457, 178627, 1101278, 68095, 87892, 257309, 1090242, 211691, 165002, 1101276, 264827, 342285, 372586, 89786, 118448, 92542, 206117, 141472, 196232, 352818, 208145, 79891, 208494, 319564, 155234, 14151, 67802, 1090184, 323382, 323998, 289812, 333486, 1090171, 73257, 1090170, 127876, 1090165, 259417, 1101271, 205107, 307118, 335710, 127984, 1090151, 1090146, 1090132, 1090115, 1090110, 1090107, 1090086, 1090077, 1090072, 1090054, 1101259, 1089983, 1089966, 1089964, 1089940, 1089925, 1089896, 1101236, 1089868, 1089846, 1089832, 1089810, 1101228, 1089804, 1089787, 1089776, 1089763, 1089760, 1089719, 1089706, 1089693, 1089691, 1089688, 1089683, 1089674, 1101214, 1089645, 1101211, 1089619, 1089597, 1089576, 1089560, 1089541, 1089511, 1089501, 1089469, 1089443, 1089438, 1089434, 1089414, 1089408, 1089401, 1089355, 1089325, 1089312, 1089293, 1089286, 1089273, 1101173, 1101172, 1101171, 1089177, 1089167, 1089158, 1089156, 1089143, 1089121, 1089093, 1089085, 1089071, 1089051, 1089044, 1089043, 1089036, 1089027, 1089026, 1089022, 1089021, 1089002, 1089001, 1088993, 1088987, 1088973, 1088960, 1088958, 1088947, 1088938, 1088928, 1088903, 1088889, 1088884, 1088869, 1088856, 1088845, 1088832, 1088800, 1101131, 1102300, 1088758, 1088734, 1088718, 1088693, 1101121, 1088685, 1088628, 1088606, 1088541, 1088539, 1088475, 1088453, 1101090, 1088437, 1101088, 1088379, 1088358, 1088349, 1088302, 1088164, 1088153, 1088138, 1088043, 1087999, 1087959, 1101048, 1087915, 1087911, 1087869, 1087858, 1087848, 1087803, 1087795, 1087774, 1087766, 1087736, 1087729, 1087727, 1087722, 1087687, 1087680, 1087675, 1087634, 1101018, 1087589, 1087581, 1087566, 1087556, 1087532, 1087514, 1087492, 1087487, 1087486, 1087484, 1087425, 1087375, 1087361, 1087351, 1087327, 1087317, 1087309, 1087238, 1087226, 1087215, 1100986, 1087204, 1087185, 1087173, 1087171, 1100980, 1087129, 1087122, 1087114, 1087105, 1087077, 1087076, 1087074, 1087066, 1087061, 1087047, 1087046, 1087042, 1087018, 1087014, 1087001, 1086974, 1086933, 1086928, 1086927, 1086917, 1086915, 1086893, 1086886, 1086883, 1086874, 1086860, 1086855, 1086836, 1086834, 1086765, 1086760, 1086715, 1086713, 1100937, 1086708, 1086701, 1086681, 1086679, 1100933, 1086628, 1100930, 1086595, 1086581, 1086565, 1086555, 1086532, 1086477, 1100919, 1086468, 1086439, 1086430, 1086424, 1086391, 1086385, 1086384, 1086326, 1086309, 1086288, 1086281, 1086271, 1086266, 1086248, 1086241, 1086224, 1086200, 1086174, 1086120, 1086075, 1086046, 1086022, 1086014, 1085980, 1085967, 1100875, 1085943, 1085918, 1085889, 1085845, 1085842, 1085812, 1085804, 1085796, 1085780, 1085779, 1085764, 1085762, 1085760, 1085733, 1085697, 1085674, 1100852, 1085658, 1085630, 1085613, 1085586, 1085584, 1085572, 1085550, 1085533, 1085532, 1085521, 1085517, 1100839, 1085510, 1085457, 1085456, 1085454, 1085441, 1085422, 1085421, 1085393, 1085356, 1085348, 1085341, 1085339, 1085327, 1085319, 1085288, 1085279, 1085245, 1085229, 1100816, 1085197, 1085141, 1085048, 1085035, 1085013, 1085008, 1084986, 1084982, 1084971, 1084910, 1084906, 1084898, 1084889, 1084887, 1100783, 1084848, 1084838, 1084814, 1084769, 1084755, 1084722, 1084713, 1084712, 1084686, 1084603, 1084582, 1084518, 1084516, 1084512, 1084478, 1084475, 1084469, 1084408, 1084403, 1084389, 1084383, 1084354, 1084336, 1084330, 1084326, 1084324, 1084308, 1084301, 1100732, 1084276, 1084273, 1084233, 1084230, 1084197, 1100724, 1084192, 1084086, 1084076, 1084075, 1084038, 1083997, 1083948, 1083945, 1083933, 
1083926, 1083909, 1083832, 1083831, 1083822, 1083800, 1083797, 1100687, 1083783, 1083727, 1083721, 1083704, 1083690, 1083686, 1083641, 1083627, 1083597, 1083584, 1083535, 1083517, 1083502, 1083500, 1083499, 1083472, 1083443, 1083430, 1083428, 1083410, 1083362, 1083345, 1083341, 1083340, 1083332, 1083307, 1083293, 1100639, 1083285, 1083278, 1083268, 1083267, 1100634, 1083161, 1083158, 1083152, 1083127, 1083125, 1083108, 1083095, 1083092, 1083085, 1083017, 1083010, 1083000, 1082948, 1082947, 1082924, 1082893, 1082870, 1082840, 1082835, 1082807, 1082792, 1082779, 1082759, 1082751, 1082750, 1082730, 1082668, 1082653, 1082622, 1082607, 1100581, 1082536, 1082531, 1082502, 1082455, 1082445, 1082427, 1082384, 1082377, 1082351, 1082341, 1082339, 1082332, 1082281, 1082265, 1082263, 1082242, 1082117, 1100544, 1081946, 1100541, 1081730, 1081609, 1081595, 1081338, 1100537, 1081091, 1081086, 1080970, 1080950, 1100533, 1080555, 1080406, 1080253, 1080031, 1080010, 1102240, 1079535, 1079340, 1079086, 1079050, 1078906, 1078765, 1077844, 1077019, 1077006, 1077002, 1100499, 1076269, 1075980, 1100492, 1075919, 1075741, 1075656, 1100488, 1075608, 1075591, 1075348, 1075313, 1075262, 1074997, 1074995, 1074989, 1074949, 1074883, 1074807, 1074804, 1074001, 1073640, 1073569, 1073365, 1073358, 1072874, 1072750, 1072603, 1072513, 1100458, 1072500, 1100457, 1100455, 1072188, 1100454, 1071992, 1071598, 1071545, 1071534, 1071389, 1071198, 1070546, 1070452, 1070324, 1070131, 1100438, 1069981, 1069717, 1069521, 1069405, 1069344, 1069222, 1069128, 1068952, 1068715, 1068584, 1068290, 1068276, 1067826, 1067772, 1067764, 1067724, 1067659, 1100415, 1067640, 1067587, 1067284, 1067276, 1066971, 1066966, 1066958, 1066916, 1066709, 1100403, 1066161, 1065971, 1065712, 1065650, 1065558, 1065551, 1065494, 1065160, 1064961, 1064808, 1064687, 1064206, 1064195, 1063974, 1063892, 1063777, 1063758, 1063702, 1063644, 1063607, 1063478, 1063349, 1100370, 1062961, 1062928, 1062784, 1062744, 1062687, 1062589, 1062511, 1062350, 1062332, 1062223, 1061762, 1061472, 1061324, 1061237, 1061210, 1061167, 1060881, 1060868, 1060795, 1060623, 1060566, 1060496, 1060462, 1060391, 1060040, 1060039, 1059698, 1059601, 1059421, 1059420, 1059287, 1059077, 1059045, 1100319, 1058952, 1058885, 1058604, 1100308, 1058601, 1058515, 1058470, 1058442, 1058325, 1058271, 1058182, 1058141, 1100299, 1058036, 1057996, 1057937, 1057708, 1057656, 1057539, 1057334, 1057139, 1057112, 1057015, 1056758, 1056742, 1056726, 1056644, 1056580, 1056437, 1056211, 1056163, 1056159, 1056060, 1056057, 1055940, 1055889, 1055717, 1055505, 1055197, 1055125, 1054999, 1054969, 1054958, 1054923, 1054707, 1054468, 1054451, 1054450, 1054440, 1054438, 1054189, 1054023, 1053931, 1053901, 1053896, 1053716, 1100229, 1102206, 1100224, 1053253, 1053219, 1052985, 1052965, 1100218, 1052640, 1052115, 1052089, 1051942, 1051902, 1051808, 1051571, 1051530, 1051520, 1051475, 1051422, 1051372, 1051352, 1051339, 1051307, 1100190, 1051285, 1051229, 1051223, 1051214, 1051211, 1100188, 1051112, 1100187, 1051108, 1050923, 1050778, 1050695, 1050231, 1049955, 1100173, 1049791, 1049774, 1049456, 1100168, 1049329, 1100167, 1049085, 1048917, 1048642, 1048381, 1048359, 1048303, 1048282, 1048281, 1100151, 1048185, 1047917, 1047913, 1047854, 1047843, 1047833, 1047794, 1047738, 1047708, 1047702, 1047700, 1047662, 1047642, 1047629, 1047599, 1100137, 1047548, 1047386, 1047365, 1047160, 1047138, 1047010, 1046969, 1046931, 1046736, 1100119, 1046569, 1046520, 1046475, 1046463, 1046161, 1100106, 1046047, 1046042, 1100105, 1045855, 
1045826, 1045717, 1045709, 1045567, 1100094, 1045554, 1045540, 1045527, 1045494, 1045374, 1045347, 1045229, 1045227, 1045208, 1045135, 1045071, 1100077, 1100070, 1043914, 1043815, 1043658, 1043568, 1043545, 1043337, 1043064, 1042978, 1042800, 1042752, 1042626, 1042426, 1042364, 1042158, 1042099, 1041951, 1041948, 1041924, 1100035, 1041520, 1041226, 1041146, 1041043, 1040959, 1040848, 1040703, 1040532, 1040409, 1040312, 1040099, 1040088, 1040082, 1040064, 1100010, 1040038, 1040030, 1039728, 1039521, 1039298, 1099998, 1039002, 1038879, 1038859, 1038830, 1038724, 1038527, 1099985, 1038184, 1099981, 1099980, 1037872, 1037817, 1037689, 1037250, 1036784, 1036782, 1036627, 1102177, 1036385, 1036380, 1036244, 1036214, 1036005, 1035931, 1035874, 1035805, 1035719, 1035535, 1035383, 1035379, 1035367, 1035321, 1035278, 1035247, 1035006, 1034845, 1034761, 1034703, 1034680, 1034666, 1034595, 1034587, 1034446, 1034409, 1034204, 1034136, 1099914, 1033962, 1033927, 1099911, 1033725, 1033718, 1033652, 1033534, 1033398, 1033250, 1033249, 1033205, 1033092, 1032822, 1032758, 1032341, 1032281, 1099888, 1032198, 1032182, 1032019, 1031976, 1031861, 1031684, 1031682, 1031054, 1031033, 1031032, 1030924, 1030823, 1030722, 1030623, 1099859, 1030381, 1099855, 1030378, 1030324, 1030176, 1029909, 1029908, 1029772, 1029617, 1029552, 1029544, 1029492, 1099836, 1099834, 1029124, 1029058, 1029031, 1029030, 1029003, 1028796, 1028755, 1028753, 1099823, 1028711, 1028608, 1028598, 1028538, 1099816, 1028179, 1028098, 1027919, 1027817, 1027812, 1102163, 1027373, 1099806, 1099805, 1027178, 1026991, 1026799, 1026768, 1026148, 1025991, 1025801, 1025624, 1025483, 1025290, 1025270, 1025259, 1024904, 1024893, 1024727, 1024672, 1024667, 1024592, 1024591, 1024528, 1024288, 1024221, 1024166, 1024069, 1099756, 1023025, 1022907, 1022832, 1022782, 1022712, 1022621, 1022442, 1022410, 1022370, 1022359, 1022198, 1022178, 1022124, 1021971, 1099729, 1021931, 1021900, 1099726, 1021797, 1021695, 1021605, 1021327, 1021324, 1021170, 1021065, 1021053, 1020907, 1020724, 1020500, 1099706, 1020244, 1099700, 1019783, 1019724, 1019649, 1019414, 1019262, 1018918, 1018807, 1018658, 1018359, 1099670, 1018056, 1018032, 1017971, 1017952, 1017773, 1017706, 1017692, 1017687, 1017605, 1017529, 1017524, 1017476, 1017276, 1017204, 1016915, 1016879, 1099653, 1016676, 1016611, 1016583, 1016281, 1016154, 1016015, 1016013, 1015641, 1099636, 1015347, 1015307, 1099632, 1014885, 1014264, 1099626, 1014210, 1014132, 1013965, 1013797, 1013615, 1013592, 1013579, 1013570, 1013492, 1013424, 1013304, 1013267, 1013229, 1013114, 1012866, 1012865, 1099595, 1012547, 1012431, 1012026, 1011925, 1011713, 1011618, 1011512, 1011381, 1011328, 1011248, 1011166, 1011140, 1011120, 1011044, 1011021, 1011018, 1010615, 1010527, 1010524, 1010287, 1010057, 1010048, 1009994, 1009961, 1009749, 1009742, 1009724, 1009695, 1009610, 1009527, 1009388, 1009237, 1009183, 1009023, 1008979, 1008977, 1008968, 1008951, 1008947, 1008911, 1099495, 1008515, 1099482, 1007972, 1007959, 1007934, 1007696, 1007691, 1007673, 1007628, 1007606, 1007550, 1007473, 1007242, 1006911, 1006791, 1006751, 1006578, 1006459, 1006199, 1005798, 1099452, 1099451, 1005586, 1005131, 1005113, 1004949, 1004921, 1004258, 1004254, 1004243, 1004240, 1004233, 1004228, 1004167, 1003849, 1003831, 1003590, 1003482, 1003351, 1003319, 1003277, 1003239, 1003003, 1002940, 1002938, 1002889, 1002716, 1099391, 1002585, 1002584, 1002554, 1002482, 1002426, 1002330, 1002274, 1002252, 1002238, 1002197, 1002148, 1002058, 1001999, 1099368, 1001926, 1001903, 
1001810, 1001454, 1000864, 1000798, 1000681, 1000574, 1000519, 1000459, 1000319, 1000272, 1102121, 1000083, 1000030, 1000017, 1000006, 1000004, 1000000, 999942, 999836, 999791, 999691, 999685, 999567, 999552, 999550, 999517, 999469, 999439, 999416, 999385, 999356, 1099321, 999192, 999110, 999089, 999086, 998965, 998905, 998891, 998802, 998735, 998681, 998680, 998675, 998658, 998646, 998641, 998591, 998493, 998417, 998309, 998247, 998246, 998174, 998101, 1099290, 998013, 997935, 997932, 1099288, 997878, 997860, 1099284, 997744, 997713, 997649, 997648, 997542, 997481, 997449, 997351, 997086, 997044, 996922, 996835, 996825, 996805, 996623, 996414, 996328, 996272, 996181, 996119, 995806, 995805, 995787, 995756, 995380, 995280, 995221, 995141, 994867, 994830, 1099226, 994792, 994688, 994533, 994479, 994397, 1099219, 994338, 1099217, 994228, 994133, 994085, 994005, 993996, 993987, 993834, 993821, 993795, 993748, 993627, 993320, 993255, 993178, 993153, 993041, 992950, 992949, 992946, 992840, 992839, 992677, 992660, 992659, 992605, 992559, 992535, 992531, 1099178, 992433, 992407, 992367, 992365, 992363, 992257, 992224, 992193, 992191, 992132, 992120, 991854, 991832, 991782, 991762, 991685, 991471, 991342, 991324, 991210, 991207, 991171, 991111, 991044, 991032, 990995, 990938, 990852, 990841, 990763, 990649, 990414, 990375, 990345, 990026, 989894, 989870, 989831, 989573, 989530, 1099105, 989296, 989213, 989042, 988954, 988915, 1102400, 988787, 988745, 988743, 988710, 988653, 988636, 988540, 988504, 1099072, 988253, 988124, 988122, 988121, 988119, 1099065, 987845, 987823, 987822, 987809, 987671, 987573, 987567, 987502, 987309, 987237, 987230, 1099050, 987192, 987183, 987066, 986936, 986852, 986793, 986791, 986733, 986494, 986484, 986427, 986411, 986316, 986210, 986162, 986068, 985461, 985433, 985431, 985372, 985360, 985275, 985259, 985173, 985167, 985165, 985158, 984992, 984948, 984856, 984774, 129837, 241405, 61452, 173001, 197024, 81993, 186446, 86624, 98817, 246626, 373121, 240504, 112035, 141353, 11006, 235832, 96379, 1098967, 298565, 86094, 141694, 320117, 281002, 353623, 1098953, 60357, 58583, 262974, 334754, 36214, 96749, 181394, 296993, 75608, 83448, 270603, 1098927, 183046, 362845, 164528, 244821, 95409, 293401, 1098909, 176015, 323798, 10157, 137919, 8854, 1098905, 89777, 97895, 149447, 239516, 299350, 323535, 1098895, 1098874, 253678, 160562, 282530, 166043, 357162, 328629, 1098860, 122440, 53813, 10312, 88577, 1098846, 109276, 15382, 29097, 185009, 98682, 230082, 194531, 168069, 1098809, 1098806, 1098804, 168238, 242219, 127315, 203688, 176994, 160255, 47864, 292676, 222954, 36965, 272500, 2962, 125545, 1098765, 11133, 1098763, 118365, 172981, 96310, 276338, 80590, 131665, 125996, 27618, 210690, 334916, 136209, 92437, 24979, 277785, 227591, 249321, 136098, 307521, 1098698, 264594, 169778, 158887, 135516, 15607, 237945, 164912, 125627, 20597, 339888, 276298, 234651, 129565, 12903, 145821, 180592, 1098646, 176677, 9926, 1098641, 265960, 172787, 94865, 135386, 234998, 100616, 1098608, 305361, 61882, 338713, 1098600, 102506, 128113, 44072, 128200, 334433, 329901, 153027, 90941, 197964, 49802, 184452, 229325, 231292, 273481, 30188, 244808, 101451, 191971, 108622, 150087, 182393, 181222, 144491, 258485, 292094, 1098523, 277632, 1098520, 97295, 188908, 1098510, 107812, 310853, 208339, 1098497, 149790, 132263, 106508, 314907, 371695, 1098481, 12741, 305333, 28216, 20671, 320320, 86264, 220151, 316803, 70340, 223468, 59217, 276329, 236580, 130932, 139239, 206549, 234821, 93308, 174273, 278863, 
199572, 285656, 31432, 347491, 207251, 54531, 56033, 300312, 107077, 160885, 209651, 1098355, 143464, 1098354, 183201, 1098338, 222158, 159667, 61180, 1098322, 99183, 85954, 153794, 239189, 195693, 209764, 190307, 343976, 29169, 86701, 24115, 123975, 167436, 160339, 267644, 119534, 10276, 21765, 119975, 165807, 195582, 114037, 282397, 1102028, 165480, 279718, 1098222, 103125, 181144, 55691, 212236, 359499, 119168, 19457, 162351, 371204, 190212, 183874, 357664, 259128, 1098180, 249792, 232703, 1098169, 133037, 226461, 318073, 277701, 183723, 228474, 62648, 72613, 53814, 17848, 139897, 328611, 154633, 259239, 137411, 22882, 309402, 114638, 324645, 280927, 311067, 1098111, 118702, 1098110, 76770, 201366, 195440, 1098102, 142411, 234165, 1098090, 337073, 167156, 18101, 75342, 267012, 193742, 36473, 15039, 264410, 161224, 286160, 132359, 191632, 1098057, 170788, 1098048, 136700, 242713, 186390, 1102390, 324159, 196111, 78730, 40056, 9454, 295406, 184436, 78076, 265729, 212195, 152519, 126491, 57402, 139929, 131873, 185276, 27932, 160787, 323154, 377805, 1098013, 57882, 1098010, 334904, 20356, 303777, 367290, 284072, 1097999, 236949, 288884, 58801, 200296, 142039, 260762, 253965, 46579, 85904, 92260, 167566, 146244, 338040, 123710, 306105, 299094, 178468, 337209, 1097939, 74759, 202797, 1097937, 47741, 277799, 308687, 58571, 1102001, 323555, 259885, 1097909, 1097906, 255027, 1097905, 132104, 126525, 1097894, 142382, 1097885, 185299, 264150, 39577, 289556, 290499, 107283, 57614, 74356, 310948, 211621, 1101995, 294518, 329958, 157149, 149853, 204924, 144857, 227637, 207595, 337190, 113664, 181301, 237561, 62439, 362076, 1097796, 88284, 139767, 13397, 239511, 214040, 226509, 1097786, 202073, 250367, 313940, 326509, 236427, 32642, 267187, 51276, 145877, 256052, 10205, 112718, 342115, 244902, 212634, 323096, 46095, 1097723, 65583, 1097721, 64179, 139090, 299939, 184235, 196596, 154301, 82293, 267341, 1097674, 17635, 262232, 93649, 285537, 51090, 34039, 1215, 335711, 66154, 276208, 144694, 21861, 250636, 299110, 302337, 163602, 31595, 146598, 70852, 271038, 30956, 193866, 249802, 116939, 69506, 328474, 189466, 1097602, 1101977, 140804, 325292, 203390, 298550, 30860, 143293, 192894, 283154, 551309, 65038, 448630, 459280, 757275, 116820, 128772, 432653, 550565, 573954, 37952, 706950, 492853, 451406, 571103, 699510, 418353, 441409, 438286, 533105, 689851, 694561, 405660, 163860, 559507, 464860, 583234, 392393, 431602, 298940, 560673, 709342, 409854, 398447, 633399, 193581, 559198, 613852, 390484, 656250, 407131, 742822, 606944, 1097508, 433691, 701335, 217246, 455776, 723144, 409143, 444790, 408765, 515335, 758074, 483795, 591898, 468762, 462301, 580411, 1097469, 450921, 632394, 1097461, 557157, 406576, 543849, 60677, 1097448, 663006, 129229, 539957, 615383, 152598, 1097438, 448976, 756949, 719411, 562827, 70709, 575616, 387848, 539601, 463373, 724872, 483521, 257885, 566335, 523621, 466640, 704236, 619087, 1097386, 184105, 1097373, 180902, 675320, 643572, 709560, 1097359, 593732, 453220, 696404, 463443, 591940, 577813, 558263, 442525, 610940, 167229, 433220, 704072, 1097317, 732631, 340712, 755040, 454018, 1097314, 569674, 172062, 587524, 372070, 419692, 1097304, 1097298, 471705, 503381, 400696, 731759, 666694, 456016, 701390, 559959, 699873, 704398, 743046, 717845, 499904, 463133, 455659, 610056, 398258, 669979, 487279, 736347, 653041, 459948, 417040, 400692, 740762, 738165, 556489, 571954, 167994, 648877, 592495, 735387, 1097242, 703268, 480932, 1097236, 6217, 741392, 602957, 528760, 1097223, 412352, 
709936, 616045, 437914, 732618, 715189, 189115, 1097213, 446834, 392936, 515317, 710297, 426442, 718444, 565696, 1097195, 587674, 495680, 510867, 679390, 588829, 432874, 470611, 424092, 753479, 412597, 406181, 632825, 417404, 674702, 15063, 496276, 1097154, 687632, 147542, 511101, 523952, 489513, 685091, 565868, 594930, 1097135, 735895, 548254, 748997, 627085, 695238, 1097119, 422624, 1097118, 519145, 395038, 592192, 1097100, 608557, 1097093, 611152, 742667, 1097087, 423608, 448975, 514767, 727551, 1097066, 678176, 432602, 606117, 27743, 694063, 1097040, 485287, 610898, 637208, 517245, 648119, 1097027, 39360, 537526, 1097023, 707670, 1097014, 261098, 517117, 604673, 19940, 1096998, 479284, 646354, 691141, 73788, 601624, 330560, 568841, 594831, 1096964, 442673, 703211, 138640, 457622, 1096958, 750487, 459481, 558448, 189312, 755465, 733422, 404713, 663820, 1096947, 1096945, 418063, 507381, 1096944, 688739, 436844, 605169, 428113, 48417, 745559, 716641, 469873, 509907, 263889, 727837, 54843, 407102, 420304, 634583, 1096911, 416846, 655057, 512405, 568649, 478359, 653187, 445494, 559018, 91345, 690010, 754191, 476807, 112318, 708904, 1096887, 1096886, 467597, 592601, 585344, 679360, 757644, 415500, 635058, 474873, 622100, 685177, 299023, 333700, 427086, 1096870, 580313, 256783, 733186, 747345, 1096866, 684977, 387864, 488676, 575268, 613318, 570068, 147166, 1096850, 747720, 711682, 643561, 1096840, 497132, 1096830, 290779, 1096827, 256192, 635237, 691507, 1096823, 402417, 636853, 469535, 639163, 581521, 751778, 386653, 424509, 454258, 554511, 453270, 586916, 478827, 1096787, 656371, 477648, 649640, 1096776, 365044, 585680, 524722, 685591, 497360, 489374, 568895, 682105, 476483, 634126, 413040, 735343, 433549, 1288, 494730, 659929, 1096742, 696217, 417362, 739599, 710755, 669444, 117683, 633350, 131768, 337864, 495082, 1096712, 686290, 590861, 477380, 415165, 521402, 541135, 389258, 711803, 589564, 497596, 759503, 480064, 461491, 596088, 466774, 406974, 714672, 1096667, 680514, 635125, 1096658, 1096656, 589777, 424449, 645024, 456305, 399364, 1096644, 724947, 1096641, 494086, 626232, 277977, 372378, 1096628, 550331, 303045, 288702, 645604, 539648, 1096620, 503674, 1096610, 1096607, 641156, 505107, 505810, 626462, 397090, 464484, 484551, 617611, 587326, 670142, 243244, 208265, 455862, 666792, 665972, 670022, 660534, 753168, 517386, 470982, 745944, 590945, 422501, 1096557, 642352, 477639, 1096551, 669046, 412982, 399617, 534941, 497470, 1096543, 576822, 745746, 635647, 1096533, 1096532, 647949, 417080, 582146, 1096527, 613233, 524332, 615457, 637234, 567159, 576851, 547301, 456551, 156215, 148761, 168787, 693152, 142782, 1096509, 245921, 75717, 573157, 560245, 718112, 2, 649763, 555850, 18840, 627513, 1101871, 607374, 753517, 54040, 1096479, 1101870, 1096476, 1096475, 84778, 440098, 42361, 567714, 435794, 453869, 482808, 473492, 1101868, 1096457, 464440, 1096454, 453705, 65584, 620992, 556976, 694560, 407869, 484454, 1096431, 1096429, 667373, 527568, 595577, 461601, 693736, 231482, 443081, 282214, 747937, 467683, 551119, 450093, 696918, 540906, 575492, 588888, 687375, 479570, 708781, 578100, 198581, 441128, 748054, 329515, 560059, 1101861, 686469, 731886, 227968, 405867, 1096376, 1096375, 406205, 507086, 97766, 539278, 511367, 1096368, 490505, 458885, 1096360, 273014, 482412, 613694, 660999, 342450, 413079, 420934, 682025, 627323, 82100, 371420, 592672, 493508, 626701, 640232, 638503, 724579, 437165, 352236, 719488, 274981, 547018, 433680, 731902, 412340, 358150, 510858, 505171, 544060, 453451, 
632625, 405090, 409207, 477309, 749955, 504306, 436249, 1096268, 448035, 530572, 1096262, 573899, 97972, 185397, 506985, 1096258, 1096257, 1096256, 610716, 19552, 436475, 1096252, 648049, 662436, 652556, 536480, 414714, 1101845, 687615, 505541, 651187, 759062, 754786, 443489, 249821, 506438, 1096207, 499568, 663679, 428773, 421145, 600350, 690606, 657204, 697780, 556248, 427323, 574944, 220761, 408563, 526331, 555558, 440362, 497757, 391481, 1096180, 601684, 585165, 586268, 368229, 734979, 466335, 667535, 504335, 677460, 509730, 115365, 418926, 693469, 427730, 375291, 649200, 614598, 478295, 674914, 659230, 88160, 722413, 261521, 1096126, 738525, 142579, 393881, 574317, 408739, 481961, 397592, 614409, 525660, 463635, 171776, 632536, 630905, 573452, 473319, 605363, 746055, 697983, 403361, 406525, 510444, 755459, 1096087, 669800, 739671, 534305, 695993, 496175, 722981, 288200, 1096065, 744764, 680951, 548099, 588122, 409887, 433685, 593541, 464663, 569939, 434369, 1096049, 758901, 669288, 59204, 1096045, 650076, 1096044, 513061, 66161, 116517, 663950, 384985, 658498, 1096025, 596716, 231109, 561448, 649110, 745402, 743696, 76283, 570979, 544123, 646179, 434462, 160735, 735384, 442593, 225499, 722515, 656859, 566946, 413905, 724121, 1101822, 682365, 1095994, 77424, 714678, 221664, 93234, 1095988, 625022, 652912, 97612, 609104, 1095982, 478220, 575096, 417902, 458110, 1095971, 692955, 422600, 583766, 639560, 624876, 425688, 523062, 1095955, 718782, 456734, 597384, 578783, 460162, 750946, 308032, 671692, 1095942, 556476, 409557, 273449, 358240, 418423, 485558, 392501, 486370, 75698, 738162, 586740, 1095928, 676454, 641284, 123859, 601629, 646623, 1095921, 758720, 548673, 641618, 29416, 528841, 277556, 467612, 437671, 739913, 530611, 503401, 290830, 749267, 1095881, 151547, 547089, 192502, 1095876, 1095874, 540432, 607338, 739743, 1095868, 699872, 599550, 687245, 438455, 1095864, 472448, 543813, 570905, 619159, 282411, 595568, 1095857, 445094, 582557, 595236, 148777, 681264, 1095845, 1101806, 242107, 289276, 732448, 423178, 508316, 483178, 481297, 156889, 525534, 40337, 159078, 153981, 425505, 481387, 266920, 1095807, 1095806, 173391, 451609, 574547, 1095798, 392195, 544319, 742988, 552868, 435412, 163038, 447340, 585378, 272815, 172608, 740624, 129205, 436602, 733510, 507087, 508855, 1095749, 299732, 108037, 662524, 730278, 405036, 59392, 717563, 117113, 1095725, 649893, 522076, 420400, 1095716, 393268, 1095711, 426214, 1095704, 418165, 662016, 1095699, 510229, 575146, 758909, 249618, 645252, 1095687, 249866, 408427, 470001, 77323, 624176, 472359, 75266, 633916, 556166, 551819, 754509, 679658, 625205, 574051, 586790, 1095654, 505152, 537410, 636949, 1095650, 524447, 546956, 444350, 593135, 630318, 688644, 1095641, 581975, 681791, 682626, 1095631, 436924, 683045, 639412, 164282, 149801, 70720, 510158, 108507, 744109, 754166, 389385, 458771, 192579, 583916, 665009, 440802, 422955, 608197, 461078, 393462, 47716, 506579, 576312, 431481, 748771, 711710, 1095571, 1095566, 449244, 426504, 420980, 641293, 1095560, 94953, 1095558, 436100, 727707, 1095555, 148424, 321239, 664138, 647876, 280223, 1095542, 658372, 1095537, 717751, 4947, 399527, 605467, 555590, 581801, 237936, 1095495, 535142, 448123, 634113, 593275, 119761, 177221, 547820, 420867, 1095478, 404202, 415962, 1095469, 384845, 458774, 663131, 459707, 569689, 412532, 437752, 744092, 660479, 8714, 663890, 736713, 153037, 414799, 1095437, 451070, 760367, 556307, 275997, 453175, 22670, 415815, 632106, 138793, 443027, 660803, 124787, 407662, 536995, 
670476, 32176, 755907, 474234, 706342, 438058, 511330, 583369, 508104, 682190, 638928, 199776, 403793, 663388, 423878, 413404, 662282, 652961, 451484, 414155, 275528, 734198, 263670, 635079, 604153, 667136, 1095377, 743868, 567759, 1095371, 470385, 728060, 577511, 476977, 568585, 1095360, 1095357, 1095354, 625458, 138127, 507434, 637459, 607599, 758519, 698719, 635626, 450854, 1095335, 704223, 1095332, 428819, 436847, 685717, 682205, 644023, 496717, 686260, 754113, 750421, 647687, 630391, 14963, 436091, 275137, 594793, 1101761, 673984, 1095306, 83621, 659182, 401287, 466738, 1095278, 115930, 455782, 562821, 741977, 660957, 701345, 450851, 635044, 611199, 588627, 302435, 488825, 278542, 476947, 741274, 530602, 737940, 584500, 1095233, 591993, 520627, 43781, 437324, 540306, 700835, 653054, 584727, 146812, 619675, 670829, 604628, 701663, 478054, 490883, 327750, 203458, 700641, 661398, 571237, 592220, 680490, 497536, 2235, 703270, 741970, 37685, 1095165, 194750, 521801, 392488, 749752, 1095155, 584905, 388950, 274175, 730229, 334558, 752473, 746065, 532142, 578607, 522953, 422609, 130825, 471007, 469819, 711811, 449235, 607855, 733591, 302878, 1095126, 1095121, 510018, 570725, 129517, 1095108, 624644, 563652, 677212, 729697, 155086, 466162, 691055, 127098, 77878, 425330, 710914, 690801, 562594, 1101739, 1095066, 418552, 478981, 728460, 430142, 1095055, 604229, 117036, 450681, 731723, 572517, 677672, 321363, 432680, 733692, 667932, 418977, 672429, 726076, 1095012, 278429, 438324, 188134, 387662, 570070, 136157, 1094999, 461281, 1094996, 402427, 391125, 589903, 703383, 633986, 708739, 61623, 1094982, 549738, 587853, 703765, 516029, 759038, 686541, 705681, 1094962, 392350, 329369, 450788, 640103, 430229, 245120, 563347, 597395, 617795, 727224, 748672, 752700, 707513, 672109, 624503, 734426, 612471, 657264, 526984, 543251, 459291, 563943, 728823, 753299, 433579, 583798, 449442, 647503, 622725, 503580, 743675, 580450, 745469, 656376, 589586, 388588, 525868, 174592, 526671, 614047, 393203, 1101723, 1094869, 672433, 276979, 693101, 738484, 535599, 507934, 592235, 466252, 420365, 514851, 742022, 691004, 760512, 212796, 1094840, 748321, 577131, 689885, 617968, 490802, 1094825, 455743, 632726, 603031, 541425, 453856, 661076, 604113, 702792, 722352, 547139, 704080, 694678, 603773, 138266, 582641, 1101714, 584569, 455456, 559009, 711840, 565231, 528117, 729672, 129792, 684459, 626005, 689700, 632923, 1094759, 560419, 574569, 112477, 549135, 1094755, 208610, 421813, 688218, 466202, 714709, 345350, 458235, 724571, 608323, 1094724, 633635, 430985, 559709, 398335, 574002, 712832, 750821, 681514, 607292, 467274, 1101706, 639545, 1094691, 521018, 659247, 1094689, 632055, 516413, 338696, 334867, 518940, 495018, 472024, 261683, 559607, 422893, 622658, 178859, 320051, 743708, 496244, 740876, 421437, 211468, 503390, 612846, 153048, 124128, 1094634, 455273, 92509, 612670, 478691, 576195, 93823, 737512, 243712, 453851, 439375, 728150, 1094612, 73853, 611271, 1094605, 624143, 726098, 567452, 541948, 698445, 671219, 740416, 94782, 250228, 1094578, 210442, 601128, 462979, 411953, 6791, 471983, 1094566, 475402, 231717, 642032, 657091, 679167, 102695, 645343, 1094536, 175251, 498478, 677936, 537825, 549235, 396391, 1094519, 711759, 488345, 549219, 1094501, 721885, 555750, 456443, 568526, 1094477, 708517, 25603, 709559, 191792, 1094469, 391101, 1576, 170982, 753214, 1094460, 341317, 1094453, 693636, 481341, 504044, 489858, 556217, 406923, 430989, 413858, 402318, 633153, 231298, 538333, 618408, 249118, 567630, 393954, 
628056, 645892, 1102351, 647260, 1094395, 1094394, 1094389, 537761, 331352, 419326, 425375, 598802, 506181, 248086, 559771, 638849, 744891, 560357, 1094370, 445714, 1094369, 571696, 425072, 473935, 1094364, 602352, 1094361, 635497, 563995, 21741, 419333, 693162, 730626, 593792, 482666, 1101674, 96250, 733892, 409071, 1094316, 223165, 387603, 26207, 680373, 452200, 644658, 384406, 468907, 137440, 540983, 494346, 412319, 171527, 635150, 438316, 623857, 402075, 614069, 285729, 634412, 1101670, 669427, 616447, 65000, 609799, 266760, 146212, 161418, 1101668, 1094271, 454872, 714636, 99556, 631724, 21948, 515813, 409694, 88375, 563359, 602652, 574730, 281704, 543951, 242019, 743693, 445908, 584592, 621419, 24441, 403388, 551860, 124534, 486274, 138223, 524166, 673143, 1094220, 715508, 1094215, 522151, 404051, 394021, 416228, 393420, 563771, 670600, 1094197, 1101661, 1094191, 281702, 649451, 583611, 473394, 408945, 470459, 1094175, 414276, 705279, 623281, 489931, 545450, 694845, 531142, 564707, 753480, 628085, 1094141, 448183, 568709, 614186, 230891, 725047, 700224, 521851, 707721, 715588, 524699, 517516, 543644, 1094110, 702790, 447551, 647872, 760070, 525467, 473886, 1094085, 1094081, 674595, 649294, 591026, 537301, 261650, 509111, 405985, 1094062, 1094056, 427340, 457809, 34015, 639084, 455853, 680250, 544308, 424898, 572286, 426347, 199442, 144254, 82161, 565915, 1094027, 449750, 683193, 728110, 576452, 525779, 610425, 605651, 720013, 582848, 503607, 690508, 1093971, 155056, 483241, 64528, 541969, 1093966, 536791, 86203, 1093962, 527769, 1093959, 609956, 549342, 577167, 731736, 700618, 1093941, 284313, 22479, 507221, 405310, 1093927, 1093926, 571474, 100013, 609628, 690705, 713134, 712545, 604954, 499413, 541274, 479525, 106125, 480504, 400311, 50891, 610190, 1093881, 599524, 457714, 605648, 535421, 487569, 591310, 555458, 4696, 525047, 1093855, 744261, 54199, 536654, 653092, 558548, 961921, 978802, 982481, 775343, 138629, 841165, 841020, 846513, 786021, 1066043, 1093791, 939473, 1058978, 858421, 1093786, 345453, 330450, 821372, 855968, 160671, 1093781, 1036002, 783963, 1093773, 1003445, 315131, 841961, 912898, 771694, 961950, 772928, 776465, 991240, 772055, 917334, 1093750, 853057, 787784, 1041473, 1093732, 783277, 792900, 1093723, 65488, 148564, 792742, 29612, 1093717, 212977, 989855, 936273, 987486, 1019470, 884870, 1005191, 998941, 952378, 95286, 258337, 783433, 1020915, 931905, 808716, 1053111, 1093698, 900924, 1057251, 875787, 907997, 976829, 234114, 888100, 865616, 58130, 789439, 1093682, 763878, 790059, 338637, 810242, 808362, 925571, 944700, 170770, 1011860, 815015, 25025, 930326, 1028131, 1093650, 778890, 978057, 1005520, 900731, 1093637, 960397, 862742, 25534, 1093621, 831601, 900076, 981400, 995176, 852037, 1036542, 971233, 914321, 921173, 942221, 944181, 983708, 804197, 988269, 791629, 804905, 831560, 849561, 913509, 1093570, 230725, 831315, 1037407, 837181, 1011811, 1076078, 303934, 959228, 1093556, 869827, 1093552, 783602, 849337, 1011721, 976941, 950799, 863623, 1031240, 1024034, 812734, 1093540, 1078198, 1093534, 773155, 898714, 1023850, 227317, 825954, 1093507, 956993, 202081, 49435, 884878, 1078731, 780336, 893681, 868598, 1038755, 357340, 1093487, 1093481, 968560, 934235, 1051886, 376537, 906901, 860266, 778139, 831474, 853344, 1093443, 241246, 1093438, 822859, 53897, 884722, 140161, 66908, 992618, 762111, 991138, 64960, 981006, 830040, 944231, 948397, 925951, 835478, 1073972, 147337, 1093419, 800792, 790536, 1093410, 800318, 899869, 1093406, 1081569, 244092, 1093399, 
831030, 877810, 798284, 837375, 166748, 1016406, 270140, 893271, 148851, 171370, 897476, 1027669, 848478, 329114, 1093359, 959083, 865426, 947678, 1003114, 856171, 779553, 773998, 1093349, 988988, 939104, 927553, 881695, 1035228, 954455, 778948, 881582, 1093322, 1003329, 904295, 373209, 971633, 132639, 1093312, 952388, 1093305, 767745, 845888, 869035, 1004940, 1017734, 931147, 989994, 792463, 789332, 850919, 950139, 1101576, 942651, 779475, 1004199, 934223, 1093255, 1011529, 1033703, 804103, 827791, 903811, 813675, 1009109, 1093238, 776122, 1093235, 176744, 1101566, 1093231, 894610, 873250, 1026098, 948452, 224314, 1039195, 891082, 917489, 321918, 859274, 972699, 944245, 983438, 109647, 1093202, 1046952, 1093200, 1093196, 793475, 844390, 1093181, 1093179, 1057476, 129228, 1093172, 960265, 254652, 80712, 809933, 971213, 803306, 1048565, 839878, 946428, 766769, 910818, 339934, 1093142, 1022577, 1093128, 831962, 788851, 872347, 796056, 130034, 1093112, 971653, 788702, 803599, 1093104, 827801, 786857, 777297, 206806, 989108, 1093096, 74328, 1101552, 1093094, 42568, 866101, 820267, 362016, 262280, 1046648, 155041, 841521, 1093064, 840061, 965578, 1093042, 1061994, 983499, 810680, 1093038, 879869, 1093031, 839528, 861169, 818819, 914637, 975775, 1101535, 958311, 957607, 1093006, 28352, 1070412, 870348, 338917, 800987, 895263, 1092996, 977770, 820973, 61531, 134239, 855546, 892454, 57270, 890532, 875796, 979133, 1010537, 994112, 1092984, 778857, 1101531, 941865, 1029016, 1092978, 824000, 22231, 1037104, 924895, 368728, 1092952, 908154, 762652, 862345, 920717, 943190, 862856, 10264, 978605, 242863, 1092930, 872081, 853882, 1010277, 1092919, 778095, 1092911, 822937, 91722, 837681, 161828, 987660, 837202, 810324, 283344, 889104, 25294, 839128, 782549, 1056548, 794469, 779674, 903479, 934964, 1092865, 1005163, 919913, 1092863, 761096, 1018525, 991894, 1021907, 944194, 1092858, 1037826, 109819, 993234, 1059820, 819618, 1024312, 1092832, 1092822, 2798, 166403, 893275, 766272, 809556, 359040, 801907, 348136, 1092796, 776080, 878959, 1092792, 991590, 1092791, 874914, 993107, 839137, 809798, 904007, 948797, 795991, 999555, 817349, 96602, 979054, 70504, 980633, 960437, 783687, 782696, 809909, 1092759, 1092757, 786009, 1092756, 341736, 930483, 783098, 1040684, 855050, 956403, 881723, 856568, 1033912, 994918, 1092738, 823203, 1072506, 1075156, 929046, 953351, 1092724, 1003006, 69871, 111377, 968608, 791223, 865518, 1092715, 789037, 866251, 1079868, 897789, 922398, 844211, 846082, 952452, 922335, 869348, 992652, 967106, 926019, 985644, 902657, 880527, 1092665, 766804, 950355, 1063177, 1101503, 190601, 924398, 1078920, 1092643, 300246, 762558, 999028, 885153, 924567, 837467, 850957, 913579, 272605, 891565, 935358, 63548, 884533, 1092605, 144285, 278606, 1078187, 260853, 1060616, 1038592, 780613, 1026372, 941219, 969750, 913568, 1052717, 887395, 1023363, 1092557, 1092551, 354222, 320025, 807880, 869759, 1092543, 960302, 1043702, 1092528, 943170, 72485, 888988, 1092522, 1092517, 944451, 1043413, 908069, 1040353, 320970, 823549, 1003875, 885081, 796812, 1092484, 937427, 984499, 196720, 915769, 1092482, 974808, 998381, 1050007, 792688, 1102335, 333579, 1092474, 885308, 842223, 1092470, 322345, 999637, 865476, 135464, 1031456, 896931, 914707, 915762, 1044041, 1092450, 167371, 820161, 1028742, 1078491, 1092441, 766238, 894161, 885986, 859669, 1069556, 862640, 962731, 370316, 897981, 1050747, 1092422, 914406, 1092417, 860655, 1092416, 970152, 1005678, 339501, 882002, 1028652, 1092394, 891498, 909048, 1092391, 255633, 
1017348, 782426, 782079, 865971, 160808, 767248, 910150, 875986, 999261, 945535, 990197, 1092348, 1039361, 952658, 1050253, 78418, 991064, 914368, 1038685, 900450, 990481, 1025348, 914771, 814282, 850820, 905707, 1092327, 840532, 212303, 823596, 762761, 1058165, 999610, 1092311, 1040507, 877453, 998093, 979787, 872632, 975997, 789292, 1039346, 1079785, 886332, 116431, 303790, 984434, 1071722, 928753, 796383, 860942, 940386, 1064518, 1092263, 170581, 1092258, 267566, 1092257, 73106, 909547, 1000097, 952445, 854085, 995825, 951820, 882141, 1028555, 1092238, 297019, 69789, 1092237, 1092236, 1024599, 1065448, 1057270, 953274, 801059, 814699, 1025188, 1033580, 1080419, 128178, 760817, 1101466, 995443, 863187, 902919, 877845, 1092203, 770167, 1049200, 879747, 912961, 808528, 991241, 935952, 1092176, 916901, 930293, 83458, 1092168, 1073721, 878840, 1092165, 776609, 1092161, 855725, 798945, 1058425, 1092143, 1031999, 1062457, 761388, 846806, 72809, 922389, 948351, 1056950, 1039495, 1003334, 990010, 908316, 889289, 47270, 777519, 1070728, 1092120, 917536, 1057446, 849596, 997533, 805900, 822649, 840445, 996653, 1071270, 931940, 828596, 1092095, 889046, 1092093, 937578, 1014911, 975495, 813953, 828779, 908237, 156723, 887392, 1101448, 926980, 947974, 868487, 911605, 900696, 1007875, 900062, 831784, 259763, 801478, 54307, 783781, 1040461, 1092042, 760908, 838453, 977952, 1052948, 849245, 820899, 1019830, 1101443, 887398, 1042399, 1092029, 788431, 992340, 839488, 1092023, 26079, 845790, 73119, 187818, 224548, 1102330, 1092007, 786937, 907127, 930621, 776517, 847726, 17586, 1021554, 792847, 776700, 907538, 806688, 798883, 786375, 1091983, 1001108, 899212, 1091973, 143849, 942354, 842272, 935707, 1001381, 813899, 874876, 257018, 1043587, 989647, 157580, 155700, 1091941, 848432, 893789, 890890, 119089, 775355, 114573, 880766, 924047, 860573, 102627, 825147, 989099, 1011003, 1075713, 782381, 824542, 960566, 990784, 1076030, 860078, 874299, 810660, 1057488, 1059646, 1091850, 1014115, 794625, 780993, 1059619, 854862, 1091833, 931772, 1066116, 200062, 922024, 974670, 885433, 825583, 962443, 870544, 769630, 800652, 900599, 803237, 846291, 815243, 244011, 1079141, 829425, 936182, 98847, 927989, 189174, 785176, 1065118, 841919, 906126, 775457, 1091765, 1006922, 991419, 1046750, 1048876, 351820, 947466, 287912, 963788, 830551, 766301, 986325, 1091749, 1006987, 771314, 842070, 1052421, 994941, 842596, 837740, 115833, 963564, 982348, 830531, 1062603, 1056482, 1005500, 773858, 1021241, 59084, 1091719, 822585, 765147, 802634, 987657, 1042676, 1091692, 868410, 1091690, 214771, 1091688, 810210, 849142, 1091681, 803861, 874691, 888777, 1048995, 372674, 842221, 1091667, 909273, 1091665, 913286, 761032, 1056850, 1091661, 1091659, 1091654, 898686, 1065032, 938066, 985653, 896383, 1091643, 928567, 837372, 1091633, 824920, 1091630, 824938, 1031118, 873914, 987720, 935364, 1037341, 1044755, 1004493, 800243, 1091595, 859229, 771170, 1091576, 770604, 1091569, 897240, 58234, 891719, 955117, 918324, 1024950, 905479, 196963, 864507, 933946, 964577, 852179, 128633, 842333, 989644, 1014697, 161117, 834848, 1091529, 7968, 1015766, 257335, 1029681, 145569, 1091522, 818842, 1091520, 831302, 1091513, 900164, 1010700, 900077, 245416, 1032011, 908489, 850555, 810270, 948532, 832508, 868184, 813605, 939020, 964152, 911056, 251172, 920885, 995029, 991598, 988149, 1051095, 1049368, 1074499, 1091471, 764139, 970242, 831871, 1091467, 770233, 1091461, 873886, 1047088, 1063709, 955093, 784549, 902855, 1091450, 970824, 940916, 912899, 780297, 
991383, 988294, 792977, 998482, 860071, 984930, 77391, 75335, 866428, 1091421, 853646, 1015556, 772129, 47588, 1021446, 932878, 859376, 980789, 1102325, 937947, 143025, 872869, 1080968, 968004, 1091384, 765583, 1042488, 806574, 886382, 921348, 836832, 808235, 882982, 1073980, 853471, 180693, 1091360, 1038871, 960003, 1049221, 1033759, 903097, 1010607, 1091340, 785721, 1091337, 957688, 1040694, 194724, 1009959, 1091330, 788035, 46081, 1058100, 301061, 973917, 1038849, 969264, 953355, 860542, 175625, 802794, 194870, 888911, 1057168, 1101374, 995526, 1016869, 1047592, 298444, 1051990, 985905, 1091269, 830649, 850450, 870875, 357519, 1091264, 1023838, 855031, 958993, 1091255, 795951, 939744, 954711, 200600, 851490, 1063659, 932495, 994582, 1091246, 236708, 1091234, 238886, 804687, 149670, 897401, 812190, 842108, 1045203, 868919, 1041714, 1060305, 968310, 1009668, 1036800, 860462, 1091206, 164946, 769085, 904727, 844128, 1059442, 1091194, 155119, 864905, 1091189, 97964, 798469, 761963, 326410, 1016703, 1059801, 1091177, 970830, 786674, 16860, 1091173, 1091163, 888796, 909506, 1091158, 991938, 905766, 794319, 795540, 829025, 952520, 909176, 872655, 783843, 1003997, 1050670, 1016790, 1080495, 956624, 1017892, 815320, 1013367, 1091115, 1091112, 811852, 1091108, 913137, 1057757, 831380, 929473, 921812, 1079434, 1021679, 822642, 248385, 946747, 1091082, 51054, 909886, 996042, 869891, 879150, 1091068, 883861, 925059, 1058822, 790178, 781877, 1091059, 1081321, 1049867, 993876, 91790, 131925, 855029, 907173, 1058717, 828093, 1091048, 898631, 829050, 59190, 760930, 865660, 979713, 36388, 1068408, 1036999, 984075, 1022762, 918424, 969974, 843140, 768133, 941749, 879155, 798253, 980726, 832188, 1091015, 999897, 878367, 1032074, 783822, 845719, 764691, 904389, 926064, 910375, 70787, 1090987, 792187, 863817, 1070867, 980168, 909221, 203274, 885184, 763084, 994311, 25036, 1101341, 1090965, 84520, 952047, 854785, 863738, 808200, 818612, 935362, 980811, 919712, 1027785, 999641, 846438, 772864, 948829, 989543, 1090924, 888559, 804996, 995654, 993544, 969066, 220495, 1090910, 992729, 787255, 1015055, 938773, 286915, 942915, 1067990, 1090887, 1090886, 917022, 826518, 847415, 874455, 193422, 804523, 1090877, 780850, 958142, 953332, 988960, 885505, 969023, 1053997, 1068924, 1090869, 946825, 993419, 788484, 1090861, 972064, 829087, 1080537, 786477, 1065985, 773924, 1090842, 1090841, 1077000, 1090838, 1090833, 1001981, 865384, 810394, 355458, 311540, 867947, 93311, 858391, 1090808, 917015, 1025895, 885301, 791140, 1071061, 1090796, 822218, 820027, 1090791, 833507, 995789, 1090789, 153739, 156052, 1049767, 1090758, 789997, 792789, 857943, 49943, 1027865, 905604, 931726, 278658, 1090742, 784961, 993501, 818421, 851813, 1090730, 135079, 775297, 1090727, 995212, 888934, 815891, 961048, 818798, 97652, 1078752, 878817, 833268, 1090701, 1090700, 867490, 898318, 323592, 938359, 843409, 875417, 1064155, 996317, 816483, 348994, 823421, 798967, 149767, 240489, 28442, 205741, 240584, 62411, 264284, 138492, 186727, 38608, 144028, 275534, 54235, 135633, 236582, 348594, 58409, 1090624, 358455, 42555, 1090613, 26485, 55848, 113826, 126821, 60339, 375891, 1090596, 184916, 247717, 50833, 168175, 340006, 326190, 99461, 84473, 166625, 38098, 128158, 197542, 108287, 153588, 156688, 1090558, 227992, 75801, 1090550, 326637, 194430, 1090542, 1090540, 1090537, 1090530, 59426, 1101300, 165335, 176065, 166784, 53109, 49234, 1090513, 174344, 370979, 1101298, 166111, 72435, 260172, 59030, 243139, 79763, 156251, 291396, 186265, 169305, 307504, 
355484, 137508, 186063, 1101296, 289586, 277737, 249176, 344955, 1090458, 81649, 203039, 56188, 35996, 36703, 44686, 25344, 121017, 72398, 129491, 30039, 148016, 1090413, 57411, 54819, 57258, 199837, 300306, 1090400, 1090399, 149161, 215603, 165135, 59654, 296441, 160312, 149221, 1090377, 100661, 196250, 206762, 283141, 111995, 55682, 1090358, 1090352, 1090350, 81945, 188714, 291248, 205251, 305650, 100250, 1090329, 82842, 288139, 180887, 88831, 171906, 1090311, 290091, 1101282, 326719, 147073, 243761, 162662, 247194, 195199],
'msmarco-doc-test': [355339, 1035339, 943613, 1051868, 876108, 770613, 84901, 928755, 895787, 920435, 1009016, 923221, 1126106, 988661, 975821, 154441, 802079, 184355, 938754, 1008125, 794144, 51577, 1126090, 1073795, 767703, 937603, 876154, 853437, 275951, 773040, 813841, 1135894, 117487, 927093, 1126069, 804764, 832389, 1126064, 808540, 963267, 909560, 334918, 1010426, 955087, 886048, 767404, 898402, 849376, 970577, 7869, 1126035, 889718, 82412, 809339, 68610, 891083, 346202, 835783, 1024923, 973416, 67937, 1019366, 857232, 989398, 787957, 889757, 907576, 1037969, 957990, 1037871, 1073093, 848496, 147746, 960571, 932412, 993677, 997024, 800980, 927899, 1053885, 1018475, 943566, 929863, 1079280, 943913, 839899, 128604, 1061763, 809683, 1036759, 798642, 57068, 977156, 1125956, 913435, 1125954, 60634, 276099, 913041, 993117, 789981, 941780, 1125930, 904849, 1125924, 1024250, 128322, 1125920, 1125919, 845751, 943412, 1056425, 978017, 796563, 53330, 1054994, 794701, 990049, 305251, 1074350, 773878, 1135875, 855288, 952866, 792115, 848431, 996851, 1056576, 1031850, 1035354, 1125864, 271481, 936261, 789305, 1416, 842609, 900690, 793245, 983196, 816456, 851318, 899008, 811001, 903976, 1125841, 902410, 931576, 959564, 955763, 1125829, 1013640, 65752, 828649, 992308, 105367, 1054386, 1125820, 58374, 763534, 957181, 823034, 858790, 28453, 984702, 8234, 871720, 778062, 1079501, 1008523, 188803, 761705, 1006025, 991361, 803911, 1125782, 982127, 812148, 34925, 965627, 1001465, 168906, 1125763, 1070541, 203578, 788201, 1032694, 903643, 822527, 810947, 323665, 909052, 912272, 902935, 1125745, 980205, 880930, 950222, 1011337, 803862, 1046316, 1056710, 816915, 996237, 968847, 1135859, 912165, 1125706, 1074076, 944345, 301180, 966925, 784039, 1125694, 971998, 76945, 1125690, 1125688, 779295, 905638, 875806, 1125684, 54659, 1125680, 1008285, 1135856, 898753, 20530, 1003074, 875793, 341529, 841979, 1056910, 857280, 792006, 23822, 842753, 1043433, 128291, 1125651, 1026391, 869721, 1074859, 997654, 1125645, 972896, 839707, 318781, 1050274, 1019841, 1125633, 855922, 886682, 1033989, 1125628, 275173, 765070, 1125626, 230103, 796808, 280245, 851807, 790280, 1077110, 1019506, 1048410, 1029806, 1125599, 124943, 824765, 906203, 1125596, 198905, 166606, 1125592, 1125591, 1125590, 1010376, 1125587, 965313, 1125583, 803296, 1000865, 1135848, 1019720, 928128, 991278, 991583, 128365, 788278, 863720, 769008, 1018290, 1125559, 760825, 122724, 261830, 1125556, 1125555, 842923, 775366, 900867, 989196, 1030770, 767991, 882803, 1005440, 841671, 943638, 1060768, 864153, 806779, 819755, 799323, 1003747, 253837, 893530, 859732, 921193, 1066463, 1125519, 902014, 939096, 946406, 1125510, 24480, 798003, 1135841, 1125495, 368106, 853522, 204701, 928453, 924975, 854766, 814340, 886243, 1059906, 1125481, 1078827, 989869, 1073638, 837433, 962187, 1036844, 983787, 861435, 18793, 311494, 1125459, 1125455, 1064450, 947416, 809209, 972396, 306216, 1125443, 931357, 790199, 132469, 1044869, 836655, 881324, 885159, 949501, 1058138, 340540, 1076490, 909115, 919673, 1125409, 1125406, 1023476, 971528, 1076183, 840845, 994762, 1125395, 980406, 1125394, 204957, 354123, 775487, 762865, 808343, 788151, 964554, 1067743, 836007, 840770, 825151, 871767, 222861, 299781, 153374, 1125352, 219844, 208394, 261661, 355519, 146170, 1125347, 180979, 220352, 153239, 323874, 175228, 61240, 49429, 121109, 165037, 282280, 336011, 121113, 171824, 116659, 9904, 9975, 282050, 233856, 78181, 1135818, 199508, 57774, 224261, 206738, 228769, 1125306, 21227, 141078, 
1125292, 190377, 184333, 297682, 130610, 1125273, 1125272, 68896, 96597, 202664, 128757, 307758, 278239, 1125260, 334263, 303070, 1125251, 145104, 208344, 236824, 1125245, 77398, 84713, 24841, 110736, 1125238, 148515, 319235, 50800, 1125227, 300375, 310642, 281106, 307403, 190070, 161474, 136578, 189365, 20440, 14448, 249267, 28862, 1125194, 60902, 204851, 17077, 336236, 161434, 112638, 282352, 21075, 341207, 156479, 1125153, 329704, 261295, 114048, 213365, 374724, 340145, 122049, 92622, 314064, 247025, 168854, 11304, 233178, 76591, 157744, 31548, 1125111, 150926, 92713, 276665, 150029, 178677, 194563, 237689, 378218, 1125086, 324242, 235280, 105183, 1125079, 1135796, 24280, 1125075, 79457, 88200, 174034, 130306, 139285, 111573, 102366, 198015, 158054, 205433, 91055, 343439, 275968, 254923, 23367, 12166, 1125041, 115254, 28653, 300384, 184640, 219898, 50782, 84257, 1136966, 281922, 319757, 313747, 81842, 37122, 23986, 1125015, 231134, 1125013, 179395, 158569, 176276, 275413, 120398, 55454, 253834, 118372, 1124998, 323294, 305205, 85798, 143889, 230878, 1124990, 1124989, 121025, 49984, 118151, 1124982, 276525, 180091, 307344, 186484, 82578, 246327, 53422, 96443, 255889, 1124958, 87592, 1124957, 144952, 1124953, 144498, 1135780, 52199, 285049, 198444, 302038, 122795, 122298, 235309, 360650, 60301, 59722, 25398, 202245, 1124926, 130951, 1124915, 1124882, 1124872, 1124863, 1124803, 1124767, 1124753, 1124703, 1124699, 1124663, 1124621, 1124573, 1124569, 1124549, 1124542, 1124534, 1136837, 1135738, 1124531, 1124522, 1124504, 1124501, 1124480, 1135727, 1124472, 1124469, 1124462, 1124451, 1135722, 1124391, 1124388, 1124373, 1124369, 1124335, 1124324, 1124306, 1124300, 1124276, 1124251, 1124226, 1124221, 1124198, 1124194, 1124171, 1124170, 1124160, 1124159, 1124122, 1124114, 1124093, 1124090, 1124087, 1124067, 1124059, 1123997, 1136830, 1123971, 1123969, 1123968, 1123953, 1123930, 1123917, 1123915, 1123888, 1123840, 1123837, 1123822, 1123776, 1123765, 1123761, 1123721, 1123709, 1123636, 1123626, 1135625, 1123603, 1123584, 1123544, 1123492, 1123488, 1123469, 1123465, 1135606, 1135605, 1123435, 1123397, 1123383, 1123337, 1123298, 1123211, 1123209, 1123191, 1123168, 1123112, 1123103, 1123090, 1123074, 1123057, 1123055, 1123052, 1135570, 1123034, 1123028, 1135568, 1135563, 1122957, 1122936, 1122915, 1122908, 1122892, 1122859, 1135553, 1122853, 1136818, 1122792, 1122785, 1122776, 1122772, 1122760, 1122745, 1122706, 1122695, 1122690, 1122686, 1122662, 1122658, 1122652, 1122648, 1122643, 1135533, 1122610, 1122606, 1122601, 1122594, 1122593, 1122591, 1122586, 1122584, 1122569, 1135525, 1122504, 1135522, 1122501, 1122488, 1122476, 1122471, 1122446, 1122442, 1122409, 1122352, 1122348, 1122343, 1122342, 1122341, 1122336, 1122334, 1135498, 1122316, 1122306, 1122305, 1122283, 1122271, 1122267, 1122255, 1122247, 1122242, 1122237, 1122234, 1122233, 1122222, 1122220, 1122212, 1122168, 1122155, 1122087, 1122082, 1122064, 1136811, 1121993, 1121967, 1121963, 1121941, 1121931, 1121922, 1135448, 1121892, 1121875, 1121861, 1121860, 1135438, 1121830, 1121817, 1121814, 1121799, 1121794, 1121759, 1121748, 1121673, 1121667, 1121642, 1121641, 1121631, 1121618, 1121576, 1121566, 1121532, 1121523, 1121474, 1121466, 1121459, 1135397, 1135395, 1121426, 1121424, 1121412, 1121380, 1121374, 1121369, 1121333, 1121327, 1121309, 1121268, 1121260, 1121251, 1121249, 1121191, 1121167, 1121162, 1121156, 1121118, 1135362, 1121083, 1121082, 1121068, 1121050, 1121044, 1121022, 1121000, 1120994, 1120986, 1120982, 1120963, 1120945, 1120926, 
1120919, 1120904, 1120891, 1120887, 1120867, 1120842, 1120835, 1120834, 1120776, 1120775, 1120773, 1120744, 1120726, 1120706, 1120689, 1120685, 1120678, 1120676, 1120672, 1120668, 1135301, 1120633, 1120621, 1120619, 1120606, 1120599, 1120576, 1120574, 1120564, 1120563, 1120559, 1120541, 1120537, 1120519, 1120515, 1120466, 1120462, 1120453, 1135280, 1120399, 1120395, 1135274, 1120391, 1120375, 1120361, 1120348, 1120328, 1120316, 1120268, 1120261, 1135262, 1120253, 1120248, 1120236, 1120189, 1120187, 1120180, 1120167, 1120089, 1120084, 1120049, 1120041, 1120019, 1120006, 1135238, 1135234, 1119953, 1119943, 1119904, 1119884, 1119872, 1119862, 1119828, 1119764, 1119744, 1119740, 1119695, 1119627, 1119620, 1119603, 1119597, 1119593, 1119531, 1119529, 1119514, 1119501, 1135190, 1119444, 1119384, 1119374, 1119355, 1119347, 1119338, 1119316, 1119307, 1119305, 1119280, 1119271, 1119259, 1119230, 1135165, 1119189, 1119179, 1119169, 1119168, 1119167, 1119132, 1119128, 1119112, 1119110, 1119097, 1119076, 1135150, 1119040, 1119038, 1119021, 1119015, 1119013, 1119006, 1135142, 1118976, 1118974, 1118954, 1118953, 1118941, 1118927, 1118926, 1118921, 1118889, 1118884, 1118879, 1118871, 1118869, 1118868, 1118820, 1118806, 1118799, 1118797, 1118793, 1118792, 1135121, 1118768, 1118759, 1118734, 1118677, 1118676, 1118671, 1118659, 1118651, 1118641, 1118627, 1135106, 1118595, 1118585, 1136771, 1135094, 1118456, 1118455, 1118435, 1118434, 1118429, 1118416, 1118388, 1135081, 1118310, 1118294, 1118293, 1118286, 1118259, 1118230, 1118229, 1118227, 1118226, 1118209, 1118199, 1135052, 1118176, 1118172, 1118169, 1118145, 1118140, 1135042, 1135039, 1118042, 1118014, 1118012, 1135028, 1117935, 1117925, 1117901, 1117875, 1117872, 1117858, 1117826, 1117798, 1117787, 1117771, 1117767, 1117765, 1136763, 1134998, 1117740, 1117725, 1117709, 1117708, 1117700, 1117691, 1117689, 1117672, 1134987, 1117650, 1117623, 1117616, 1117589, 1117584, 1117581, 1117579, 1134978, 1117566, 1117542, 1117505, 1117495, 1134967, 1117451, 1117450, 1117446, 1117405, 1117402, 1117398, 1117394, 1117375, 1117361, 1117357, 1117350, 1117343, 1117337, 1117331, 1117313, 1117307, 1117299, 1117295, 1117294, 1134949, 1117271, 1117263, 1117261, 1134945, 1117235, 1117206, 1117183, 1117182, 1117178, 1117154, 1117150, 1117148, 1134931, 1117066, 1117062, 1117055, 1117033, 1134926, 1116996, 1136756, 1116903, 1116896, 1116877, 1116871, 1116867, 1116864, 1116862, 1116846, 1116845, 1116821, 1116816, 1116776, 1116775, 1116763, 1116728, 1116706, 1116702, 1116695, 1116694, 1116663, 1116657, 1116643, 1116633, 1116612, 1116606, 1116592, 1116554, 1116553, 1116537, 1116531, 1116467, 1116452, 1116433, 1116429, 1116419, 1116402, 1134871, 1116369, 1116368, 1116353, 1116324, 1116304, 1134862, 1116301, 1116273, 1116265, 1116264, 1116260, 1116242, 1116234, 1116228, 1116221, 1134853, 1116211, 1116201, 1134850, 1116180, 1116169, 1116168, 1116164, 1116162, 1116161, 1134846, 1116139, 1116134, 1116121, 1116112, 1116103, 1116096, 1116092, 1116090, 1134839, 1134838, 1116037, 1116028, 1116025, 1116021, 1116019, 1116016, 1116015, 1134835, 1115983, 1115970, 1115961, 1115949, 1115933, 1115929, 1115881, 1115870, 1115819, 1115784, 1115783, 1115760, 1115748, 1115716, 1134807, 1134806, 1115693, 1115677, 1115660, 1115656, 1115651, 1115650, 1115649, 1115617, 1115599, 1115595, 1115586, 1115585, 1115584, 1115539, 1115526, 1115511, 1115485, 1134784, 1115462, 1115432, 1115425, 1115423, 1115388, 1115372, 1115339, 1115334, 1115332, 1115325, 1134769, 1115281, 1115255, 1115248, 1115206, 1115197, 1115191, 
1115172, 1134752, 1115154, 1115118, 1115109, 1115106, 1115100, 1115097, 1115086, 1115072, 1115030, 1115021, 1115004, 1114979, 1114974, 1114962, 1114947, 1114905, 1114901, 1114882, 1114870, 1134723, 1114838, 1114828, 1114805, 1114782, 1114757, 1114753, 1114743, 1114739, 1114700, 1114690, 1114669, 1114660, 1114655, 1114654, 1114650, 1114634, 1114589, 1114588, 1114585, 1114584, 1114542, 1114524, 1114510, 1114502, 1114498, 1114495, 1114488, 1114476, 1114471, 1114460, 1134676, 1114428, 1114423, 1114420, 1114402, 1114383, 1114358, 1134666, 1114290, 1114275, 1134656, 1114236, 1114206, 1114200, 1114198, 1114188, 1114185, 1114164, 1114149, 1114131, 1114125, 1114108, 1114099, 1114093, 1114092, 1134639, 1114078, 1114066, 1136728, 1114055, 1114047, 1114044, 1113970, 1113959, 1113944, 1113877, 1113870, 1113861, 1113847, 1113840, 1136726, 1134614, 1113808, 1113802, 1113796, 1113792, 1113767, 1113756, 1113751, 1113724, 1113709, 1113699, 1113683, 1113654, 1113622, 1113608, 1136724, 1113597, 1113545, 1113528, 1113526, 1113520, 1134583, 1113506, 1113498, 1113496, 1113461, 1113439, 1113429, 1113425, 1113416, 1113398, 1134572, 1113381, 1113380, 1113353, 1113352, 1113347, 1113318, 1113307, 1113304, 1113269, 1113258, 1134560, 1134558, 1113231, 1134557, 1113201, 1113175, 1113170, 1113163, 1113158, 1113148, 1113147, 1134552, 1113125, 1113092, 1113090, 1134539, 1113073, 1113041, 1112954, 1112947, 1112944, 1112939, 1112928, 1112908, 1112897, 1112847, 1112838, 1112827, 1112819, 1134500, 1134499, 1112770, 1112709, 1112705, 1112663, 1112658, 1112656, 1112614, 1112606, 1112596, 1112568, 1112514, 1112506, 1112487, 1112486, 1112442, 1112396, 1112390, 1112384, 1112382, 1112375, 1112366, 1112327, 1112324, 1112313, 1134449, 1112302, 1112297, 1112291, 1134444, 1112250, 1112240, 1112234, 1112210, 1134436, 1112203, 1112154, 1112152, 1112141, 1112107, 1112105, 1112100, 1112089, 1112061, 1134422, 1134420, 1112044, 1112018, 1112014, 1111987, 1111969, 1111957, 1111908, 1111902, 1111898, 1111892, 1111890, 1134405, 1111874, 1111813, 1111802, 1111791, 1111790, 1134394, 1111760, 1111710, 1111705, 1111702, 1111678, 1111668, 1111662, 1111650, 1111605, 1111581, 1111580, 1111576, 1111564, 1111504, 1111502, 1111472, 1111470, 1111460, 1111439, 1111417, 1111400, 1111396, 1111392, 1111377, 1111345, 1111338, 1111316, 1111313, 1111306, 1134343, 1111275, 1111241, 1111214, 1111188, 1111156, 1111132, 1111119, 1111099, 1111071, 1111049, 1111030, 1111024, 1111023, 1110997, 1134309, 1110964, 1134306, 1110929, 1110927, 1110905, 1110903, 1110874, 1110868, 1110849, 1110836, 1110794, 1110776, 1110730, 1134281, 1110698, 1134277, 1110643, 1110605, 1134272, 1110576, 1134266, 1110531, 1110512, 1134263, 1110498, 1110470, 1110468, 1110426, 1110423, 1110410, 1110401, 1134251, 1110392, 1110391, 1110357, 1110353, 1110344, 1110337, 1110326, 1110322, 1110321, 1110314, 1110295, 1110284, 1110275, 1110264, 1110246, 1110234, 1110217, 1110215, 1110213, 1110196, 1110190, 1134221, 1110189, 1110163, 1110129, 1134212, 1110081, 1134203, 1110001, 1109974, 1109969, 1109917, 1134188, 1109872, 1109853, 1134184, 1109822, 1109805, 1109794, 1109788, 1109784, 1109768, 1109722, 1109701, 1109694, 1109658, 1109657, 1109628, 1109615, 1109599, 1109579, 1109571, 1134157, 1109546, 1109542, 1109540, 1109537, 1109525, 1109496, 1109487, 1109477, 1109474, 1109473, 1109471, 1109464, 1109463, 1109462, 1134140, 1109436, 1109413, 1109408, 1109407, 1109397, 1109396, 1134135, 1109381, 1109379, 1109365, 1109319, 1109311, 1109288, 1109238, 1109215, 1109201, 1109190, 1109171, 1109110, 1134109, 1109050, 
1109048, 1109040, 1109022, 1109002, 1108993, 1108985, 1108975, 1108961, 1108959, 1108953, 1108935, 1108922, 1108914, 1108911, 1108875, 1108874, 1108867, 1108847, 1108821, 1108811, 1108809, 1108799, 1108789, 1136676, 1108763, 1108735, 1108658, 1108645, 1108637, 1108636, 1108632, 1108629, 1108607, 1134057, 1108564, 1108526, 1108523, 1108516, 1108510, 1134049, 1108492, 1108487, 1108481, 1108478, 1108472, 1108462, 1108406, 1108400, 1108332, 1134030, 1108268, 1134028, 1108241, 1108227, 1134024, 1108216, 1108203, 1108199, 1108147, 1108131, 1108121, 1108099, 1108075, 1108071, 1134001, 1108011, 1108009, 1107991, 1107982, 1107970, 1107953, 1107919, 1107915, 1107898, 1107885, 1133988, 1107845, 1133986, 1107834, 1133983, 1107749, 1107748, 1107745, 1107702, 1107677, 1107646, 1107618, 1107602, 1107593, 1107568, 1107563, 1107450, 1107401, 1107399, 1107381, 1107364, 1107344, 1107336, 1107308, 1107299, 1133931, 1107245, 1107210, 1107207, 1107193, 1107192, 1107189, 1107171, 1107141, 1107132, 1107123, 1107117, 1107112, 1107108, 1107092, 1107091, 1107085, 1107057, 1133907, 1107033, 1107015, 1133902, 1106978, 1106920, 1106912, 1106873, 1106858, 1106850, 1106840, 1106834, 1106797, 1106764, 1106756, 1106686, 1106680, 1106676, 1106672, 1106658, 1106652, 1106642, 1106607, 1106589, 1133854, 1106543, 1106539, 1106537, 1106535, 1106533, 1106521, 1106516, 1106508, 1106502, 1106450, 1106421, 1106414, 1106408, 1106389, 1106381, 1106377, 1106348, 1106343, 1106335, 1106306, 1106291, 1106290, 1133827, 1106234, 1106230, 1106216, 1106200, 1106196, 1106159, 1106126, 1106125, 1106099, 1106089, 1133812, 1106079, 1133809, 1106027, 1106011, 1133799, 1133798, 1105989, 1105982, 1105978, 1133792, 1105897, 1105882, 1105853, 1105850, 1105831, 1105817, 1105816, 1133780, 1105805, 1105798, 1105797, 1105766, 1105761, 1105753, 1105700, 1105689, 1105666, 1105625, 1105617, 1105614, 1105594, 1105593, 1105582, 1105571, 1105565, 1133757, 1105526, 1105506, 1105498, 1105489, 1105485, 1105441, 1105432, 1105431, 1105427, 1105422, 1133744, 1105381, 1105364, 1105358, 1105337, 1105298, 1105287, 1105276, 1105275, 1105253, 1105248, 1105239, 1105202, 1105190, 1105169, 1105163, 1133721, 1105148, 1105146, 1105144, 1105142, 1105110, 1105108, 1105100, 1105086, 1105073, 1105046, 1105042, 1133710, 1105021, 1105017, 1105013, 1104984, 1136634, 1104957, 1104950, 1104949, 1104942, 1104915, 1104781, 1104773, 1104763, 1104725, 1104720, 1104712, 1104704, 1104699, 1104698, 1104685, 1104640, 1104633, 1104630, 1104557, 1104513, 1104509, 1104506, 1104497, 1104468, 1104458, 1104454, 1133658, 1104406, 1104403, 1104339, 1133644, 1104279, 1104252, 1104250, 1104235, 1104223, 1104221, 1104198, 1104175, 1104124, 1104118, 1104105, 1104099, 1104087, 1133620, 1104071, 1104064, 1104036, 1104022, 1104020, 1104005, 1133611, 1103987, 1103974, 1103969, 1103966, 1103921, 1103911, 1103910, 1103888, 1103879, 1103828, 1103826, 1103816, 1103798, 1103793, 1103787, 1103776, 1103766, 1103759, 1103690, 1103687, 1103684, 1103679, 1103651, 1103601, 1103579, 1103561, 1103555, 1103553, 1103537, 1103535, 1133558, 1103511, 1133557, 1103468, 1103467, 1103446, 1103416, 1103387, 1103355, 1103322, 1103314, 1103303, 1103290, 1103289, 1133535, 1103260, 1103257, 1133533, 1103250, 1103182, 1103136, 1103121, 1103093, 1103091, 1103089, 1103084, 1103076, 1103019, 1103009, 1102998, 1102989, 1102979, 1102895, 1102892, 1102869, 1102862, 1102854, 1102849, 1102839, 1102827, 1102811, 1102803, 1102768, 1133474, 1102714, 1102704, 1102693, 1102667, 1102617, 1102590, 1102589, 1102579, 1102578, 1102498, 1102488, 1102477, 
1102474, 1102456, 1133442, 138157, 2610, 1133431, 149979, 377304, 1133428, 216736, 359286, 62525, 2663, 272951, 306421, 42476, 139000, 201194, 204951, 209797, 1937, 1133376, 285032, 1133374, 236763, 121746, 246076, 176953, 147537, 1133366, 360488, 175123, 160276, 159922, 131617, 318841, 80372, 63246, 146783, 349622, 288566, 1133349, 326787, 65809, 356260, 100154, 132133, 74274, 228670, 134903, 260302, 108391, 103402, 103595, 49439, 242042, 339286, 233529, 40228, 19684, 59230, 262686, 90139, 213353, 32202, 56678, 144051, 1133288, 48170, 137662, 49381, 346023, 180370, 132575, 49810, 24093, 1133258, 131405, 102330, 332859, 1133254, 266752, 252103, 1133252, 92176, 54747, 369873, 78332, 76140, 170498, 236254, 160010, 285797, 1133231, 370985, 201444, 239147, 95448, 48846, 122011, 274306, 209497, 173661, 1133202, 1136591, 132938, 1133194, 295928, 1133190, 212146, 318302, 107701, 1133187, 178610, 21792, 23963, 271748, 291553, 137712, 127150, 152627, 1133171, 187371, 130467, 148898, 103328, 347583, 120219, 345861, 126866, 237441, 162657, 323393, 62845, 54246, 333489, 150595, 180956, 1133122, 119400, 183919, 1136584, 100777, 198536, 142148, 1133113, 134628, 117174, 137468, 125791, 38087, 334251, 1133092, 272071, 57674, 28684, 78497, 129183, 31825, 1425, 284067, 243874, 161766, 278827, 266488, 89633, 94173, 194893, 257783, 354466, 187317, 139175, 67222, 1133057, 236776, 195958, 165977, 347294, 262636, 128874, 1133036, 308617, 331343, 30163, 43707, 30649, 27310, 2045, 330504, 83320, 271835, 315683, 178575, 135634, 200918, 1132996, 34366, 1132991, 33137, 20892, 135821, 131850, 1132977, 207703, 340377, 185119, 83959, 378632, 328464, 32278, 1132965, 1132959, 271881, 1132952, 287159, 275737, 274797, 1132945, 309926, 268574, 289499, 1132925, 248362, 1132921, 100932, 204904, 56894, 91565, 280019, 1132913, 106320, 1132903, 182350, 355540, 132495, 323018, 62577, 1132890, 37185, 1136559, 65052, 205954, 225986, 161346, 159992, 1132847, 239971, 113269, 43476, 134905, 326921, 158752, 68626, 334222, 75286, 1132834, 96857, 225703, 134469, 232008, 140770, 50797, 59725, 172940, 130168, 1132815, 183696, 121488, 245833, 87730, 216731, 224688, 1132796, 68618, 38122, 64384, 1132790, 190054, 191625, 202250, 310488, 222133, 247819, 1136550, 247506, 54958, 229260, 200144, 163053, 95594, 175258, 80718, 219809, 320340, 1132754, 85348, 293041, 72577, 334716, 41184, 186086, 50626, 29725, 1132735, 239245, 1132734, 75799, 124895, 1132717, 261101, 91157, 44340, 67422, 92143, 31192, 276343, 49482, 124291, 227104, 57710, 143012, 74057, 94039, 272269, 299381, 143955, 268235, 261207, 124798, 260080, 242796, 117115, 299709, 165393, 1214, 157565, 1132651, 245620, 55727, 313766, 25026, 57218, 344937, 67147, 313491, 114725, 50189, 70108, 303874, 160801, 1132592, 24041, 88882, 289943, 1132583, 22364, 112541, 226572, 200782, 1132564, 63290, 156302, 63795, 184833, 156548, 268010, 227516, 90708, 1132549, 208493, 82973, 147064, 56808, 1132529, 357336, 160313, 278403, 176124, 188784, 37547, 30359, 347113, 293516, 233185, 64430, 127812, 364094, 12761, 72904, 271435, 277093, 166683, 167974, 160574, 301352, 207754, 181479, 117965, 169257, 1132444, 259070, 135347, 153663, 296378, 60870, 184223, 23531, 27528, 98675, 125929, 1132409, 326797, 225419, 359463, 112864, 259437, 340815, 71908, 88808, 1132399, 43167, 42055, 187763, 115594, 95449, 40124, 196233, 202384, 81184, 213758, 138933, 1132360, 343640, 293069, 289801, 273773, 176499, 333375, 1132352, 1132347, 309040, 297010, 295730, 320086, 166508, 1132312, 148209, 13101, 25465, 129457, 109587, 
277868, 199407, 36299, 13912, 294614, 160309, 65904, 59911, 200228, 267705, 191894, 76102, 174722, 71138, 321703, 11863, 64535, 123648, 1132255, 188166, 92670, 22836, 137674, 62078, 83401, 373795, 76154, 166325, 274555, 36951, 164940, 266150, 153592, 200695, 239250, 207572, 181305, 303706, 97834, 195677, 201381, 153123, 159867, 189529, 174157, 203720, 320700, 39325, 1136491, 134127, 256066, 39660, 335114, 209769, 1132162, 312826, 455425, 538026, 67225, 162696, 1132147, 689120, 508870, 394040, 404889, 490071, 495618, 549190, 49387, 673689, 735360, 208702, 487934, 131247, 563898, 513591, 502104, 752441, 150443, 485594, 586761, 57, 4776, 214625, 1136482, 493900, 609252, 1132087, 732756, 654897, 179955, 461950, 660426, 685004, 734466, 368900, 156776, 628564, 344155, 473182, 386934, 692151, 706780, 482382, 666238, 720868, 146574, 162946, 728829, 466657, 580605, 129695, 1132047, 398127, 473495, 675920, 506768, 415155, 736117, 525069, 466400, 671117, 653909, 643328, 383847, 517763, 713301, 220290, 181644, 389541, 1132006, 699218, 537744, 745784, 369105, 723486, 1131983, 642800, 452336, 756790, 734758, 334219, 255251, 702598, 450543, 613422, 626761, 89634, 596136, 167533, 727181, 429906, 608124, 713357, 573701, 655939, 684502, 538143, 662108, 265494, 120593, 722220, 445502, 597686, 544220, 537188, 718364, 653077, 624210, 710887, 691798, 169584, 705174, 1131909, 209531, 452924, 451826, 647597, 463021, 709056, 490982, 643870, 144842, 466338, 1131892, 91778, 203783, 756681, 433786, 756829, 366342, 728735, 1131884, 538878, 508254, 575461, 504751, 229045, 628808, 108500, 703554, 599137, 671829, 720949, 696201, 554585, 389908, 745278, 1131840, 464930, 674566, 700756, 486173, 1131830, 21838, 353333, 1131821, 731129, 1131818, 680613, 1131813, 730149, 556637, 430755, 727943, 20616, 599673, 455256, 548054, 539566, 501894, 585888, 443964, 644356, 611442, 521254, 713278, 1136443, 547374, 741173, 1131777, 606672, 483253, 588308, 725726, 244472, 238804, 490000, 478255, 708144, 236269, 650476, 1131754, 465990, 647887, 435864, 714849, 660983, 415438, 543638, 1131738, 1131735, 534684, 733956, 643361, 463180, 671027, 1131724, 618695, 396098, 190164, 84797, 477552, 1131713, 399730, 1131703, 1131699, 642647, 192397, 370635, 466536, 549327, 658273, 522054, 707645, 386213, 463137, 562352, 440144, 169166, 595808, 508476, 450426, 588762, 680190, 636417, 618349, 629913, 422890, 277177, 642252, 447697, 515005, 503164, 259924, 584017, 558978, 694851, 672262, 78640, 537267, 581447, 1131613, 636093, 417895, 1131609, 180593, 739636, 625256, 731251, 188053, 449539, 475394, 738829, 419729, 1131593, 121843, 4383, 582098, 489238, 722550, 704182, 431159, 473020, 615000, 65692, 535699, 533613, 589875, 635379, 1131559, 1136425, 1131557, 464864, 746963, 563962, 1131554, 409031, 743777, 582128, 496927, 617223, 1131543, 677292, 540318, 243941, 747597, 578356, 1131533, 576964, 734678, 707689, 501442, 561499, 692815, 100940, 1131512, 473028, 1131510, 497632, 1131507, 383831, 566937, 290290, 49850, 637080, 614540, 689461, 747285, 614620, 694270, 633073, 391077, 603796, 676426, 1131478, 415661, 402595, 515273, 1131467, 737266, 526352, 460855, 485891, 726929, 96565, 582339, 1131446, 586754, 505992, 251774, 588712, 677133, 455371, 710756, 342150, 604332, 679482, 17199, 451643, 755461, 259312, 1131415, 1131411, 725828, 564310, 608244, 327855, 675569, 706373, 724657, 283924, 702722, 220398, 598348, 150347, 1131396, 413054, 516429, 730062, 710329, 479687, 1131383, 737913, 469566, 591326, 429876, 405298, 21744, 551851, 507424, 630264, 499666, 
561538, 316436, 623112, 494786, 515775, 641164, 1131343, 574337, 614338, 562030, 455513, 562904, 753220, 562697, 738248, 230808, 1131320, 484350, 198610, 718133, 685661, 402991, 607582, 500775, 134875, 747566, 429205, 707853, 1131307, 1136401, 1131301, 521329, 392124, 393611, 1131295, 622262, 527745, 227967, 399503, 441204, 1131278, 580227, 592329, 693494, 618024, 664540, 697374, 1131260, 1136397, 43548, 632020, 750167, 410387, 187330, 685998, 743021, 613923, 446160, 528174, 1131240, 707577, 567878, 240102, 619013, 458674, 1131227, 1131222, 412136, 515123, 571215, 429182, 1131216, 674956, 1131209, 438344, 403520, 632935, 112928, 651821, 696173, 1131192, 674691, 633998, 1131182, 565366, 533428, 273443, 1131173, 464548, 681173, 451150, 737112, 389501, 579601, 387851, 593455, 534021, 452761, 643223, 456807, 566216, 83666, 1131155, 454824, 634650, 242583, 488073, 47269, 522358, 430258, 711802, 455957, 538373, 577234, 714881, 535009, 716082, 521367, 486431, 490752, 1131104, 442307, 709522, 694739, 703736, 1131092, 692238, 708094, 405684, 1131075, 736703, 632755, 144050, 463660, 534279, 493020, 692201, 723457, 527398, 557952, 1131049, 1131048, 699817, 241937, 1131046, 390313, 540655, 494111, 457426, 479475, 709726, 701898, 430704, 613827, 754589, 688208, 352420, 508510, 150505, 418883, 595641, 153809, 427372, 1131013, 589844, 1131008, 517135, 1131005, 1131004, 540951, 661717, 671630, 28661, 1130996, 406838, 629420, 1130994, 719438, 1130988, 671720, 702952, 644204, 563542, 437910, 716995, 673666, 604500, 575378, 573622, 484886, 676791, 515531, 708693, 370750, 644678, 1130937, 753089, 724623, 219723, 1130935, 590560, 1130933, 571087, 515785, 555353, 684994, 603617, 503963, 397564, 226012, 262542, 405974, 603714, 177610, 389739, 682567, 1130897, 674513, 567881, 531311, 544191, 455561, 426367, 380561, 245295, 663156, 431674, 493829, 129347, 686392, 718295, 74000, 514264, 678466, 503613, 127437, 739166, 583772, 722441, 188318, 719371, 626218, 258390, 717111, 1130849, 418725, 595910, 513838, 674571, 1130837, 533312, 29811, 466878, 459503, 623603, 1130830, 694106, 593611, 569902, 725715, 668648, 402832, 530079, 534250, 327062, 1136350, 204088, 457951, 416646, 511466, 576357, 72015, 1130806, 568405, 1130805, 742695, 538393, 412073, 637960, 598934, 561834, 747004, 200289, 562002, 524438, 682859, 360822, 394208, 1130772, 78501, 628136, 445026, 689657, 486716, 576292, 682902, 435130, 721273, 563938, 446290, 128543, 699279, 529769, 721661, 717849, 626517, 475408, 493845, 587923, 639288, 1130732, 674504, 599504, 1130728, 543290, 1130726, 444598, 439766, 621550, 417717, 552319, 706900, 468021, 412410, 455359, 161162, 414393, 1130684, 527633, 720395, 576305, 484467, 619408, 515064, 452385, 686422, 567899, 1130672, 715765, 663755, 709494, 55079, 727291, 669130, 1130667, 428479, 706985, 570789, 262878, 616705, 83712, 612831, 319218, 619655, 396122, 522212, 729058, 745317, 438891, 1130640, 1130635, 740366, 452155, 734746, 409853, 624662, 403035, 593489, 420872, 436325, 557806, 1130608, 488887, 596699, 344368, 416672, 614567, 554792, 575621, 456383, 547426, 547711, 521791, 672626, 148977, 514360, 25802, 456029, 510645, 506278, 642473, 73619, 552459, 1130575, 433683, 391829, 642144, 509654, 408986, 755381, 1130558, 546404, 561461, 665022, 1130548, 493826, 406237, 1130536, 680480, 1130524, 662687, 731691, 748843, 1130511, 414899, 491017, 569409, 560815, 412865, 712140, 388319, 491200, 297146, 695196, 499188, 115952, 524469, 14244, 130858, 593986, 489013, 541229, 29667, 126582, 458638, 497813, 418389, 123525, 675245, 
592333, 640857, 1130449, 464240, 502221, 409003, 700345, 695697, 661986, 1130435, 578560, 1130431, 382254, 586049, 554435, 461052, 410169, 502453, 554031, 159535, 695737, 426684, 473062, 646245, 402135, 573471, 557944, 691188, 523197, 481998, 94642, 135936, 397550, 511215, 234583, 643749, 490936, 499385, 1130383, 644600, 1130378, 625691, 675823, 232508, 512536, 642301, 574114, 717873, 235560, 485567, 739828, 416823, 567870, 521342, 271620, 1130345, 636141, 30677, 444485, 621989, 1130340, 558003, 1130335, 594086, 405780, 1130327, 323085, 487275, 747961, 59330, 399595, 510004, 1130312, 696381, 435223, 517928, 1130307, 684324, 729023, 179219, 638898, 1130296, 585585, 706401, 326176, 730832, 546217, 531470, 514029, 599538, 1130277, 663182, 453350, 358771, 413075, 197375, 542015, 179066, 323565, 439146, 749326, 1130240, 50863, 1130232, 556923, 70752, 537610, 571009, 750029, 616483, 639157, 760171, 552670, 392181, 579092, 497356, 744538, 1130193, 498576, 413335, 444318, 558963, 644392, 584499, 474468, 61526, 246118, 46683, 712006, 406640, 494285, 587145, 1130156, 216656, 695643, 647777, 529272, 591718, 151408, 148503, 663916, 429843, 526266, 696148, 408157, 234277, 645349, 402799, 1130123, 703298, 464465, 6113, 727572, 495159, 68164, 596533, 1130115, 648174, 529043, 278691, 354076, 415184, 396312, 493439, 617033, 703240, 427221, 631852, 147550, 144138, 1130095, 479495, 501645, 79438, 412750, 30282, 1130084, 375206, 407457, 582138, 581229, 649335, 498021, 728112, 656101, 403914, 633411, 110375, 159767, 557592, 622238, 630123, 513303, 739171, 631665, 1130023, 749976, 1130015, 1130013, 611400, 600638, 1130006, 486312, 538718, 323959, 699896, 466456, 696519, 489166, 509832, 683410, 443766, 582390, 558809, 242115, 437191, 1129981, 416692, 427311, 520202, 278034, 727154, 670360, 1129973, 620882, 705905, 549722, 709492, 744835, 1129959, 659901, 473029, 668190, 480320, 671862, 396967, 566618, 629337, 444688, 497659, 693353, 662982, 655097, 715661, 430114, 428503, 596837, 394148, 506577, 1129896, 418615, 677304, 547770, 1129888, 6055, 407007, 1129886, 724767, 634856, 581844, 1129878, 596613, 721708, 169442, 749813, 664034, 611049, 547771, 729755, 626823, 397579, 62517, 500646, 721882, 490612, 284910, 594476, 241399, 585220, 394980, 732794, 1129841, 474961, 1129838, 745872, 582183, 410391, 1129835, 414970, 389506, 412503, 559629, 511806, 504057, 601649, 12553, 1129822, 418501, 535288, 407383, 743489, 500680, 393238, 712468, 399701, 736364, 536101, 467932, 738788, 469356, 193676, 668999, 400681, 516185, 532352, 691956, 177167, 424318, 422938, 423807, 572976, 530295, 369311, 448042, 680004, 536822, 748434, 451642, 1129769, 177775, 614001, 477793, 513836, 408908, 482580, 463421, 108813, 466454, 540572, 436582, 601783, 470680, 670004, 497483, 410329, 484646, 1129733, 221987, 1129731, 535067, 516941, 725044, 656422, 1136245, 555674, 700590, 667607, 334662, 703134, 412395, 143721, 565173, 236793, 1129700, 691711, 641617, 567820, 651708, 637695, 664605, 625633, 658427, 62521, 61519, 636208, 593007, 722260, 89360, 488839, 325057, 684536, 553161, 600524, 411822, 516087, 542582, 1129650, 636814, 495608, 577746, 553571, 1129642, 662028, 431988, 650966, 500574, 230013, 1129630, 515217, 725969, 737654, 714335, 678724, 606791, 427033, 466865, 466968, 500832, 674396, 661623, 693880, 462794, 181329, 751571, 645149, 570875, 439879, 1136233, 460506, 514421, 602653, 544978, 594835, 686436, 111077, 534202, 631789, 583527, 706624, 675316, 404221, 549110, 388265, 549237, 633137, 716717, 661990, 132345, 600959, 726834, 642841, 
249364, 645818, 752488, 705551, 368124, 458832, 418832, 1129518, 486839, 61075, 1129514, 463679, 680225, 739996, 1129508, 512264, 595689, 686229, 435526, 1129499, 734836, 237465, 617192, 89149, 596659, 620810, 188273, 547627, 548020, 402132, 426403, 204520, 508162, 536931, 745373, 624304, 1129483, 477474, 249814, 663878, 621190, 383220, 255025, 432162, 564558, 617318, 450640, 547661, 426435, 726971, 569053, 718490, 412407, 659825, 490358, 723295, 1136215, 1129452, 521026, 1129448, 448305, 733585, 1129444, 637187, 661763, 754567, 715756, 706501, 579403, 400491, 1129436, 581075, 416561, 1129433, 479358, 434008, 658865, 1136212, 630092, 524266, 533379, 35150, 552364, 430536, 1129422, 488571, 759514, 754609, 409736, 680230, 688646, 465156, 269425, 571432, 553530, 632671, 469121, 552458, 610414, 724606, 759101, 465920, 1129357, 1129356, 407302, 538850, 425694, 425438, 648925, 439929, 497477, 423254, 129008, 530174, 511841, 625685, 397803, 1129315, 756934, 603085, 645080, 512985, 552563, 628657, 388981, 18075, 321226, 758918, 213726, 453857, 567505, 598973, 684899, 412357, 643181, 617733, 92974, 686139, 657594, 653662, 571431, 510514, 626866, 591784, 595422, 594352, 613576, 1136198, 702508, 731662, 199831, 590445, 679625, 412182, 677485, 395326, 519027, 1129241, 723897, 599724, 442810, 1129232, 701050, 606110, 60900, 721942, 460002, 689019, 518796, 81017, 687671, 628109, 457609, 717538, 590726, 535449, 418157, 497919, 390770, 681047, 456674, 653450, 642760, 1129167, 541708, 738451, 599178, 36033, 452422, 703281, 733738, 687792, 202310, 752388, 432012, 1129145, 149491, 535668, 582756, 612251, 541429, 642328, 588848, 662167, 742808, 558303, 718489, 750114, 499364, 653517, 1129103, 400435, 533677, 648975, 1136890, 415479, 632456, 154904, 659458, 1136183, 580040, 1129085, 1129084, 521869, 516532, 691028, 755688, 726379, 713127, 432503, 593386, 1129067, 437986, 458616, 688815, 410946, 612158, 98415, 630845, 583249, 697861, 1129055, 611027, 1136180, 156707, 722996, 469976, 386091, 1129042, 396974, 747939, 675811, 172986, 749215, 468437, 173834, 264602, 566732, 643303, 556581, 664751, 57292, 461190, 461659, 124607, 89100, 734136, 92742, 635081, 14571, 518785, 196450, 675430, 613222, 1128995, 581535, 412, 598286, 403328, 653560, 474875, 602263, 416161, 1128981, 589171, 1128976, 742238, 404407, 750926, 544890, 511401, 448000, 608870, 637004, 1128958, 1128956, 503949, 208417, 1128954, 333182, 1128949, 419783, 591543, 637698, 402300, 1128939, 668300, 566407, 553988, 486939, 1128927, 537223, 176781, 748579, 1128917, 532494, 472232, 1128914, 647637, 1128903, 1128900, 79203, 271544, 96933, 1128888, 545791, 748018, 68360, 383029, 395660, 493218, 423863, 518578, 669798, 72298, 424280, 655607, 1128872, 561064, 1128860, 378866, 569527, 462111, 702919, 596645, 626166, 252887, 730156, 389192, 584757, 611785, 588563, 696261, 535610, 730541, 1128840, 399884, 389808, 470372, 294538, 411600, 487606, 428836, 609922, 64259, 1128821, 1128818, 427505, 495243, 576792, 631266, 202726, 582484, 395665, 693097, 714863, 567976, 392882, 450290, 357777, 583824, 573221, 335374, 525433, 604884, 622467, 524369, 609071, 613079, 622187, 569862, 269960, 187574, 460663, 1128760, 418801, 604102, 1128752, 142656, 69841, 572432, 581303, 744230, 637909, 1128738, 450498, 640885, 505647, 1136152, 1128726, 154164, 493723, 477176, 327812, 651679, 453305, 697773, 121431, 415021, 400908, 581377, 236105, 735502, 459153, 1128691, 1128689, 322610, 646814, 449717, 680854, 250927, 541155, 1128681, 197174, 403824, 512569, 526597, 1128676, 738859, 
569047, 1128668, 97873, 549663, 596007, 678053, 688140, 653413, 620231, 197875, 734529, 416738, 509003, 164798, 567024, 413801, 366118, 520273, 557046, 424856, 566615, 122690, 572446, 1128624, 543631, 1128620, 634302, 610923, 755062, 608491, 561885, 575606, 555772, 1128567, 480646, 706291, 472957, 564382, 44537, 1128562, 740168, 274605, 653579, 407816, 634055, 651969, 280704, 1128549, 659297, 447912, 631766, 408905, 173181, 522821, 624426, 1128539, 407575, 409057, 409708, 644637, 395821, 21003, 413172, 404156, 1128524, 412817, 440973, 576361, 626536, 345102, 655332, 1128511, 552956, 1128508, 708745, 409157, 1128494, 1128492, 22256, 406582, 1128484, 156181, 700430, 481345, 690869, 759007, 587999, 677421, 514241, 408686, 561991, 572170, 604126, 741514, 1128450, 610645, 725355, 751255, 636603, 1128432, 417905, 1057246, 812967, 1128427, 771994, 337952, 64882, 779025, 1128417, 818265, 771555, 845740, 320499, 820021, 946751, 783583, 1013322, 975809, 70472, 1037917, 968788, 1022698, 373224, 875528, 1128385, 1128380, 964482, 245463, 873986, 1029871, 1032729, 1037279, 149975, 1128346, 881533, 795754, 901007, 970549, 1128337, 959256, 1068313, 289517, 89877, 836062, 777839, 961525, 1078498, 242603, 986960, 1128319, 985840, 1030163, 985900, 857963, 797562, 1012478, 1128297, 1037088, 796223, 977852, 832897, 1128291, 1128287, 936422, 881142, 876885, 918446, 769310, 165237, 1128280, 795757, 947066, 840136, 1128276, 95167, 875518, 868055, 893658, 1136113, 854924, 1128264, 995898, 892353, 1045190, 1053031, 327873, 1011812, 838440, 132007, 1041905, 248407, 878415, 1018792, 874523, 774158, 1008502, 997671, 1076615, 227230, 1020019, 827488, 810830, 165287, 866615, 899741, 1000993, 937626, 1057030, 1136108, 1057241, 1014933, 1008191, 1055448, 22817, 809594, 136476, 899268, 933594, 999612, 1019246, 1128198, 887883, 947119, 917298, 860972, 1050241, 968631, 229107, 812644, 1034759, 857520, 929366, 982144, 843768, 42541, 229677, 1128166, 835940, 1128164, 1128160, 984952, 1032680, 894139, 926094, 992867, 936255, 1074449, 186071, 90368, 72476, 802667, 795872, 859387, 188445, 820387, 1033642, 1128119, 761430, 236359, 800142, 1005146, 998479, 1034491, 800703, 1049419, 167873, 772409, 1046115, 828068, 1054959, 805998, 978459, 1128087, 980023, 1128080, 916768, 1044449, 925041, 962160, 1128071, 191143, 1056313, 826354, 781539, 304380, 203790, 1128064, 797200, 22372, 803115, 957479, 768114, 991332, 897222, 809570, 295077, 786761, 243076, 796050, 1064020, 1128040, 999644, 239143, 1013556, 1128028, 1032719, 956231, 887242, 879329, 907807, 1020489, 967278, 893358, 1078015, 861865, 81693, 784092, 205959, 768939, 65305, 1127990, 1057936, 1040752, 935962, 107205, 825453, 949738, 952722, 924092, 1012829, 943014, 258617, 774027, 912931, 1127969, 893657, 829295, 1064659, 1127959, 772836, 932294, 71557, 1037376, 802776, 1127938, 856861, 1036776, 1016732, 1127932, 815939, 1003514, 1031609, 161027, 254740, 1024432, 903268, 1127914, 1127912, 136726, 1058856, 1043229, 121484, 1013679, 859955, 987306, 864853, 893756, 1028701, 1127897, 1127896, 845232, 839841, 899014, 916107, 1063739, 1010069, 842886, 767589, 1064344, 905050, 811245, 880110, 1054797, 1070930, 252441, 841870, 763443, 849720, 1026733, 996146, 883176, 937486, 791397, 158817, 999522, 8718, 895721, 1034305, 824384, 877676, 1127822, 322709, 907310, 1127810, 24636, 833302, 811447, 858395, 1127802, 978096, 331648, 1034015, 27705, 56067, 148159, 925169, 834934, 1046166, 1078080, 771730, 1055227, 871301, 1030994, 67359, 958846, 1033381, 885018, 1127754, 892490, 788960, 1127753, 
891987, 1127752, 174305, 78352, 1127741, 230824, 919556, 174039, 1127730, 112175, 885095, 71390, 839912, 990480, 901678, 1127718, 838709, 890075, 917100, 314262, 951320, 1034261, 1037159, 941866, 1001968, 673041, 869918, 978121, 1127697, 1127695, 20734, 779540, 830234, 960998, 916453, 931401, 972092, 854884, 233900, 226741, 253406, 1071255, 856393, 1066186, 171431, 809913, 1127674, 870172, 1127668, 859431, 763641, 1003630, 1032978, 767490, 766142, 1072559, 811974, 1036830, 252314, 821619, 168786, 770534, 67379, 844464, 8452, 954096, 815308, 1127621, 856978, 844433, 1010270, 761225, 803948, 867262, 968186, 988542, 1059674, 1063049, 792925, 1064595, 143062, 159842, 987894, 853267, 150207, 1127588, 994761, 975140, 352949, 1070417, 865909, 86290, 804916, 1127570, 1127567, 980996, 1127562, 1127561, 1065678, 1068306, 1127554, 935643, 883929, 1040730, 1127547, 946839, 941232, 968238, 836003, 860643, 1048716, 1073975, 847831, 870184, 797815, 890953, 920289, 925119, 979571, 841302, 8136, 1031152, 976102, 118484, 865206, 765659, 770356, 72956, 1136042, 793699, 864818, 932299, 949516, 1061852, 1068587, 981837, 827381, 859101, 979007, 902790, 871016, 779201, 918750, 1127498, 279176, 838421, 885663, 60764, 1127493, 1061326, 1127489, 1067801, 791513, 20010, 865218, 874196, 939698, 934193, 1127468, 790118, 923070, 1005149, 1021302, 1002690, 952938, 1059177, 1068025, 1127462, 864864, 911889, 1025104, 24462, 828518, 1127448, 1061590, 998480, 1127444, 54818, 903790, 988595, 180298, 894466, 1127434, 993838, 1127425, 1051206, 1015668, 764766, 890643, 919771, 1127411, 797565, 908897, 795460, 880092, 907635, 979086, 914186, 187585, 1040212, 1127398, 1013228, 192284, 991798, 916050, 907301, 872978, 1136028, 1080183, 831024, 789014, 1127378, 150873, 1127364, 925292, 1054749, 766379, 129435, 858696, 798239, 960142, 1127351, 1005502, 919760, 804755, 813370, 1049877, 919707, 1070083, 995599, 772833, 888413, 1127325, 947430, 843139, 761907, 1055761, 264434, 814908, 929714, 761014, 775430, 932859, 824644, 37706, 842042, 115718, 864762, 762072, 101478, 903661, 854655, 72228, 875937, 937168, 1050808, 869292, 783335, 976771, 761313, 979890, 868913, 981240, 1015949, 1037830, 1018202, 982019, 35370, 816794, 1008453, 982696, 129700, 838235, 1127195, 865638, 917436, 881246, 762434, 806450, 1030502, 1127188, 1127184, 65383, 904565, 1136013, 967706, 1053723, 1023767, 31169, 1043151, 976293, 1127177, 1014055, 297058, 1049092, 1127162, 834856, 858461, 1025444, 968552, 902969, 844140, 982810, 77565, 881067, 340541, 319123, 781808, 1136008, 977828, 266611, 899428, 800348, 1043976, 1127110, 316262, 295361, 791170, 995778, 1040530, 357297, 56962, 982229, 1072522, 1025072, 168655, 1127084, 1127081, 884436, 989526, 864694, 187675, 799293, 292227, 892584, 1127044, 1030954, 1011348, 942728, 861724, 966614, 966679, 18164, 1071370, 976827, 1127025, 226190, 782125, 937753, 971564, 955228, 955028, 1052076, 1009291, 960734, 952306, 1126994, 1017930, 896746, 1126981, 145391, 1049202, 902586, 1126971, 878401, 1049390, 1126963, 855667, 985913, 994564, 913285, 1126958, 888689, 891634, 1013904, 916214, 975875, 1126948, 981828, 1126945, 1014189, 283348, 1126935, 881738, 1046757, 1126932, 996236, 1126931, 1080843, 1126925, 1081591, 1075711, 860145, 830462, 1126914, 240792, 1126910, 266390, 986693, 1001492, 73882, 1126880, 941093, 979044, 810631, 1126875, 1042543, 1007481, 87762, 1065739, 292284, 980185, 796451, 909549, 812006, 304449, 116653, 905706, 788034, 1002287, 1126817, 982967, 1126815, 896446, 870582, 1126807, 1019607, 1060142, 917813, 
95381, 1073430, 1041628, 197487, 903975, 1049177, 233881, 1005869, 1052835, 877076, 1068315, 1077039, 999829, 1060669, 931621, 1126761, 974727, 1135966, 869887, 888762, 1126750, 1051205, 1035340, 819279, 1126742, 136473, 1126736, 858242, 776547, 1126711, 800274, 852966, 1051326, 803633, 984476, 12954, 911480, 855901, 981948, 929693, 761941, 1078365, 770648, 767499, 253693, 203321, 836888, 1126691, 278684, 931678, 1078766, 12048, 1126673, 899891, 123919, 904461, 798979, 798354, 1126658, 1065534, 873607, 1126654, 825961, 1126651, 955453, 331284, 92008, 908665, 1030949, 316302, 777578, 1033997, 1027534, 956670, 897892, 999910, 1000893, 1046684, 1126609, 821676, 793432, 325310, 1126587, 1076289, 935870, 980263, 799784, 858159, 892768, 856417, 992595, 795622, 1065206, 276928, 1053061, 833797, 881070, 1126545, 772968, 949129, 41048, 1049736, 1126533, 855243, 773025, 1016486, 1126531, 992162, 830306, 963943, 990969, 761883, 952768, 865754, 826731, 833544, 794347, 1126517, 853533, 811650, 1049180, 158469, 1126507, 303585, 271672, 1126499, 910699, 1135935, 1126491, 1002572, 939521, 1136859, 1135933, 122010, 927216, 868111, 250673, 823415, 1004948, 993055, 814791, 1078745, 966542, 915433, 8356, 1037981, 836498, 803596, 931027, 837140, 1065923, 1126425, 1061433, 852842, 1126416, 1126414, 802817, 910246, 1126403, 772885, 968995, 789140, 905574, 1070533, 25179, 869486, 867346, 930833, 981606, 833432, 367519, 1126380, 51514, 1126377, 1126374, 1081455, 971331, 903073, 921267, 8008, 995529, 879076, 1004774, 1126361, 908077, 845321, 960803, 981207, 883184, 935437, 917606, 65416, 906238, 232919, 963471, 858085, 888024, 1009408, 1056764, 9082, 131573, 1126331, 1081930, 762035, 811758, 975774, 810958, 1126317, 887806, 976678, 849869, 304430, 893117, 831794, 783751, 67545, 88116, 834181, 836636, 1032658, 932447, 767549, 959723, 866276, 881767, 1041743, 1035658, 1043138, 1016027, 232609, 958435, 998270, 1043346, 1126267, 845725, 840782, 807974, 1028772, 11258, 876934, 831217, 870157, 906391, 1052164, 831882, 1068326, 299461, 781689, 1126245, 1126244, 972647, 983987, 968667, 783989, 119263, 847301, 856149, 272863, 939453, 1050794, 954363, 1126223, 996876, 814920, 1126215, 974485, 908101, 922237, 961305, 1126209, 992904, 971415, 786171, 1022554, 896672, 807223, 899876, 964608, 792143, 59381, 1126178, 13823, 904780, 1041217, 842703, 1126146, 952165, 332824, 9129, 1049881, 877556, 1108939, 1112389, 792752, 1119729, 1105095, 1105103, 1128373, 1127622, 1124979, 885490, 1119827, 190044, 500575, 883785, 264403, 1108100, 421756, 1108307, 966413, 1111546, 156493, 1124145, 1110199, 1056204, 199143, 835929, 1063750, 1104031, 398483, 432930, 478605, 1044797, 1124464, 1107988, 130510, 1127893, 1135377, 1126206, 645693, 1133328, 646207, 1012021, 489204, 1119075, 573724, 600573, 1120447, 574575, 1055865, 494835, 1126814, 168216, 100983, 194013, 1119092, 1133167, 1133418, 427578, 324211, 11096, 1134787, 89928, 499920, 527433, 40578, 694342, 1125225, 1136427, 1128856, 719381, 53175, 131651, 1037798, 915593, 264014, 1121402, 962179, 1117099, 744366, 277780, 1114563, 1014126, 1117346, 148538, 451602, 474735, 359349, 903469, 1115776, 1104492, 315637, 1112341, 588587, 706080, 117831, 1120868, 1111906, 523270, 133358, 67262, 1121166, 805321, 1129828, 131843, 104861, 833860, 207786, 691330, 1103528, 1132213, 335594, 1134138, 138632, 671071, 705609, 1114819, 855434, 1134463, 747511, 502261, 183378, 654723, 1117387, 479871, 541571, 1106007, 60235, 180442, 710347, 1124210, 287683, 490595, 291865, 794725, 1103812, 436600, 1047259, 964223, 
564054, 87181, 1116052, 554515, 443396, 1123581, 714453, 972007, 929033, 433234, 1121709, 88073, 87452, 1005165, 1133249, 953067, 101169, 855410, 1121276, 1114646, 19335, 789700, 47923, 301524, 405717, 165633, 952774, 766511, 1106293, 452431, 1109818, 1047902, 306076, 551040, 1059231, 182539, 1115569, 351697, 904965, 292906, 662372, 364142, 20455, 1119058, 203318, 1126813, 240053, 1115392, 1113437, 1122461, 1116341, 1129237, 912070, 278813, 423273, 507445, 25129, 146187, 634428, 1121986, 321441, 532603, 1030303, 1037496, 1043135, 1045109, 1049519, 1051399, 1056416, 1064670, 1065636, 1071750, 1103153, 1103791, 1104501, 1105792, 1105860, 1106928, 1106979, 1107315, 1107440, 1108450, 1108466, 1108473, 1108651, 1108729, 1109699, 1109707, 1109850, 1110678, 1112142, 1113042, 1113256, 1114166, 1114286, 1114993, 1115210, 1116380, 1117817, 1117886, 1118370, 1118426, 1119118, 1119543, 1120588, 1121353, 1121879, 1122138, 1122767, 1122843, 1123657, 1124552, 1125632, 1125755, 1126523, 1126738, 1127004, 1127233, 1127540, 1128456, 1129081, 1130705, 1130734, 1130847, 1131069, 1132044, 1132247, 1132532, 1132842, 1132943, 1132950, 1133485, 1133579, 1134094, 1134207, 1134431, 1134680, 1134939, 1134988, 1135268, 1135283, 1135413, 1135626, 1136043, 1136047, 1136769, 1136962, 118440, 119821, 121171, 125659, 135802, 141630, 144862, 156498, 166046, 169208, 174463, 175920, 177604, 181626, 197312, 206106, 227873, 23849, 240158, 245052, 246883, 253749, 256942, 257119, 258062, 26703, 273695, 302846, 318362, 324585, 330501, 330975, 332593, 336901, 3505, 360721, 384356, 390360, 405163, 42255, 425632, 426175, 42752, 435548, 436707, 444389, 449367, 452915, 463271, 469589, 47210, 482726, 48792, 50122, 514096, 519025, 53233, 537060, 537817, 543273, 545355, 555530, 583468, 586148, 590019, 605127, 610265, 611953, 640502, 64647, 653399, 655526, 655914, 660198, 67316, 673670, 701453, 703782, 708979, 716113, 730539, 735482, 735922, 75198, 768208, 779302, 792635, 794223, 794429, 801118, 804066, 808400, 809525, 814183, 819983, 849550, 85020, 850358, 86606, 877809, 883915, 88495, 911232, 914916, 91576, 918162, 938400, 940547, 945835, 978031, 985594, 99005, 997622, 999466, 132622],
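The lists above appear to hard-code a fixed ordering of query IDs per topic set (e.g. 'msmarco-doc-test'), presumably so that topic iteration is deterministic across runs. As a minimal illustration only — the dict and function names below are hypothetical and not part of this patch — such an ordering could be consumed like this:

# Hypothetical sketch (not part of the patch): shows how a fixed per-topic-set
# ordering of query IDs, like the lists above, can drive deterministic iteration.
QUERY_ID_ORDER = {
    # first few IDs copied from the 'msmarco-doc-test' list above, truncated
    'msmarco-doc-test': [355339, 1035339, 943613],
}

def iterate_topics_in_order(topics, order_key):
    """Yield (query_id, topic) pairs following the fixed ordering for order_key."""
    for qid in QUERY_ID_ORDER[order_key]:
        if qid in topics:  # skip IDs missing from the loaded topic set
            yield qid, topics[qid]

# Toy usage with a stand-in topics dict (real topics would be loaded elsewhere):
topics = {355339: 'example query', 943613: 'another query'}
for qid, text in iterate_topics_in_order(topics, 'msmarco-doc-test'):
    print(qid, text)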
diff --git a/pyserini/search/__init__.py b/pyserini/search/__init__.py
--- a/pyserini/search/__init__.py
+++ b/pyserini/search/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/search/__main__.py b/pyserini/search/__main__.py
--- a/pyserini/search/__main__.py
+++ b/pyserini/search/__main__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,16 +16,16 @@
import argparse
import os
-from typing import Tuple, List, TextIO
+
+from tqdm import tqdm
from transformers import AutoTokenizer
-from pyserini.pyclass import autoclass
from pyserini.analysis import JDefaultEnglishAnalyzer, JWhiteSpaceAnalyzer
+from pyserini.output_writer import OutputFormat, get_output_writer
+from pyserini.pyclass import autoclass
+from pyserini.query_iterator import get_query_iterator, TopicsFormat
from pyserini.search import SimpleSearcher, JDisjunctionMaxQueryGenerator
from pyserini.search.reranker import ClassifierType, PseudoRelevanceClassifierReranker
-from pyserini.query_iterator import get_query_iterator, TopicsFormat
-from pyserini.output_writer import OutputFormat, get_output_writer
-from tqdm import tqdm
def set_bm25_parameters(searcher, index, k1=None, b=None):
diff --git a/pyserini/search/_base.py b/pyserini/search/_base.py
--- a/pyserini/search/_base.py
+++ b/pyserini/search/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,11 +19,11 @@
class, which wraps the Java class with the same name in Anserini.
"""
-import os
import logging
+import os
-from ..pyclass import autoclass, JPaths
from pyserini.util import get_cache_home
+from ..pyclass import autoclass
logger = logging.getLogger(__name__)
diff --git a/pyserini/search/_nearest_neighbor.py b/pyserini/search/_nearest_neighbor.py
--- a/pyserini/search/_nearest_neighbor.py
+++ b/pyserini/search/_nearest_neighbor.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/search/_searcher.py b/pyserini/search/_searcher.py
--- a/pyserini/search/_searcher.py
+++ b/pyserini/search/_searcher.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/search/querybuilder.py b/pyserini/search/querybuilder.py
--- a/pyserini/search/querybuilder.py
+++ b/pyserini/search/querybuilder.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/search/reranker.py b/pyserini/search/reranker.py
--- a/pyserini/search/reranker.py
+++ b/pyserini/search/reranker.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import enum
import importlib
diff --git a/pyserini/setup.py b/pyserini/setup.py
--- a/pyserini/setup.py
+++ b/pyserini/setup.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
"""
Module for adding Anserini jar to classpath for pyjnius usage
diff --git a/pyserini/tokenize_json_collection.py b/pyserini/tokenize_json_collection.py
--- a/pyserini/tokenize_json_collection.py
+++ b/pyserini/tokenize_json_collection.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,11 +14,12 @@
# limitations under the License.
#
-from transformers import BertTokenizer, T5Tokenizer
import argparse
import json
import os
+from transformers import BertTokenizer, T5Tokenizer
+
def write_to_file(tokenizer, input, output):
with open(input, encoding='utf-8') as f:
diff --git a/pyserini/trectools/__init__.py b/pyserini/trectools/__init__.py
--- a/pyserini/trectools/__init__.py
+++ b/pyserini/trectools/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/trectools/_base.py b/pyserini/trectools/_base.py
--- a/pyserini/trectools/_base.py
+++ b/pyserini/trectools/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
from concurrent.futures import ThreadPoolExecutor
from copy import deepcopy
from enum import Enum
-from typing import Dict, List, Set, Tuple
+from typing import List, Set, Tuple
class AggregationMethod(Enum):
diff --git a/pyserini/util.py b/pyserini/util.py
--- a/pyserini/util.py
+++ b/pyserini/util.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,17 +15,19 @@
#
import hashlib
-import re
import os
+import re
import shutil
import tarfile
-from tqdm import tqdm
-from urllib.request import urlretrieve
from urllib.error import HTTPError, URLError
+from urllib.request import urlretrieve
+
import pandas as pd
-from pyserini.prebuilt_index_info import INDEX_INFO, DINDEX_INFO
+from tqdm import tqdm
+
from pyserini.encoded_query_info import QUERY_INFO
from pyserini.evaluate_script_info import EVALUATION_INFO
+from pyserini.prebuilt_index_info import INDEX_INFO, DINDEX_INFO
# https://gist.github.com/leimao/37ff6e990b3226c2c9670a2cd1e4a6f5
diff --git a/pyserini/vectorizer/__init__.py b/pyserini/vectorizer/__init__.py
--- a/pyserini/vectorizer/__init__.py
+++ b/pyserini/vectorizer/__init__.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/pyserini/vectorizer/_base.py b/pyserini/vectorizer/_base.py
--- a/pyserini/vectorizer/_base.py
+++ b/pyserini/vectorizer/_base.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/scripts/20newsgroups-replication.py b/scripts/20newsgroups-replication.py
--- a/scripts/20newsgroups-replication.py
+++ b/scripts/20newsgroups-replication.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
diff --git a/scripts/ance/encode_corpus_msmarco_doc.py b/scripts/ance/encode_corpus_msmarco_doc.py
--- a/scripts/ance/encode_corpus_msmarco_doc.py
+++ b/scripts/ance/encode_corpus_msmarco_doc.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
diff --git a/scripts/ance/encode_corpus_msmarco_passage.py b/scripts/ance/encode_corpus_msmarco_passage.py
--- a/scripts/ance/encode_corpus_msmarco_passage.py
+++ b/scripts/ance/encode_corpus_msmarco_passage.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
diff --git a/scripts/ance/encode_corpus_wiki.py b/scripts/ance/encode_corpus_wiki.py
--- a/scripts/ance/encode_corpus_wiki.py
+++ b/scripts/ance/encode_corpus_wiki.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
diff --git a/scripts/ance/encode_queries_msmarco_passage.py b/scripts/ance/encode_queries_msmarco_passage.py
--- a/scripts/ance/encode_queries_msmarco_passage.py
+++ b/scripts/ance/encode_queries_msmarco_passage.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import pandas as pd
from tqdm import tqdm
@@ -10,6 +26,7 @@
from pyserini.dsearch import AnceQueryEncoder
+
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--encoder', type=str, help='encoder name or path', required=True)
diff --git a/scripts/check_download_links.py b/scripts/check_download_links.py
--- a/scripts/check_download_links.py
+++ b/scripts/check_download_links.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
# Starting point for writing this script
# https://stackoverflow.com/questions/13129618/histogram-values-of-a-pandas-series
diff --git a/scripts/classifier_prf/cross_validate.py b/scripts/classifier_prf/cross_validate.py
--- a/scripts/classifier_prf/cross_validate.py
+++ b/scripts/classifier_prf/cross_validate.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import argparse
import json
diff --git a/scripts/classifier_prf/rank_trec_covid.py b/scripts/classifier_prf/rank_trec_covid.py
--- a/scripts/classifier_prf/rank_trec_covid.py
+++ b/scripts/classifier_prf/rank_trec_covid.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import os
import json
diff --git a/scripts/compute_doc_length_histogram.py b/scripts/compute_doc_length_histogram.py
--- a/scripts/compute_doc_length_histogram.py
+++ b/scripts/compute_doc_length_histogram.py
@@ -1,4 +1,5 @@
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
# Starting point for writing this script
# https://stackoverflow.com/questions/13129618/histogram-values-of-a-pandas-series
diff --git a/scripts/cord19/extract_citation_graph.py b/scripts/cord19/extract_citation_graph.py
--- a/scripts/cord19/extract_citation_graph.py
+++ b/scripts/cord19/extract_citation_graph.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import argparse
import os
diff --git a/scripts/cord19/extract_df.py b/scripts/cord19/extract_df.py
--- a/scripts/cord19/extract_df.py
+++ b/scripts/cord19/extract_df.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import argparse
import pandas as pd
diff --git a/scripts/cord19/find_cord19_length_outlier.py b/scripts/cord19/find_cord19_length_outlier.py
--- a/scripts/cord19/find_cord19_length_outlier.py
+++ b/scripts/cord19/find_cord19_length_outlier.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import sys
import argparse
diff --git a/scripts/cord19/temporal_analysis.py b/scripts/cord19/temporal_analysis.py
--- a/scripts/cord19/temporal_analysis.py
+++ b/scripts/cord19/temporal_analysis.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import argparse
import pandas as pd
diff --git a/scripts/distilbert_kd/encode_corpus_msmarco_passage.py b/scripts/distilbert_kd/encode_corpus_msmarco_passage.py
--- a/scripts/distilbert_kd/encode_corpus_msmarco_passage.py
+++ b/scripts/distilbert_kd/encode_corpus_msmarco_passage.py
@@ -1,8 +1,25 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
-import numpy as np
+
import faiss
+import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer, AutoModel
diff --git a/scripts/dpr/convert_qas_csv_to_topic_json.py b/scripts/dpr/convert_qas_csv_to_topic_json.py
--- a/scripts/dpr/convert_qas_csv_to_topic_json.py
+++ b/scripts/dpr/convert_qas_csv_to_topic_json.py
@@ -1,5 +1,21 @@
-import csv
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
+import csv
import json
diff --git a/scripts/dpr/encode_queries.py b/scripts/dpr/encode_queries.py
--- a/scripts/dpr/encode_queries.py
+++ b/scripts/dpr/encode_queries.py
@@ -1,6 +1,23 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import csv
import json
+
import pandas as pd
from tqdm import tqdm
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer
diff --git a/scripts/dpr/interpolate_runs.py b/scripts/dpr/interpolate_runs.py
--- a/scripts/dpr/interpolate_runs.py
+++ b/scripts/dpr/interpolate_runs.py
@@ -1,6 +1,23 @@
-import os
-import json
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
+import json
+import os
+
import numpy as np
from tqdm import tqdm
diff --git a/scripts/encode_queries.py b/scripts/encode_queries.py
--- a/scripts/encode_queries.py
+++ b/scripts/encode_queries.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/scripts/entity_linking.py b/scripts/entity_linking.py
--- a/scripts/entity_linking.py
+++ b/scripts/entity_linking.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import jsonlines
import spacy
diff --git a/scripts/kilt/anserini_retriever.py b/scripts/kilt/anserini_retriever.py
--- a/scripts/kilt/anserini_retriever.py
+++ b/scripts/kilt/anserini_retriever.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import multiprocessing
from multiprocessing.pool import ThreadPool
diff --git a/scripts/kilt/convert_kilt_100w_passage_tsv_to_jsonl.py b/scripts/kilt/convert_kilt_100w_passage_tsv_to_jsonl.py
--- a/scripts/kilt/convert_kilt_100w_passage_tsv_to_jsonl.py
+++ b/scripts/kilt/convert_kilt_100w_passage_tsv_to_jsonl.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import json
import argparse
import pickle
diff --git a/scripts/kilt/convert_kilt_dpr_to_pyserini_format.py b/scripts/kilt/convert_kilt_dpr_to_pyserini_format.py
--- a/scripts/kilt/convert_kilt_dpr_to_pyserini_format.py
+++ b/scripts/kilt/convert_kilt_dpr_to_pyserini_format.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import pickle
import csv
diff --git a/scripts/kilt/convert_kilt_to_document_jsonl.py b/scripts/kilt/convert_kilt_to_document_jsonl.py
--- a/scripts/kilt/convert_kilt_to_document_jsonl.py
+++ b/scripts/kilt/convert_kilt_to_document_jsonl.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import json
import argparse
import string
diff --git a/scripts/kilt/convert_kilt_to_passage_jsonl.py b/scripts/kilt/convert_kilt_to_passage_jsonl.py
--- a/scripts/kilt/convert_kilt_to_passage_jsonl.py
+++ b/scripts/kilt/convert_kilt_to_passage_jsonl.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import json
import argparse
from tqdm import tqdm
diff --git a/scripts/kilt/encode_kilt_topics.py b/scripts/kilt/encode_kilt_topics.py
--- a/scripts/kilt/encode_kilt_topics.py
+++ b/scripts/kilt/encode_kilt_topics.py
@@ -1,10 +1,26 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
-import pandas as pd
+import pandas as pd
from tqdm import tqdm
-from pyserini.query_iterator import get_query_iterator, TopicsFormat
-from pyserini.dsearch import DprQueryEncoder
+from pyserini.dsearch import DprQueryEncoder
+from pyserini.query_iterator import get_query_iterator, TopicsFormat
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compute embeddings for KILT topics')
diff --git a/scripts/kilt/run_drqa_retrieval.py b/scripts/kilt/run_drqa_retrieval.py
--- a/scripts/kilt/run_drqa_retrieval.py
+++ b/scripts/kilt/run_drqa_retrieval.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import json
import argparse
diff --git a/scripts/kilt/run_retrieval.py b/scripts/kilt/run_retrieval.py
--- a/scripts/kilt/run_retrieval.py
+++ b/scripts/kilt/run_retrieval.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import json
import argparse
diff --git a/scripts/ltr_msmarco-passage/append_d2q_to_collection_jsonl.py b/scripts/ltr_msmarco-passage/append_d2q_to_collection_jsonl.py
--- a/scripts/ltr_msmarco-passage/append_d2q_to_collection_jsonl.py
+++ b/scripts/ltr_msmarco-passage/append_d2q_to_collection_jsonl.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+#
import argparse
import json
diff --git a/scripts/ltr_msmarco-passage/convert_collection_to_jsonl.py b/scripts/ltr_msmarco-passage/convert_collection_to_jsonl.py
--- a/scripts/ltr_msmarco-passage/convert_collection_to_jsonl.py
+++ b/scripts/ltr_msmarco-passage/convert_collection_to_jsonl.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/scripts/ltr_msmarco-passage/convert_common.py b/scripts/ltr_msmarco-passage/convert_common.py
--- a/scripts/ltr_msmarco-passage/convert_common.py
+++ b/scripts/ltr_msmarco-passage/convert_common.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
import re
import spacy
"""
diff --git a/scripts/ltr_msmarco-passage/convert_passage.py b/scripts/ltr_msmarco-passage/convert_passage.py
--- a/scripts/ltr_msmarco-passage/convert_passage.py
+++ b/scripts/ltr_msmarco-passage/convert_passage.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/scripts/ltr_msmarco-passage/convert_queries.py b/scripts/ltr_msmarco-passage/convert_queries.py
--- a/scripts/ltr_msmarco-passage/convert_queries.py
+++ b/scripts/ltr_msmarco-passage/convert_queries.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,7 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-# Convert MSMARCO queries
+
+"""Convert MSMARCO queries"""
import sys
diff --git a/scripts/ltr_msmarco-passage/rerank_with_ltr_model.py b/scripts/ltr_msmarco-passage/rerank_with_ltr_model.py
--- a/scripts/ltr_msmarco-passage/rerank_with_ltr_model.py
+++ b/scripts/ltr_msmarco-passage/rerank_with_ltr_model.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/scripts/ltr_msmarco-passage/train_ltr_model.py b/scripts/ltr_msmarco-passage/train_ltr_model.py
--- a/scripts/ltr_msmarco-passage/train_ltr_model.py
+++ b/scripts/ltr_msmarco-passage/train_ltr_model.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
import datetime
import glob
import hashlib
diff --git a/scripts/msmarco-passage/build_hnsw_index.py b/scripts/msmarco-passage/build_hnsw_index.py
--- a/scripts/msmarco-passage/build_hnsw_index.py
+++ b/scripts/msmarco-passage/build_hnsw_index.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import os
diff --git a/scripts/msmarco-passage/encode_corpus.py b/scripts/msmarco-passage/encode_corpus.py
--- a/scripts/msmarco-passage/encode_corpus.py
+++ b/scripts/msmarco-passage/encode_corpus.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
diff --git a/scripts/msmarco-passage/encode_queries.py b/scripts/msmarco-passage/encode_queries.py
--- a/scripts/msmarco-passage/encode_queries.py
+++ b/scripts/msmarco-passage/encode_queries.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import numpy as np
diff --git a/scripts/msmarco-passage/interpolate_runs.py b/scripts/msmarco-passage/interpolate_runs.py
--- a/scripts/msmarco-passage/interpolate_runs.py
+++ b/scripts/msmarco-passage/interpolate_runs.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import os
import numpy as np
diff --git a/scripts/msmarco-passage/lookup_docs_from_qrels.py b/scripts/msmarco-passage/lookup_docs_from_qrels.py
--- a/scripts/msmarco-passage/lookup_docs_from_qrels.py
+++ b/scripts/msmarco-passage/lookup_docs_from_qrels.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import sys
diff --git a/scripts/sbert/encode_corpus_msmarco_passage.py b/scripts/sbert/encode_corpus_msmarco_passage.py
--- a/scripts/sbert/encode_corpus_msmarco_passage.py
+++ b/scripts/sbert/encode_corpus_msmarco_passage.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import json
import os
diff --git a/scripts/trec-covid-ranker.py b/scripts/trec-covid-ranker.py
--- a/scripts/trec-covid-ranker.py
+++ b/scripts/trec-covid-ranker.py
@@ -1,3 +1,19 @@
+#
+# Pyserini: Reproducible IR research with sparse and dense representations
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
import argparse
import os
import json
| diff --git a/integrations/clprf/test_clprf.py b/integrations/clprf/test_clprf.py
--- a/integrations/clprf/test_clprf.py
+++ b/integrations/clprf/test_clprf.py
@@ -1,5 +1,5 @@
#
-# Pyserini: python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,10 +17,10 @@
import os
import shutil
import unittest
-
from random import randint
-from integrations.utils import run_command, parse_score
+
from integrations.simplesearcher_score_checker import SimpleSearcherScoreChecker
+from integrations.utils import run_command, parse_score
class TestSearchIntegration(unittest.TestCase):
diff --git a/integrations/clprf/test_trec_covid_r3.py b/integrations/clprf/test_trec_covid_r3.py
--- a/integrations/clprf/test_trec_covid_r3.py
+++ b/integrations/clprf/test_trec_covid_r3.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,14 +13,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import hashlib
+
+import json
import os
import re
import shutil
import unittest
-import json
-import gzip
from random import randint
+
from pyserini.util import download_url, download_prebuilt_index
diff --git a/integrations/clprf/test_trec_covid_r4.py b/integrations/clprf/test_trec_covid_r4.py
--- a/integrations/clprf/test_trec_covid_r4.py
+++ b/integrations/clprf/test_trec_covid_r4.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,14 +13,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-import hashlib
+
+import gzip
+import json
import os
import re
import shutil
import unittest
-import json
-import gzip
from random import randint
+
from pyserini.util import download_url, download_prebuilt_index
diff --git a/integrations/dense/test_ance.py b/integrations/dense/test_ance.py
--- a/integrations/dense/test_ance.py
+++ b/integrations/dense/test_ance.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,9 +19,11 @@
import os
import socket
import unittest
+
from integrations.utils import clean_files, run_command, parse_score
-from pyserini.search import get_topics
from pyserini.dsearch import QueryEncoder
+from pyserini.search import get_topics
+
class TestSearchIntegration(unittest.TestCase):
def setUp(self):
diff --git a/integrations/dense/test_distilbert_kd.py b/integrations/dense/test_distilbert_kd.py
--- a/integrations/dense/test_distilbert_kd.py
+++ b/integrations/dense/test_distilbert_kd.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,9 +19,10 @@
import os
import socket
import unittest
+
from integrations.utils import clean_files, run_command, parse_score
-from pyserini.search import get_topics
from pyserini.dsearch import QueryEncoder
+from pyserini.search import get_topics
class TestSearchIntegration(unittest.TestCase):
diff --git a/integrations/dense/test_dpr.py b/integrations/dense/test_dpr.py
--- a/integrations/dense/test_dpr.py
+++ b/integrations/dense/test_dpr.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,9 +19,10 @@
import os
import socket
import unittest
+
from integrations.utils import clean_files, run_command, parse_score
-from pyserini.search import get_topics
from pyserini.dsearch import QueryEncoder
+from pyserini.search import get_topics
class TestSearchIntegration(unittest.TestCase):
diff --git a/integrations/dense/test_kilt.py b/integrations/dense/test_kilt.py
--- a/integrations/dense/test_kilt.py
+++ b/integrations/dense/test_kilt.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -17,12 +17,11 @@
"""Integration tests for KILT integration."""
import os
+import re
import socket
import unittest
-import re
+
from integrations.utils import clean_files, run_command
-from pyserini.search import get_topics
-from pyserini.dsearch import QueryEncoder
def parse_kilt_score(output, metric, digits=4):
diff --git a/integrations/dense/test_sbert.py b/integrations/dense/test_sbert.py
--- a/integrations/dense/test_sbert.py
+++ b/integrations/dense/test_sbert.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,9 +19,10 @@
import os
import socket
import unittest
+
from integrations.utils import clean_files, run_command, parse_score
-from pyserini.search import get_topics
from pyserini.dsearch import QueryEncoder
+from pyserini.search import get_topics
class TestSearchIntegration(unittest.TestCase):
diff --git a/integrations/dense/test_tct_colbert.py b/integrations/dense/test_tct_colbert.py
--- a/integrations/dense/test_tct_colbert.py
+++ b/integrations/dense/test_tct_colbert.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,9 +19,10 @@
import os
import socket
import unittest
+
from integrations.utils import clean_files, run_command, parse_score
-from pyserini.search import get_topics
from pyserini.dsearch import QueryEncoder
+from pyserini.search import get_topics
class TestSearchIntegration(unittest.TestCase):
diff --git a/integrations/sparse/test_ltr_msmarco_passage.py b/integrations/sparse/test_ltr_msmarco_passage.py
--- a/integrations/sparse/test_ltr_msmarco_passage.py
+++ b/integrations/sparse/test_ltr_msmarco_passage.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_search_pretokenized.py b/integrations/sparse/test_search_pretokenized.py
--- a/integrations/sparse/test_search_pretokenized.py
+++ b/integrations/sparse/test_search_pretokenized.py
@@ -1,5 +1,5 @@
#
-# Pyserini: python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_simple_fusion_search_integration.py b/integrations/sparse/test_simple_fusion_search_integration.py
--- a/integrations/sparse/test_simple_fusion_search_integration.py
+++ b/integrations/sparse/test_simple_fusion_search_integration.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_simplesearcher_check_core17.py b/integrations/sparse/test_simplesearcher_check_core17.py
--- a/integrations/sparse/test_simplesearcher_check_core17.py
+++ b/integrations/sparse/test_simplesearcher_check_core17.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_simplesearcher_check_core18.py b/integrations/sparse/test_simplesearcher_check_core18.py
--- a/integrations/sparse/test_simplesearcher_check_core18.py
+++ b/integrations/sparse/test_simplesearcher_check_core18.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_simplesearcher_check_robust04.py b/integrations/sparse/test_simplesearcher_check_robust04.py
--- a/integrations/sparse/test_simplesearcher_check_robust04.py
+++ b/integrations/sparse/test_simplesearcher_check_robust04.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_simplesearcher_check_robust05.py b/integrations/sparse/test_simplesearcher_check_robust05.py
--- a/integrations/sparse/test_simplesearcher_check_robust05.py
+++ b/integrations/sparse/test_simplesearcher_check_robust05.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_simplesearcher_multithread.py b/integrations/sparse/test_simplesearcher_multithread.py
--- a/integrations/sparse/test_simplesearcher_multithread.py
+++ b/integrations/sparse/test_simplesearcher_multithread.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_trec_covid_stored_runs.py b/integrations/sparse/test_trec_covid_stored_runs.py
--- a/integrations/sparse/test_trec_covid_stored_runs.py
+++ b/integrations/sparse/test_trec_covid_stored_runs.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/integrations/sparse/test_verify_rrf.py b/integrations/sparse/test_verify_rrf.py
--- a/integrations/sparse/test_verify_rrf.py
+++ b/integrations/sparse/test_verify_rrf.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_analysis.py b/tests/test_analysis.py
--- a/tests/test_analysis.py
+++ b/tests/test_analysis.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_collection.py b/tests/test_collection.py
--- a/tests/test_collection.py
+++ b/tests/test_collection.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_encoded_queries.py b/tests/test_encoded_queries.py
--- a/tests/test_encoded_queries.py
+++ b/tests/test_encoded_queries.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,11 +14,12 @@
# limitations under the License.
#
-import unittest
import os
import shutil
-from pyserini.search import get_topics
+import unittest
+
from pyserini.dsearch import QueryEncoder
+from pyserini.search import get_topics
class TestEncodedQueries(unittest.TestCase):
diff --git a/tests/test_fusion.py b/tests/test_fusion.py
--- a/tests/test_fusion.py
+++ b/tests/test_fusion.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_index_download.py b/tests/test_index_download.py
--- a/tests/test_index_download.py
+++ b/tests/test_index_download.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,10 @@
# limitations under the License.
#
-import unittest
import os
import shutil
+import unittest
+
from pyserini.search import SimpleSearcher
diff --git a/tests/test_index_reader.py b/tests/test_index_reader.py
--- a/tests/test_index_reader.py
+++ b/tests/test_index_reader.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -25,7 +25,6 @@
from sklearn.naive_bayes import MultinomialNB
from pyserini import analysis, index, search
-from pyserini.pyclass import JString
from pyserini.vectorizer import BM25Vectorizer, TfidfVectorizer
diff --git a/tests/test_load_qrels.py b/tests/test_load_qrels.py
--- a/tests/test_load_qrels.py
+++ b/tests/test_load_qrels.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
+
import os
import shutil
import unittest
diff --git a/tests/test_load_topics_qrels.py b/tests/test_load_topics_qrels.py
--- a/tests/test_load_topics_qrels.py
+++ b/tests/test_load_topics_qrels.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_nnsearch.py b/tests/test_nnsearch.py
--- a/tests/test_nnsearch.py
+++ b/tests/test_nnsearch.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_querybuilder.py b/tests/test_querybuilder.py
--- a/tests/test_querybuilder.py
+++ b/tests/test_querybuilder.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_search.py b/tests/test_search.py
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/tests/test_tokenization.py b/tests/test_tokenization.py
--- a/tests/test_tokenization.py
+++ b/tests/test_tokenization.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,10 @@
# limitations under the License.
#
-from transformers import BertTokenizer, T5Tokenizer, AutoTokenizer
import unittest
+from transformers import BertTokenizer, T5Tokenizer, AutoTokenizer
+
class TestTokenization(unittest.TestCase):
def setUp(self):
diff --git a/tests/test_tokenize_json.py b/tests/test_tokenize_json.py
--- a/tests/test_tokenize_json.py
+++ b/tests/test_tokenize_json.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,10 +14,9 @@
# limitations under the License.
#
-import unittest
-import pyserini.tokenize_json_collection as tok_json
import json
import os
+import unittest
from shutil import rmtree
diff --git a/tests/test_topics_order.py b/tests/test_topics_order.py
--- a/tests/test_topics_order.py
+++ b/tests/test_topics_order.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -15,8 +15,6 @@
#
import unittest
-import os
-import shutil
from pyserini.query_iterator import DefaultQueryIterator
diff --git a/tests/test_trectools.py b/tests/test_trectools.py
--- a/tests/test_trectools.py
+++ b/tests/test_trectools.py
@@ -1,5 +1,5 @@
#
-# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
+# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -16,9 +16,10 @@
import filecmp
import os
-from pyserini.trectools import TrecRun, Qrels, RescoreMethod
import unittest
+from pyserini.trectools import TrecRun, Qrels, RescoreMethod
+
class TestTrecTools(unittest.TestCase):
def setUp(self):
| Tweak tagline in boilerplate
Currently, we have in our (source code) boilerplate:
```
#
# Pyserini: Python interface to the Anserini IR toolkit built on Lucene
#
```
This wording dates back to when Pyserini was just a wrapper around Anserini, but the project has since evolved into something much more than that.
The "tagline" we converged on in the Pyserini SIGIR resource paper was: "Pyserini is a Python toolkit for reproducible information retrieval research with sparse and dense representations."
This is too long for the boilerplate, and line wrapping looks odd.
The best I could come up with is:
```
#
# Pyserini: Reproducible IR research with sparse and dense representations
#
```
I figured that "Python" was somewhat redundant, since the boilerplate appears in source code anyway.
Thoughts? @MXueguang @jacklin64 @justram @rodrigonogueira4 @ronakice ?
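The change itself is mechanical: swap the old tagline line for the new one in every source file, which is exactly what the diff above does file by file. Purely as an illustration of how such a bulk rewrite could be scripted (the `*.py` filter, the in-place edit, and running from the repository root are all assumptions here, not something this issue or the PR prescribes), a minimal sketch:
```python
# Hypothetical helper, not part of Pyserini: rewrite the old boilerplate tagline to the new one.
import pathlib

OLD_VARIANTS = (
    '# Pyserini: Python interface to the Anserini IR toolkit built on Lucene',
    '# Pyserini: python interface to the Anserini IR toolkit built on Lucene',  # a few files use lowercase "python"
)
NEW = '# Pyserini: Reproducible IR research with sparse and dense representations'

for path in pathlib.Path('.').rglob('*.py'):  # assumes we run from the repository root
    original = path.read_text(encoding='utf-8')
    text = original
    for old in OLD_VARIANTS:
        text = text.replace(old, NEW)
    if text != original:
        path.write_text(text, encoding='utf-8')
```
However the edit is actually made, the diff above remains the authoritative record of which headers were changed.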
| I think it makes sense. | 2021-05-26T01:02:23Z | [] | [] |
castorini/pyserini | 852 | castorini__pyserini-852 | [
"745"
] | 0c1838e830ae52d050d907f851ac3a26d99bafc3 | diff --git a/pyserini/index/__main__.py b/pyserini/index/__main__.py
--- a/pyserini/index/__main__.py
+++ b/pyserini/index/__main__.py
@@ -22,6 +22,9 @@
# argument check
for i in range(len(args)):
+ # Convert double hyphen args into single hyphen args for Java: e.g., --input becomes -input
+ if args[i].startswith('--'):
+ args[i] = args[i][1:]
if args[i] == '-input':
collection_dir = args[i+1]
if os.path.isfile(collection_dir):
diff --git a/pyserini/search/_base.py b/pyserini/search/_base.py
--- a/pyserini/search/_base.py
+++ b/pyserini/search/_base.py
@@ -327,6 +327,8 @@ def get_qrels_file(collection_name):
qrels = JQrels.TREC2018_BL
elif collection_name == 'trec2019-bl':
qrels = JQrels.TREC2019_BL
+ elif collection_name == 'trec2020-bl':
+ qrels = JQrels.TREC2020_BL
if qrels:
target_path = os.path.join(get_cache_home(), qrels.path)
if os.path.exists(target_path):
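Regarding the first hunk in the patch above (the double-hyphen handling in `pyserini/index/__main__.py`): the normalization is easiest to see in isolation. The snippet below simply re-runs that loop on a made-up argument list; the flag values are invented for illustration, and only the loop itself mirrors the patched code.
```python
# Stand-alone illustration of the argument normalization added in the patch above.
# GNU-style '--flag' arguments are rewritten to the single-hyphen '-flag' form that the
# Java indexing code expects (as the patch comment notes); values and flags that already
# use a single hyphen pass through unchanged.
args = ['--collection', 'JsonCollection', '--input', 'my-docs/', '-threads', '4']

for i in range(len(args)):
    if args[i].startswith('--'):
        args[i] = args[i][1:]

print(args)  # ['-collection', 'JsonCollection', '-input', 'my-docs/', '-threads', '4']
```
So after this change a user can pass either `--input` or `-input` on the command line and both reach the Java layer in the `-input` form. The second hunk is independent of this: it just registers qrels for `trec2020-bl` alongside the existing background-linking years.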
| diff --git a/tests/test_load_topics_qrels.py b/tests/test_load_topics_qrels.py
--- a/tests/test_load_topics_qrels.py
+++ b/tests/test_load_topics_qrels.py
@@ -24,423 +24,517 @@ class TestLoadTopics(unittest.TestCase):
def test_trec1_adhoc(self):
topics = search.get_topics('trec1-adhoc')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec1_adhoc_qrels(self):
qrels = search.get_qrels('trec1-adhoc')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2_adhoc(self):
topics = search.get_topics('trec2-adhoc')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2_adhoc_qrels(self):
qrels = search.get_qrels('trec2-adhoc')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec3_adhoc(self):
topics = search.get_topics('trec3-adhoc')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec3_adhoc_qrels(self):
qrels = search.get_qrels('trec3-adhoc')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_robust04(self):
topics = search.get_topics('robust04')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 250)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_robust04_qrels(self):
qrels = search.get_qrels('robust04')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 249)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_robust05(self):
topics = search.get_topics('robust05')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_robust05_qrels(self):
qrels = search.get_qrels('robust05')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_core17(self):
topics = search.get_topics('core17')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_core17_qrels(self):
qrels = search.get_qrels('core17')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_core18(self):
topics = search.get_topics('core18')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_core18_qrels(self):
qrels = search.get_qrels('core18')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_wt10g(self):
topics = search.get_topics('wt10g')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 100)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_wt10g_qrels(self):
qrels = search.get_qrels('wt10g')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 100)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2004_terabyte(self):
topics = search.get_topics('trec2004-terabyte')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2004_terabyte_qrels(self):
qrels = search.get_qrels('trec2004-terabyte')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 49)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2005_terabyte(self):
topics = search.get_topics('trec2005-terabyte')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2005_terabyte_qrels(self):
qrels = search.get_qrels('trec2005-terabyte')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2006_terabyte(self):
topics = search.get_topics('trec2006-terabyte')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2006_terabyte_qrels(self):
qrels = search.get_qrels('trec2006-terabyte')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2007_million_query(self):
topics = search.get_topics('trec2007-million-query')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 10000)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_trec2008_million_query(self):
topics = search.get_topics('trec2008-million-query')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 10000)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_trec2009_million_query(self):
topics = search.get_topics('trec2009-million-query')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 40000)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_trec2010_web(self):
topics = search.get_topics('trec2010-web')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_trec2011_web(self):
topics = search.get_topics('trec2011-web')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2011_web_qrels(self):
qrels = search.get_qrels('trec2011-web')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2012_web(self):
topics = search.get_topics('trec2012-web')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2012_web_qrels(self):
qrels = search.get_qrels('trec2012-web')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2013_web(self):
topics = search.get_topics('trec2013-web')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2013_web_qrels(self):
qrels = search.get_topics('trec2013-web')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2014_web(self):
topics = search.get_topics('trec2014-web')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2014_web_qrels(self):
qrels = search.get_qrels('trec2014-web')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_mb11(self):
topics = search.get_topics('mb11')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_mb11_qrels(self):
qrels = search.get_qrels('mb11')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 49)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_mb12(self):
topics = search.get_topics('mb12')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 60)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_mb12_qrels(self):
qrels = search.get_qrels('mb12')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 59)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_mb13(self):
topics = search.get_topics('mb13')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 60)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_mb13_qrels(self):
qrels = search.get_qrels('mb13')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 60)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_mb14(self):
topics = search.get_topics('mb14')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 55)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_mb14_qrels(self):
qrels = search.get_qrels('mb14')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 55)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_car15(self):
topics = search.get_topics('car17v1.5-benchmarkY1test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 2125)
self.assertFalse(isinstance(next(iter(topics.keys())), int))
- def test_car15_qrels(self):
qrels = search.get_qrels('car17v1.5-benchmarkY1test')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 2125)
self.assertFalse(isinstance(next(iter(qrels.keys())), int))
def test_car20(self):
topics = search.get_topics('car17v2.0-benchmarkY1test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 2254)
self.assertFalse(isinstance(next(iter(topics.keys())), int))
- def test_car20_qrels(self):
qrels = search.get_qrels('car17v2.0-benchmarkY1test')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 2254)
self.assertFalse(isinstance(next(iter(qrels.keys())), int))
def test_dl19_doc(self):
topics = search.get_topics('dl19-doc')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 43)
self.assertFalse(isinstance(next(iter(topics.keys())), str))
- def test_dl19_doc_qrels(self):
qrels = search.get_qrels('dl19-doc')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 43)
self.assertFalse(isinstance(next(iter(qrels.keys())), str))
def test_dl19_passage(self):
topics = search.get_topics('dl19-passage')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 43)
self.assertFalse(isinstance(next(iter(topics.keys())), str))
- def test_dl19_passage_qrels(self):
qrels = search.get_qrels('dl19-passage')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 43)
self.assertFalse(isinstance(next(iter(qrels.keys())), str))
def test_dl20(self):
topics = search.get_topics('dl20')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 200)
self.assertFalse(isinstance(next(iter(topics.keys())), str))
- def test_dl20_doc_qrels(self):
qrels = search.get_qrels('dl20-doc')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 45)
self.assertFalse(isinstance(next(iter(qrels.keys())), str))
- def test_dl20_passage_qrels(self):
qrels = search.get_qrels('dl20-passage')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 54)
self.assertFalse(isinstance(next(iter(qrels.keys())), str))
+ # MS MARCO V1
def test_msmarco_doc(self):
topics = search.get_topics('msmarco-doc-dev')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 5193)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_msmarco_doc_qrels(self):
qrels = search.get_qrels('msmarco-doc-dev')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 5193)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_msmarco_doc_test(self):
topics = search.get_topics('msmarco-doc-test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 5793)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_msmarco_passage(self):
+ def test_msmarco_passage_dev(self):
topics = search.get_topics('msmarco-passage-dev-subset')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 6980)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_msmarco_passage_qrels(self):
qrels = search.get_qrels('msmarco-passage-dev-subset')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 6980)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_msmarco_passage_test(self):
topics = search.get_topics('msmarco-passage-test-subset')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 6837)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
+ # MS MARCO V2
+ def test_msmarco_v2_doc_dev(self):
+ topics = search.get_topics('msmarco-v2-doc-dev')
+ self.assertIsNotNone(topics)
+ self.assertEqual(len(topics), 4552)
+ self.assertTrue(isinstance(next(iter(topics.keys())), int))
+
+ qrels = search.get_qrels('msmarco-v2-doc-dev')
+ self.assertIsNotNone(qrels)
+ self.assertEqual(len(qrels), 4552)
+ self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+
+ def test_msmarco_v2_doc_dev2(self):
+ topics = search.get_topics('msmarco-v2-doc-dev2')
+ self.assertIsNotNone(topics)
+ self.assertEqual(len(topics), 5000)
+ self.assertTrue(isinstance(next(iter(topics.keys())), int))
+
+ qrels = search.get_qrels('msmarco-v2-doc-dev2')
+ self.assertIsNotNone(qrels)
+ self.assertEqual(len(qrels), 5000)
+ self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+
+ def test_msmarco_v2_passage_dev(self):
+ topics = search.get_topics('msmarco-v2-passage-dev')
+ self.assertIsNotNone(topics)
+ self.assertEqual(len(topics), 3903)
+ self.assertTrue(isinstance(next(iter(topics.keys())), int))
+
+ qrels = search.get_qrels('msmarco-v2-passage-dev')
+ self.assertIsNotNone(qrels)
+ self.assertEqual(len(qrels), 3903)
+ self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+
+ def test_msmarco_v2_passage_dev2(self):
+ topics = search.get_topics('msmarco-v2-passage-dev2')
+ self.assertIsNotNone(topics)
+ self.assertEqual(len(topics), 4281)
+ self.assertTrue(isinstance(next(iter(topics.keys())), int))
+
+ qrels = search.get_qrels('msmarco-v2-passage-dev2')
+ self.assertIsNotNone(qrels)
+ self.assertEqual(len(qrels), 4281)
+ self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+
+ # Various multi-lingual test collections
def test_ntcir8_zh(self):
topics = search.get_topics('ntcir8-zh')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 73)
self.assertTrue(isinstance(next(iter(topics.keys())), str))
- def test_ntcir8_zh_qrels(self):
qrels = search.get_qrels('ntcir8-zh')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 100)
self.assertTrue(isinstance(next(iter(qrels.keys())), str))
def test_clef2006_fr(self):
topics = search.get_topics('clef2006-fr')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 49)
self.assertTrue(isinstance(next(iter(topics.keys())), str))
- def test_clef2006_fr_qrels(self):
qrels = search.get_qrels('clef2006-fr')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 49)
self.assertTrue(isinstance(next(iter(qrels.keys())), str))
def test_trec2002_ar(self):
topics = search.get_topics('trec2002-ar')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2002_ar_qrels(self):
qrels = search.get_qrels('trec2002-ar')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_fire2012_bn(self):
topics = search.get_topics('fire2012-bn')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_fire2012_bn_qrels(self):
qrels = search.get_qrels('fire2012-bn')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_fire2012_hi(self):
topics = search.get_topics('fire2012-hi')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_fire2012_hi_qrels(self):
qrels = search.get_qrels('fire2012-hi')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_fire2012_en(self):
topics = search.get_topics('fire2012-en')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_fire2012_en_qrels(self):
qrels = search.get_qrels('fire2012-en')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
- def test_trec2020_bl(self):
- topics = search.get_topics('trec2020-bl')
- self.assertEqual(len(topics), 50)
- self.assertTrue(isinstance(next(iter(topics.keys())), int))
-
+ # Epidemic QA
def test_epidemic_qa_expert_prelim(self):
topics = search.get_topics('epidemic-qa-expert-prelim')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 45)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_epidemic_qa_consumer_prelim(self):
topics = search.get_topics('epidemic-qa-consumer-prelim')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 42)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
+ # DPR datasets
def test_dpr_nq_dev(self):
topics = search.get_topics('dpr-nq-dev')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 8757)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_dpr_nq_test(self):
topics = search.get_topics('dpr-nq-test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 3610)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_dpr_wq_test(self):
topics = search.get_topics('dpr-wq-test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 2032)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_dpr_squad_test(self):
topics = search.get_topics('dpr-squad-test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 10570)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_dpr_curated_test(self):
topics = search.get_topics('dpr-curated-test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 694)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_dpr_trivia_test(self):
topics = search.get_topics('dpr-trivia-test')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 11313)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_dpr_trivia_dev(self):
topics = search.get_topics('dpr-trivia-dev')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 8837)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
+ # TREC-COVID
def test_covid_round1(self):
topics = search.get_topics('covid-round1')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 30)
self.assertEqual('coronavirus origin', topics[1]['query'])
self.assertEqual('coronavirus remdesivir', topics[30]['query'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_covid_round1_qrels(self):
qrels = search.get_qrels('covid-round1')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 30)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_covid_round1_udel(self):
topics = search.get_topics('covid-round1-udel')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 30)
self.assertEqual('coronavirus origin origin COVID-19', topics[1]['query'])
self.assertEqual('coronavirus remdesivir remdesivir effective treatment COVID-19', topics[30]['query'])
@@ -448,18 +542,20 @@ def test_covid_round1_udel(self):
def test_covid_round2(self):
topics = search.get_topics('covid-round2')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 35)
self.assertEqual('coronavirus origin', topics[1]['query'])
self.assertEqual('coronavirus public datasets', topics[35]['query'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_covid_round2_qrels(self):
qrels = search.get_qrels('covid-round2')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 35)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_covid_round2_udel(self):
topics = search.get_topics('covid-round2-udel')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 35)
self.assertEqual('coronavirus origin origin COVID-19', topics[1]['query'])
self.assertEqual('coronavirus public datasets public datasets COVID-19', topics[35]['query'])
@@ -467,18 +563,20 @@ def test_covid_round2_udel(self):
def test_covid_round3(self):
topics = search.get_topics('covid-round3')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 40)
self.assertEqual('coronavirus origin', topics[1]['query'])
self.assertEqual('coronavirus mutations', topics[40]['query'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_covid_round3_qrels(self):
qrels = search.get_qrels('covid-round3')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 40)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_covid_round3_udel(self):
topics = search.get_topics('covid-round3-udel')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 40)
self.assertEqual('coronavirus origin origin COVID-19', topics[1]['query'])
self.assertEqual('coronavirus mutations observed mutations SARS-CoV-2 genome mutations', topics[40]['query'])
@@ -486,58 +584,75 @@ def test_covid_round3_udel(self):
def test_covid_round4(self):
topics = search.get_topics('covid-round4')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 45)
self.assertEqual('coronavirus origin', topics[1]['query'])
self.assertEqual('coronavirus mental health impact', topics[45]['query'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_covid_round4_qrels(self):
qrels = search.get_qrels('covid-round4')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 45)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_covid_round4_udel(self):
topics = search.get_topics('covid-round4-udel')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 45)
self.assertEqual('coronavirus origin origin COVID-19', topics[1]['query'])
- self.assertEqual('coronavirus mental health impact COVID-19 pandemic impacted mental health',
- topics[45]['query'])
+ self.assertEqual('coronavirus mental health impact COVID-19 pandemic impacted mental health', topics[45]['query'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
def test_covid_round5(self):
topics = search.get_topics('covid-round5')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_covid_round5_qrels(self):
qrels = search.get_qrels('covid-round5')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+ # TREC News Tracks
def test_trec2018_bl(self):
topics = search.get_topics('trec2018-bl')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 50)
self.assertEqual('fef0f232a9bd94bdb96bac48c7705503', topics[393]['title'])
self.assertEqual('a1c41a70-35c7-11e3-8a0e-4e2cf80831fc', topics[825]['title'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2018_bl_qrels(self):
qrels = search.get_qrels('trec2018-bl')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 50)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
def test_trec2019_bl(self):
topics = search.get_topics('trec2019-bl')
+ self.assertIsNotNone(topics)
self.assertEqual(len(topics), 60)
self.assertEqual('d7d906991e2883889f850de9ae06655e', topics[870]['title'])
self.assertEqual('0d7f5e24cafc019265d3ee4b9745e7ea', topics[829]['title'])
self.assertTrue(isinstance(next(iter(topics.keys())), int))
- def test_trec2019_bl_qrels(self):
qrels = search.get_qrels('trec2019-bl')
+ self.assertIsNotNone(qrels)
self.assertEqual(len(qrels), 57)
self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+ def test_trec2020_bl(self):
+ topics = search.get_topics('trec2020-bl')
+ self.assertIsNotNone(topics)
+ self.assertEqual(len(topics), 50)
+ self.assertTrue(isinstance(next(iter(topics.keys())), int))
+
+ qrels = search.get_qrels('trec2020-bl')
+ self.assertIsNotNone(qrels)
+ self.assertEqual(len(qrels), 49)
+ self.assertTrue(isinstance(next(iter(qrels.keys())), int))
+
+ # General test cases
def test_tsv_int_topicreader(self):
# Running from command-line, we're in root of repo, but running in IDE, we're in tests/
path = 'tools/topics-and-qrels/topics.msmarco-doc.dev.txt'
| Add test cases for new MS MARCO v2 bindings
E.g., for queries: https://github.com/castorini/pyserini/blob/master/tests/test_load_topics_qrels.py
And for qrels also...
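
For reference, a minimal sketch of the kind of test being asked for, mirroring the calls and expected counts in the diff above (the `from pyserini import search` import is assumed to match the existing test module):

```python
from pyserini import search  # import assumed to match the existing test module

# Counts mirror the assertions added for msmarco-v2-doc-dev in the diff above.
topics = search.get_topics('msmarco-v2-doc-dev')
qrels = search.get_qrels('msmarco-v2-doc-dev')
assert topics is not None and len(topics) == 4552
assert qrels is not None and len(qrels) == 4552
```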
| 2021-10-30T16:38:20Z | [] | [] |
|
castorini/pyserini | 1,098 | castorini__pyserini-1098 | [
"695"
] | b71dbc6dca04b214f95cf609390c3294976f8e8b | diff --git a/pyserini/search/lucene/_searcher.py b/pyserini/search/lucene/_searcher.py
--- a/pyserini/search/lucene/_searcher.py
+++ b/pyserini/search/lucene/_searcher.py
@@ -228,7 +228,10 @@ def set_rm3(self, fb_terms=10, fb_docs=10, original_query_weight=float(0.5), rm3
rm3_filter_terms: bool
Whether to remove non-English terms.
"""
- self.object.setRM3(fb_terms, fb_docs, original_query_weight, rm3_output_query, rm3_filter_terms)
+ if self.object.reader.getTermVectors(0):
+ self.object.setRM3(fb_terms, fb_docs, original_query_weight, rm3_output_query, rm3_filter_terms)
+ else:
+ raise TypeError("RM3 is not supported for indexes without document vectors.")
def unset_rm3(self):
"""Disable RM3 query expansion."""
| diff --git a/tests/test_search.py b/tests/test_search.py
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -42,6 +42,20 @@ def setUp(self):
self.searcher = LuceneSearcher(f'{self.index_dir}lucene-index.cacm')
+ # Create index without document vectors
+ # The current directory depends on if you're running inside an IDE or from command line.
+ curdir = os.getcwd()
+ if curdir.endswith('tests'):
+ corpus_path = '../tests/resources/sample_collection_json'
+ else:
+ corpus_path = 'tests/resources/sample_collection_json'
+ self.no_vec_index_dir = 'no_vec_index'
+ cmd1 = f'python -m pyserini.index.lucene -collection JsonCollection ' + \
+ f'-generator DefaultLuceneDocumentGenerator ' + \
+ f'-threads 1 -input {corpus_path} -index {self.no_vec_index_dir}'
+ os.system(cmd1)
+ self.no_vec_searcher = LuceneSearcher(self.no_vec_index_dir)
+
def test_basic(self):
self.assertTrue(self.searcher.get_similarity().toString().startswith('BM25'))
@@ -220,6 +234,9 @@ def test_rm3(self):
self.assertEqual(hits[9].docid, 'CACM-1457')
self.assertAlmostEqual(hits[9].score, 1.43700, places=5)
+ with self.assertRaises(TypeError):
+ self.no_vec_searcher.set_rm3()
+
def test_doc_int(self):
# The doc method is overloaded: if input is int, it's assumed to be a Lucene internal docid.
doc = self.searcher.doc(1)
@@ -283,6 +300,7 @@ def tearDown(self):
self.searcher.close()
os.remove(self.tarball_name)
shutil.rmtree(self.index_dir)
+ shutil.rmtree(self.no_vec_index_dir)
if __name__ == '__main__':
| Better error message when index doesn't store doc vectors to support RM3 expansion
Storing doc vectors is what allows us to use RM3 query expansion.
| In more detail:
```python -m pyserini.search \
--topics dl19-passage \
--index msmarco-passage \
--output run.dl19-passage.txt \
--rm3
```
Gives:
```
java.lang.NullPointerException
at io.anserini.rerank.lib.Rm3Reranker.createdFeatureVector(Rm3Reranker.java:194)
at io.anserini.rerank.lib.Rm3Reranker.estimateRelevanceModel(Rm3Reranker.java:151)
at io.anserini.rerank.lib.Rm3Reranker.rerank(Rm3Reranker.java:83)
at io.anserini.rerank.RerankerCascade.run(RerankerCascade.java:64)
at io.anserini.search.SimpleSearcher.search(SimpleSearcher.java:594)
at io.anserini.search.SimpleSearcher.search(SimpleSearcher.java:546)
```
At the very least, we should have better error messages...
Hello, I spent some time looking into this and can see where the traceback comes from.
https://github.com/castorini/pyserini/blob/a052748ea669125a129138fd2f53402a423b14bb/pyserini/search/lucene/_searcher.py#L131
Which calls this:
https://github.com/castorini/anserini/blob/2e8e9fdfedc1b81522d9e22a805e08e6a7105762/src/main/java/io/anserini/search/SimpleSearcher.java#L583
My intention was to add a test `search` call at the point where RM3 is set, which is:
https://github.com/castorini/pyserini/blob/a052748ea669125a129138fd2f53402a423b14bb/pyserini/search/lucene/_searcher.py#L231
This way I should be able to add the custom error message during error handling right at the point where RM3 is being set. Something like:
```
self.object.setRM3(fb_terms, fb_docs, original_query_weight, rm3_output_query, rm3_filter_terms)
i = self.object.search("Test", 1)
# find someway to identify the error here and raise an error with a better error message
# if i has error:
# raise Index Without DocVectors Don't Support RM3 Expansion
```
However, I noticed:
1. the error does not seem to cause the program to crash
2. I wasn't able to use a try and except to catch the error
3. I couldn't get the error using `traceback`
So, how do I go about triggering something in the actual Python code when something goes wrong in the Java part? Should we be handling this in Pyserini or is it better handled in Anserini?
Also, am I approaching this the wrong way?
> This way I should be able to add the custom error message during error handling right at the point where RM3 is being set. Something like:
>
> ```
> self.object.setRM3(fb_terms, fb_docs, original_query_weight, rm3_output_query, rm3_filter_terms)
> i = self.object.search("Test", 1)
> # find someway to identify the error here and raise an error with a better error message
> # if i has error:
> # raise Index Without DocVectors Don't Support RM3 Expansion
> ```
Yup, I was thinking something along these lines. At the point of `setRM3`, determine if RM3 can be successfully executed. If not, then print a warning and the method should be a no-op.
See if there's a way you can introspect the index (i.e., directly find out if doc vectors are stored)... instead of trying a test query and catching an exception. | 2022-03-28T22:02:17Z | [] | [] |
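
A minimal sketch of the introspection approach suggested above, which is what the merged patch implements inside `set_rm3()` (the index path is a placeholder):

```python
from pyserini.search.lucene import LuceneSearcher

searcher = LuceneSearcher('path/to/lucene-index')  # placeholder index path

# Same check the patch adds: RM3 needs stored document vectors.
if searcher.object.reader.getTermVectors(0):
    searcher.set_rm3()
else:
    print('Index was built without document vectors; RM3 is not supported.')
```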
castorini/pyserini | 1,626 | castorini__pyserini-1626 | [
"1625"
] | d8dc5b3a1f32fd5d0cebeb711ba148ea967fadbe | diff --git a/pyserini/encode/_splade.py b/pyserini/encode/_splade.py
--- a/pyserini/encode/_splade.py
+++ b/pyserini/encode/_splade.py
@@ -25,7 +25,7 @@ def encode(self, text, max_length=256, **kwargs):
batch_aggregated_logits, _ = torch.max(torch.log(1 + torch.relu(batch_logits))
* input_attention.unsqueeze(-1), dim=1)
batch_aggregated_logits = batch_aggregated_logits.cpu().detach().numpy()
- raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
+ raw_weights = self._output_to_weight_dicts(batch_aggregated_logits)
return self._get_encoded_query_token_wight_dicts(raw_weights)[0]
def _output_to_weight_dicts(self, batch_aggregated_logits):
| diff --git a/tests/test_encoder.py b/tests/test_encoder.py
--- a/tests/test_encoder.py
+++ b/tests/test_encoder.py
@@ -233,6 +233,17 @@ def test_onnx_encode_unicoil(self):
temp_object.close()
del temp_object
+
+ temp_object1 = LuceneImpactSearcher(f'{self.index_dir}lucene9-index.cacm', 'naver/splade-cocondenser-ensembledistil')
+
+ # this function will never be called in _impact_searcher, here to check quantization correctness
+ results = temp_object1.encode("here is a test")
+ self.assertEqual(results.get("here"), 156)
+ self.assertEqual(results.get("a"), 31)
+ self.assertEqual(results.get("test"), 149)
+
+ temp_object1.close()
+ del temp_object1
def tearDown(self):
os.remove(self.tarball_name)
Error for SPLADE on-the-fly encoding with PyTorch
command used:
```bash
python -m pyserini.search.lucene --threads 12 --batch-size 128 \
--index msmarco-v1-passage-splade-pp-ed \
--topics msmarco-passage-dev-subset \
--encoder naver/splade-cocondenser-ensembledistil \
--output run.msmarco-v1-passage.splade-pp-ed-pytorch.dev.txt \
--hits 1000 --impact
```
error message:
> ...
> File "/home/arthur/workplace/pyserini/pyserini/encode/_splade.py", line 28, in encode
> raw_weights = self._output_to_weight_dicts(batch_token_ids, batch_weights)
> NameError: name 'batch_token_ids' is not defined
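
A minimal way to hit the same code path without a full retrieval run (the `SpladeQueryEncoder` class name and import path are assumed; the query string is arbitrary):

```python
from pyserini.encode import SpladeQueryEncoder  # class name / import path assumed

encoder = SpladeQueryEncoder('naver/splade-cocondenser-ensembledistil')
# Before the fix this raises: NameError: name 'batch_token_ids' is not defined
weights = encoder.encode('here is a test')
print(weights)
```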
| Confirmed that I can reproduce the bug.
Interestingly, from here - https://castorini.github.io/pyserini/2cr/msmarco-v1-passage.html
We have "SPLADE++ SelfDistil: query inference with ONNX" as a condition, but *not* "SPLADE++ SelfDistil: query inference with PyTorch."
> Confirmed that I can reproduce the bug.
>
> Interestingly, from here - https://castorini.github.io/pyserini/2cr/msmarco-v1-passage.html
>
> We have "SPLADE++ SelfDistil: query inference with ONNX" as a condition, but _not_ "SPLADE++ SelfDistil: query inference with PyTorch."
I will add all entries mentioned in the paper once all results are reproduced. | 2023-09-10T00:43:23Z | [] | [] |
OWASP/owasp-mastg | 2,518 | OWASP__owasp-mastg-2518 | [
"2591"
] | 560da51508686567fe84d266df9cbf6e72444197 | diff --git a/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic/android-data-in-traffic-capture/example-1/mitm_sensitive_logger.py b/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic/android-data-in-traffic-capture/example-1/mitm_sensitive_logger.py
new file mode 100644
--- /dev/null
+++ b/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic/android-data-in-traffic-capture/example-1/mitm_sensitive_logger.py
@@ -0,0 +1,43 @@
+from mitmproxy import http
+
+# This data would come from another file and should be defined after identifying the data that is considered sensitive for this application.
+# For example by using the Google Play Store Data Safety section.
+SENSITIVE_DATA = {
+ "precise_location_latitude": "37.7749",
+ "precise_location_longitude": "-122.4194",
+ "name": "John Doe",
+ "email_address": "[email protected]",
+ "phone_number": "+11234567890",
+ "credit_card_number": "1234 5678 9012 3456"
+}
+
+SENSITIVE_STRINGS = SENSITIVE_DATA.values()
+
+def contains_sensitive_data(string):
+ return any(sensitive in string for sensitive in SENSITIVE_STRINGS)
+
+def process_flow(flow):
+ url = flow.request.pretty_url
+ request_headers = flow.request.headers
+ request_body = flow.request.text
+ response_headers = flow.response.headers if flow.response else "No response"
+ response_body = flow.response.text if flow.response else "No response"
+
+ if (contains_sensitive_data(url) or
+ contains_sensitive_data(request_body) or
+ contains_sensitive_data(response_body)):
+ with open("sensitive_data.log", "a") as file:
+ if flow.response:
+ file.write(f"RESPONSE URL: {url}\n")
+ file.write(f"Response Headers: {response_headers}\n")
+ file.write(f"Response Body: {response_body}\n\n")
+ else:
+ file.write(f"REQUEST URL: {url}\n")
+ file.write(f"Request Headers: {request_headers}\n")
+ file.write(f"Request Body: {request_body}\n\n")
+def request(flow: http.HTTPFlow):
+ process_flow(flow)
+
+def response(flow: http.HTTPFlow):
+ process_flow(flow)
+
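
A small sketch of how the addon above might be launched for the network test in this change set (assumes `mitmdump` is installed and on PATH, and that the script is saved as `mitm_sensitive_logger.py` in the working directory):

```python
import subprocess

# mitmdump loads addon scripts via -s; flows matching SENSITIVE_DATA end up in sensitive_data.log.
subprocess.run(['mitmdump', '-s', 'mitm_sensitive_logger.py'], check=True)
```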
| diff --git a/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random/android-insecure-random-use/test.md b/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random/android-insecure-random-use/test.md
new file mode 100644
--- /dev/null
+++ b/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random/android-insecure-random-use/test.md
@@ -0,0 +1,28 @@
+---
+platform: android
+title: Insecure Random API Usage
+type: [static]
+mitigations:
+- android-use-secure-random
+prerequisites:
+- identify-sensitive-data
+- identify-security-relevant-contexts
+---
+
+## Overview
+
+Android apps sometimes use insecure pseudorandom number generators (PRNGs) such as `java.util.Random`, which is essentially a linear congruential generator. This type of PRNG generates a predictable sequence of numbers for any given seed value, making the sequence reproducible and insecure for cryptographic use. In particular, `java.util.Random` and `Math.random()` ([the latter](https://franklinta.com/2014/08/31/predicting-the-next-math-random-in-java/) simply calling `nextDouble()` on a static `java.util.Random` instance) produce identical number sequences when initialized with the same seed across all Java implementations.
+
+## Steps
+
+1. Run a [static analysis](../../../../../techniques/android/MASTG-TECH-0014.md) tool on the app and look for insecure random APIs.
+
+## Observation
+
+The output should contain a **list of locations where insecure random APIs are used**.
+
+## Evaluation
+
+Inspect the app source code using the provided location information.
+
+The test case fails if you can find random numbers generated using those APIs that are used in security-relevant contexts.
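
Not part of the patch, but as an illustration of the location list this test expects, a naive stand-in for a real static analysis tool could scan decompiled sources for the insecure APIs (the source directory and pattern are assumptions):

```python
import pathlib
import re

SOURCES = pathlib.Path('decompiled/sources')  # e.g. jadx output; directory name assumed
INSECURE = re.compile(r'java\.util\.Random|Math\.random\(')

for src in SOURCES.rglob('*.java'):
    for lineno, line in enumerate(src.read_text(errors='ignore').splitlines(), start=1):
        if INSECURE.search(line):
            print(f'{src}:{lineno}: {line.strip()}')
```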
diff --git a/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random/android-non-random-use/test.md b/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random/android-non-random-use/test.md
new file mode 100644
--- /dev/null
+++ b/risks/MASVS-CRYPTO/1-strong-crypto/insecure-random/android-non-random-use/test.md
@@ -0,0 +1,28 @@
+---
+platform: android
+title: Non-random Sources Usage
+type: [static]
+mitigations:
+- android-use-secure-random
+prerequisites:
+- identify-sensitive-data
+- identify-security-relevant-contexts
+---
+
+## Overview
+
+Android applications sometimes use non-random sources to generate "random" values, leading to potential security vulnerabilities. Common practices include relying on the current time, such as `Date().getTime()`, or accessing `Calendar.MILLISECOND` to produce values that are easily guessable and reproducible.
+
+## Steps
+
+1. Run a [static analysis](../../../../../techniques/android/MASTG-TECH-0014.md) tool on the app and look for uses of non-random sources.
+
+## Observation
+
+The output should contain a **list of locations where non-random sources are used**.
+
+## Evaluation
+
+Inspect the app source code using the provided location information.
+
+The test case fails if you can find security-relevant values, such as passwords or tokens, generated using non-random sources.
diff --git a/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic/android-data-in-traffic-capture/test.md b/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic/android-data-in-traffic-capture/test.md
new file mode 100644
--- /dev/null
+++ b/risks/MASVS-PRIVACY/1-data-minimization/sensitive-data-in-network-traffic/android-data-in-traffic-capture/test.md
@@ -0,0 +1,29 @@
+---
+platform: android
+title: Sensitive Data in Network Traffic Capture
+type: [dynamic, network]
+prerequisites:
+- identify-sensitive-data
+- privacy-policy
+- app-store-privacy-declarations
+---
+
+## Overview
+
+Attackers may capture network traffic from Android devices using an intercepting proxy, such as [OWASP ZAP](https://www.zaproxy.org/), [Burp Suite](https://portswigger.net/burp), or [mitmproxy](https://mitmproxy.org/), to analyze the data being transmitted by the app. This works even if the app uses HTTPS, as the attacker can install a custom root certificate on the Android device to decrypt the traffic. Inspecting traffic that is not encrypted with HTTPS is even easier and can be done without installing a custom root certificate, for example by using [Wireshark](https://www.wireshark.org/).
+
+## Steps
+
+1. Start the device.
+
+2. Start [logging sensitive data from network traffic](../../../../../techniques/android/MASTG-TECH-0100.md).
+
+3. Launch and use the app, going through the various workflows while inputting sensitive data wherever you can, especially in places that you know will trigger network traffic.
+
+## Observation
+
+The **network traffic sensitive data log** including decrypted HTTPS traffic contains the sensitive data you entered in the app.
+
+## Evaluation
+
+The test case fails if the **network traffic sensitive data log** contains sensitive data you entered in the app that is not stated in the App Store Privacy declarations.
diff --git a/risks/MASVS-STORAGE/2-prevent-data-leakage/data-in-logs/android-data-in-logs-frida/test.md b/risks/MASVS-STORAGE/2-prevent-data-leakage/data-in-logs/android-data-in-logs-frida/test.md
new file mode 100644
--- /dev/null
+++ b/risks/MASVS-STORAGE/2-prevent-data-leakage/data-in-logs/android-data-in-logs-frida/test.md
@@ -0,0 +1,32 @@
+---
+platform: android
+title: Leakage of Sensitive Data via Logging APIs
+apis: [Log, Logger, System.out.print, System.err.print, java.lang.Throwable#printStackTrace]
+type: [dynamic]
+---
+
+## Overview
+
+On Android platforms, logging APIs like `Log`, `Logger`, `System.out.print`, `System.err.print`, and `java.lang.Throwable#printStackTrace` can inadvertently lead to the leakage of sensitive information. Log messages are recorded in logcat, a shared memory buffer, accessible since Android 4.1 (API level 16) only to privileged system applications that declare the `READ_LOGS` permission. Nonetheless, the vast ecosystem of Android devices includes pre-loaded apps with the `READ_LOGS` privilege, increasing the risk of sensitive data exposure. Therefore, direct logging to logcat is generally advised against due to its susceptibility to data leaks.
+
+## Steps
+
+1. Install and run the app.
+
+2. Navigate to the screen of the mobile app you want to analyse the log output from.
+
+3. Execute a [method trace](https://mas.owasp.org/MASTG/techniques/android/MASTG-TECH-00xx/) by attaching to the running app, targeting logging APIs, and save the output.
+
+## Observation
+
+The **method trace output** contains a list of locations where logging APIs are used in the app for the current execution.
+
+## Evaluation
+
+The test case fails if you can find sensitive data being logged using those APIs.
+
+For example, the following output leaks a key via `Log`:
+
+```shell
+Log.println_native(0, 4, "tag", "key: 12345678")
+```
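
A minimal Frida-based sketch of the kind of method trace this test relies on, hooking only `android.util.Log.d` for brevity (the package name is a placeholder and a real trace would cover the other logging APIs as well):

```python
import sys
import frida

HOOK = """
Java.perform(function () {
  var Log = Java.use('android.util.Log');
  Log.d.overload('java.lang.String', 'java.lang.String').implementation = function (tag, msg) {
    send(tag + ': ' + msg);   // forward the logged message to the Python side
    return this.d(tag, msg);  // keep the original behaviour
  };
});
"""

session = frida.get_usb_device().attach('com.example.app')  # placeholder package name
script = session.create_script(HOOK)
script.on('message', lambda message, data: print(message.get('payload')))
script.load()
sys.stdin.read()  # keep the trace running until interrupted
```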
| New Risk - Insecure Random Usage [insecure-random]
## Description
Create a new risk for "Insecure Random Usage (MASVS-CRYPTO-1)" using the following information:
Using a non-cryptographically secure PRNG in a security context, such as authentication, poses significant risks. An attacker could potentially guess the generated numbers and gain access to privileged data or functionality. Predicting or regenerating random numbers can lead to encryption breaches, compromise sensitive user information, or enable user impersonation.
Create "`risks/MASVS-CRYPTO/1-***-****/insecure-random/risk.md`" including the following content:
```yaml
---
title: Insecure Random Usage
alias: insecure-random
platform: [android, ios]
profiles: [L1, L2]
mappings:
masvs-v1: [MSTG-CRYPTO-6]
masvs-v2: [MASVS-CRYPTO-1]
mastg-v1: [MASTG-TEST-0063, MASTG-TEST-0016]
---
## Overview
## Impact
## Modes of Introduction
## Mitigations
```
**To complete the sections, follow the guidelines from [Writing MASTG Risks & Tests](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)**
Use at least the following references:
- <https://developer.android.com/reference/java/security/SecureRandom>
- <https://developer.android.com/topic/security/risks/weak-prng>
- <https://developer.android.com/privacy-and-security/cryptography#deprecated-functionality>
- <https://nvlpubs.nist.gov/nistpubs/SpecialPublications/NIST.SP.800-131Ar2.pdf>
**MASTG v1 Refactoring:**
If the risk has a MASVS v1 ID, you can use it to [search for related tests in the MASTG](https://mas.owasp.org/MASTG/tests/) and use them as input to define your risks and associated tests.
- [MASTG-TEST-0063 - Testing Random Number Generation (ios)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0063/)
- [MASTG-TEST-0016 - Testing Random Number Generation (android)](https://mas.owasp.org/MASTG/tests/android/MASVS-CRYPTO/MASTG-TEST-0016/)
## Acceptance Criteria
- [ ] The risk has been created in the correct directory (`risks/MASVS-CRYPTO/1-***-****/insecure-random/risk.md`)
- [ ] The risk content follows the [guidelines](https://docs.google.com/document/d/1EMsVdfrDBAu0gmjWAUEs60q-fWaOmDB5oecY9d9pOlg/edit?usp=sharing)
- [ ] At least one GitHub Issue has been created for the corresponding tests (derived from "Modes of Introduction")
- [ ] The risk indicates the related MASTG v1 tests in its metadata.
| 2024-01-19T11:18:06Z | [] | [] |
|
facebookresearch/ParlAI | 47 | facebookresearch__ParlAI-47 | [
"44"
] | b92347f260dbf363a215b4b7a4139dc7fabda89e | diff --git a/parlai/core/build_data.py b/parlai/core/build_data.py
--- a/parlai/core/build_data.py
+++ b/parlai/core/build_data.py
@@ -8,46 +8,60 @@
These can be replaced if your particular file system does not support them.
"""
+import datetime
import os
+import requests
+import shutil
+import wget
def built(path):
- return os.path.isfile(path + "/.built")
+ """Checks if '.built' flag has been set for that task."""
+ return os.path.isfile(os.path.join(path, '.built'))
def download(path, url):
- s = ('cd "%s"' % path) + '; wget ' + url
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
+ """Downloads file using `wget`."""
+ filename = wget.download(url, out=path)
+ print() # wget prints download status, without newline
+
+def download_request(url, path, fname):
+ """Downloads file using `requests`."""
+ with requests.Session() as session:
+ response = session.get(url, stream=True)
+ CHUNK_SIZE = 32768
+ with open(os.path.join(path, fname), 'wb') as f:
+ for chunk in response.iter_content(CHUNK_SIZE):
+ if chunk: # filter out keep-alive new chunks
+ f.write(chunk)
+ response.close()
def make_dir(path):
- s = ('mkdir -p "%s"' % (path))
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
+ """Makes the directory and any nonexistent parent directories."""
+ os.makedirs(path, exist_ok=True)
def mark_done(path):
- s = ('date > "%s"/.built' % path)
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
+ """Marks the path as done by adding a '.built' file with the current
+ timestamp.
+ """
+ with open(os.path.join(path, '.built'), 'w') as write:
+ write.write(str(datetime.datetime.today()))
def move(path1, path2):
- s = ('mv "%s" "%s"' % (path1, path2))
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
+ """Renames the given file."""
+ shutil.move(path1, path2)
def remove_dir(path):
- s = ('rm -rf "%s"' % (path))
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
+ """Removes the given directory, if it exists."""
+ shutil.rmtree(path, ignore_errors=True)
def untar(path, fname, deleteTar=True):
+ """Unpacks the given archive file to the same directory, then (by default)
+ deletes the archive file.
+ """
print('unpacking ' + fname)
- s = ('cd "%s"' % path) + ';' + 'tar xfz "%s"' % (path + fname)
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
- # remove tar file
+ fullpath = os.path.join(path, fname)
+ shutil.unpack_archive(fullpath, path)
if deleteTar:
- s = ('cd "%s"' % path) + ';' + 'rm "%s"' % (path + fname)
- if os.system(s) != 0:
- raise RuntimeError('failed: ' + s)
+ os.remove(fullpath)
def _get_confirm_token(response):
for key, value in response.cookies.items():
@@ -55,12 +69,10 @@ def _get_confirm_token(response):
return value
return None
-def download_file_from_google_drive(gd_id, destination):
- import requests
-
+def download_from_google_drive(gd_id, destination):
+ """Uses the requests package to download a file from Google Drive."""
URL = 'https://docs.google.com/uc?export=download'
- session = requests.Session()
with requests.Session() as session:
response = session.get(URL, params={'id': gd_id}, stream=True)
token = _get_confirm_token(response)
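
For context, a sketch of how a task's `build.py` consumes these helpers after this refactor (the task name, archive name, and URL are placeholders; the real bAbI example appears further down in this patch):

```python
import os
import parlai.core.build_data as build_data

def build(opt):
    dpath = os.path.join(opt['datapath'], 'MyTask')  # placeholder task directory
    if not build_data.built(dpath):
        print('[building data: ' + dpath + ']')
        build_data.remove_dir(dpath)
        build_data.make_dir(dpath)
        fname = 'mytask.tar.gz'  # placeholder archive name
        build_data.download(dpath, 'https://example.com/' + fname)
        build_data.untar(dpath, fname)
        build_data.mark_done(dpath)
```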
diff --git a/parlai/core/params.py b/parlai/core/params.py
--- a/parlai/core/params.py
+++ b/parlai/core/params.py
@@ -14,12 +14,6 @@
def str2bool(value):
return value.lower() in ('yes', 'true', 't', '1', 'y')
-def path(s):
- # Add a trailing slash if its not there.
- if s[-1] != '/':
- s += '/'
- return s
-
class ParlaiParser(object):
"""Pseudo-extension of argparse which sets a number of parameters for the
ParlAI framework. More options can be added specific to other modules by
@@ -43,7 +37,7 @@ def __init__(self, add_parlai_args=True, add_model_args=False):
def add_parlai_data_path(self):
parlai_dir = (os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))))
- default_data_path = parlai_dir + '/data/'
+ default_data_path = os.path.join(parlai_dir, 'data')
self.parser.add_argument(
'-dp', '--datapath', default=default_data_path,
help='path to datasets, defaults to {parlai_dir}/data')
@@ -51,7 +45,7 @@ def add_parlai_data_path(self):
def add_mturk_log_path(self):
parlai_dir = (os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))))
- default_log_path = parlai_dir + '/logs/mturk/'
+ default_log_path = os.path.join(parlai_dir, 'logs', 'mturk')
self.parser.add_argument(
'--mturk-log-path', default=default_log_path,
help='path to mturk logs, defaults to {parlai_dir}/logs/mturk')
@@ -60,7 +54,7 @@ def add_parlai_args(self):
parlai_dir = (os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)))))
os.environ['PARLAI_HOME'] = parlai_dir
- default_downloads_path = parlai_dir + '/downloads/'
+ default_downloads_path = os.path.join(parlai_dir, 'downloads')
self.parser.add_argument(
'-t', '--task',
@@ -103,10 +97,10 @@ def parse_args(self, args=None, print_args=True):
self.args = self.parser.parse_args(args=args)
self.opt = {k: v for k, v in vars(self.args).items() if v is not None}
if 'download_path' in self.opt:
- self.opt['download_path'] = path(self.opt['download_path'])
+ self.opt['download_path'] = self.opt['download_path']
os.environ['PARLAI_DOWNPATH'] = self.opt['download_path']
if 'datapath' in self.opt:
- self.opt['datapath'] = path(self.opt['datapath'])
+ self.opt['datapath'] = self.opt['datapath']
if print_args:
self.print_args()
return self.opt
diff --git a/parlai/mturk/core/manage_hit.py b/parlai/mturk/core/manage_hit.py
--- a/parlai/mturk/core/manage_hit.py
+++ b/parlai/mturk/core/manage_hit.py
@@ -40,7 +40,7 @@ def _send_new_message(json_api_endpoint_url, task_group_id, conversation_id, age
post_data_dict['text'] = message_text
if reward:
post_data_dict['reward'] = reward
-
+
request = requests.post(json_api_endpoint_url, data=json.dumps(post_data_dict))
return json.loads(request.json())
@@ -90,8 +90,8 @@ def create_hits(opt, task_config, task_module_name, bot, num_hits, hit_reward, i
approval_index_url_template = html_api_endpoint_url + "?method_name=approval_index&task_group_id={{task_group_id}}&conversation_id=1&cur_agent_id={{cur_agent_id}}&requester_key="+requester_key_gt
- worker_agent_id = task_config['worker_agent_id']
- bot_agent_id = bot.getID()
+ worker_agent_id = task_config['worker_agent_id']
+ bot_agent_id = bot.getID()
cids = range(1, num_hits+1)
cid_map = {cid: i for i, cid in enumerate(cids)}
c_done_map = {cid: False for cid in cids}
@@ -115,12 +115,12 @@ def create_hits(opt, task_config, task_module_name, bot, num_hits, hit_reward, i
logs[cid].append(response)
new_message = _send_new_message(
json_api_endpoint_url=json_api_endpoint_url,
- task_group_id=task_group_id,
- conversation_id=cid,
- agent_id=bot_agent_id,
- message_text=response.get('text', None),
+ task_group_id=task_group_id,
+ conversation_id=cid,
+ agent_id=bot_agent_id,
+ message_text=response.get('text', None),
reward=response.get('reward', None),
- episode_done=response.get('episode_done', False),
+ episode_done=response.get('episode_done', False),
)
if new_message['message_id'] > last_message_id:
last_message_id = new_message['message_id']
@@ -132,13 +132,13 @@ def create_hits(opt, task_config, task_module_name, bot, num_hits, hit_reward, i
while len(conversations_remaining) > 0:
ret = _get_new_messages(
json_api_endpoint_url=json_api_endpoint_url,
- task_group_id=task_group_id,
- after_message_id=last_message_id,
+ task_group_id=task_group_id,
+ after_message_id=last_message_id,
excluded_agent_id=bot_agent_id,
)
conversation_dict = ret['conversation_dict']
new_last_message_id = ret['last_message_id']
-
+
if new_last_message_id:
last_message_id = new_last_message_id
@@ -168,22 +168,22 @@ def create_hits(opt, task_config, task_module_name, bot, num_hits, hit_reward, i
logs[conversation_id].append(response)
_send_new_message(
json_api_endpoint_url=json_api_endpoint_url,
- task_group_id=task_group_id,
- conversation_id=conversation_id,
- agent_id=bot_agent_id,
- message_text=response.get('text', None),
+ task_group_id=task_group_id,
+ conversation_id=conversation_id,
+ agent_id=bot_agent_id,
+ message_text=response.get('text', None),
reward=response.get('reward', None),
- episode_done=response.get('episode_done', False),
+ episode_done=response.get('episode_done', False),
)
# We don't create new HITs until this point, so that the HIT page will always have the conversation fully populated.
if not hits_created:
print('Creating HITs...')
hit_type_id = create_hit_type(
- hit_title=task_config['hit_title'],
- hit_description=task_config['hit_description'] + ' (ID: ' + task_group_id + ')',
- hit_keywords=task_config['hit_keywords'],
- hit_reward=hit_reward,
+ hit_title=task_config['hit_title'],
+ hit_description=task_config['hit_description'] + ' (ID: ' + task_group_id + ')',
+ hit_keywords=task_config['hit_keywords'],
+ hit_reward=hit_reward,
is_sandbox=is_sandbox
)
mturk_chat_url = None
@@ -192,8 +192,8 @@ def create_hits(opt, task_config, task_module_name, bot, num_hits, hit_reward, i
mturk_chat_url = html_api_endpoint_url + "?method_name=chat_index&task_group_id="+str(task_group_id)+"&conversation_id="+str(cid)+"&cur_agent_id="+str(worker_agent_id)
if not chat_page_only:
mturk_page_url = create_hit_with_hit_type(
- page_url=mturk_chat_url,
- hit_type_id=hit_type_id,
+ page_url=mturk_chat_url,
+ hit_type_id=hit_type_id,
is_sandbox=is_sandbox
)
@@ -230,11 +230,13 @@ def create_hits(opt, task_config, task_module_name, bot, num_hits, hit_reward, i
# Saving logs to file
# Log format: {conversation_id: [list of messages in the conversation]}
mturk_log_path = opt['mturk_log_path']
- task_group_path = mturk_log_path + task_module_name + '_' + datetime.now().strftime("%Y-%m-%d_%H:%M:%S") + '/'
+ task_group_path = os.path.join(mturk_log_path,
+ task_module_name + '_' +
+ datetime.now().strftime('%Y-%m-%d_%H:%M:%S'))
os.makedirs(task_group_path)
- with open(task_group_path+'approved.json', 'w') as file:
- file.write(json.dumps(logs_approved))
- with open(task_group_path+'rejected.json', 'w') as file:
- file.write(json.dumps(logs_rejected))
+ with open(os.path.join(task_group_path, 'approved.json'), 'w') as fout:
+ fout.write(json.dumps(logs_approved))
+ with open(os.path.join(task_group_path, 'rejected.json'), 'w') as fout:
+ fout.write(json.dumps(logs_rejected))
print("All conversations are saved to "+opt['mturk_log_path']+" in JSON format.\n")
diff --git a/parlai/mturk/core/setup_aws.py b/parlai/mturk/core/setup_aws.py
--- a/parlai/mturk/core/setup_aws.py
+++ b/parlai/mturk/core/setup_aws.py
@@ -164,7 +164,7 @@ def setup_rds():
VpcId=vpc_id)
security_group_id = response['GroupId']
print('RDS: Security group created.')
-
+
data = ec2.authorize_security_group_ingress(
GroupId=security_group_id,
IpPermissions=[
@@ -220,7 +220,7 @@ def setup_rds():
db_instances = response['DBInstances']
db_instance = db_instances[0]
status = db_instance['DBInstanceStatus']
-
+
endpoint = db_instance['Endpoint']
host = endpoint['Address']
@@ -240,10 +240,10 @@ def setup_relay_server_api(mturk_submit_url, rds_host, task_config, is_sandbox,
os.remove(parent_dir + '/' + lambda_server_zip_file_name)
# Copying files
- with open(parent_dir+'/handler_template.py', 'r') as handler_template_file:
+ with open(os.path.join(parent_dir, 'handler_template.py'), 'r') as handler_template_file:
handler_file_string = handler_template_file.read()
handler_file_string = handler_file_string.replace(
- '# {{block_task_config}}',
+ '# {{block_task_config}}',
"frame_height = " + str(mturk_hit_frame_height) + "\n" + \
"mturk_submit_url = \'" + mturk_submit_url + "\'\n" + \
"rds_host = \'" + rds_host + "\'\n" + \
@@ -254,20 +254,20 @@ def setup_relay_server_api(mturk_submit_url, rds_host, task_config, is_sandbox,
"num_hits = " + str(num_hits) + "\n" + \
"is_sandbox = " + str(is_sandbox) + "\n" + \
'task_description = ' + task_config['task_description'])
- with open(parent_dir + '/' + lambda_server_directory_name+'/handler.py', "w") as handler_file:
+ with open(os.path.join(parent_dir, lambda_server_directory_name, 'handler.py'), 'w') as handler_file:
handler_file.write(handler_file_string)
create_zip_file(
- lambda_server_directory_name=lambda_server_directory_name,
+ lambda_server_directory_name=lambda_server_directory_name,
lambda_server_zip_file_name=lambda_server_zip_file_name,
files_to_copy=files_to_copy
)
- with open(parent_dir + '/' + lambda_server_zip_file_name, mode='rb') as zip_file:
+ with open(os.path.join(parent_dir, lambda_server_zip_file_name), mode='rb') as zip_file:
zip_file_content = zip_file.read()
# Create Lambda function
lambda_client = boto3.client('lambda', region_name=region_name)
lambda_function_arn = None
- try:
+ try:
# Case 1: if Lambda function exists
lambda_function = lambda_client.get_function(FunctionName=lambda_function_name)
print("Lambda: Function already exists. Uploading latest version of code...")
@@ -287,7 +287,7 @@ def setup_relay_server_api(mturk_submit_url, rds_host, task_config, is_sandbox,
except ClientError as e:
# Should create IAM role for Lambda server
iam_client.create_role(
- RoleName = iam_role_name,
+ RoleName = iam_role_name,
AssumeRolePolicyDocument = '''{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "lambda.amazonaws.com" }, "Action": "sts:AssumeRole" } ]}'''
)
iam_client.attach_role_policy(
@@ -341,7 +341,7 @@ def setup_relay_server_api(mturk_submit_url, rds_host, task_config, is_sandbox,
shutil.rmtree(parent_dir + '/' + lambda_server_directory_name)
os.remove(parent_dir + '/' + lambda_server_zip_file_name)
- # Check API Gateway existence.
+ # Check API Gateway existence.
# If doesn't exist, create the APIs, point them to Lambda function, and set correct configurations
api_gateway_exists = False
rest_api_id = None
@@ -439,7 +439,7 @@ def setup_relay_server_api(mturk_submit_url, rds_host, task_config, is_sandbox,
def check_mturk_balance(num_hits, hit_reward, is_sandbox):
client = boto3.client(
- service_name = 'mturk',
+ service_name = 'mturk',
region_name = 'us-east-1',
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
)
@@ -458,7 +458,7 @@ def check_mturk_balance(num_hits, hit_reward, is_sandbox):
quit()
else:
raise
-
+
balance_needed = num_hits * hit_reward * 1.2
if user_balance < balance_needed:
@@ -469,7 +469,7 @@ def check_mturk_balance(num_hits, hit_reward, is_sandbox):
def create_hit_type(hit_title, hit_description, hit_keywords, hit_reward, is_sandbox):
client = boto3.client(
- service_name = 'mturk',
+ service_name = 'mturk',
region_name = 'us-east-1',
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
)
@@ -483,7 +483,7 @@ def create_hit_type(hit_title, hit_description, hit_keywords, hit_reward, is_san
'QualificationTypeId': '00000000000000000071',
'Comparator': 'In',
'LocaleValues': [
- {'Country': 'US'},
+ {'Country': 'US'},
{'Country': 'CA'},
{'Country': 'GB'},
{'Country': 'AU'},
@@ -515,7 +515,7 @@ def create_hit_with_hit_type(page_url, hit_type_id, is_sandbox):
'''
client = boto3.client(
- service_name = 'mturk',
+ service_name = 'mturk',
region_name = 'us-east-1',
endpoint_url = 'https://mturk-requester-sandbox.us-east-1.amazonaws.com'
)
@@ -524,7 +524,7 @@ def create_hit_with_hit_type(page_url, hit_type_id, is_sandbox):
if not is_sandbox:
client = boto3.client(service_name = 'mturk', region_name='us-east-1')
- # Create the HIT
+ # Create the HIT
response = client.create_hit_with_hit_type(
HITTypeId=hit_type_id,
MaxAssignments=1,
@@ -770,4 +770,4 @@ def clean_aws():
if __name__ == "__main__":
if sys.argv[1] == 'clean':
- clean_aws()
\ No newline at end of file
+ clean_aws()
diff --git a/parlai/tasks/babi/agents.py b/parlai/tasks/babi/agents.py
--- a/parlai/tasks/babi/agents.py
+++ b/parlai/tasks/babi/agents.py
@@ -3,22 +3,22 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
+import copy
+import os
def _path(exsz, task, opt, dt=''):
# Build the data if it doesn't exist.
build(opt)
if dt == '':
dt = opt['datatype'].split(':')[0]
- return (opt['datapath'] + '/bAbI/' +
- 'tasks_1-20_v1-2/en-valid' +
- '{exsz}-nosf/qa{task}_{type}.txt'.format(
- exsz=exsz, task=task, type=dt))
+ return os.path.join(opt['datapath'], 'bAbI', 'tasks_1-20_v1-2',
+ 'en-valid{exsz}-nosf'.format(exsz=exsz),
+ 'qa{task}_{type}.txt'.format(task=task, type=dt))
# Single bAbI task (1k training).
diff --git a/parlai/tasks/babi/build.py b/parlai/tasks/babi/build.py
--- a/parlai/tasks/babi/build.py
+++ b/parlai/tasks/babi/build.py
@@ -7,19 +7,20 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def build(opt):
- dpath = opt['datapath'] + "/bAbI/"
+ dpath = os.path.join(opt['datapath'], 'bAbI')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "babi.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/babi/" + fname
+ fname = 'babi.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/babi/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/booktest/agents.py b/parlai/tasks/booktest/agents.py
--- a/parlai/tasks/booktest/agents.py
+++ b/parlai/tasks/booktest/agents.py
@@ -3,13 +3,15 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
-import json
-import random
+
from parlai.core.agents import Teacher
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import json
+import random
+import os
class EvalTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
@@ -18,9 +20,8 @@ def __init__(self, opt, shared=None):
suffix = 'validation_NECN.20k'
else:
suffix = 'test_CN.10k'
- opt['datafile'] = (
- opt['datapath'] + '/BookTest/booktest-gut/' +
- suffix + '.txt')
+ opt['datafile'] = os.path.join(
+ opt['datapath'], 'BookTest', 'booktest-gut', suffix + '.txt')
super().__init__(opt, shared)
@@ -32,9 +33,8 @@ class StreamTeacher(Teacher):
def __init__(self, opt, shared=None):
build(opt)
# Only used for the train set.
- self.datafile = (
- opt['datapath'] + '/BookTest/booktest-gut/' +
- 'train.14M+.txt')
+ self.datafile = os.path.join(
+ opt['datapath'], 'BookTest', 'booktest-gut', 'train.14M+.txt')
self.fin = open(self.datafile)
def __len__(self):
diff --git a/parlai/tasks/booktest/build.py b/parlai/tasks/booktest/build.py
--- a/parlai/tasks/booktest/build.py
+++ b/parlai/tasks/booktest/build.py
@@ -6,19 +6,19 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
-
+import os
def build(opt):
- dpath = opt['datapath'] + "/BookTest/"
+ dpath = os.path.join(opt['datapath'], 'BookTest')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "booktest.tar.bz2"
- url = "https://s3.amazonaws.com/fair-data/parlai/booktest/" + fname
+ fname = 'booktest.tar.bz2'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/booktest/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/cbt/agents.py b/parlai/tasks/cbt/agents.py
--- a/parlai/tasks/cbt/agents.py
+++ b/parlai/tasks/cbt/agents.py
@@ -3,12 +3,14 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
+import copy
+import os
+
def _path(task, opt):
# Build the data if it doesn't exist.
@@ -21,9 +23,9 @@ def _path(task, opt):
suffix = 'test_2500ex'
elif dt == 'valid':
suffix = 'valid_2000ex'
- return (opt['datapath'] +
- '/CBT/CBTest/data/' +
- task + '_' + suffix + '.txt')
+
+ return os.path.join(
+ opt['datapath'], 'CBT', 'CBTest', 'data', task + '_' + suffix + '.txt')
class NETeacher(FbDialogTeacher):
diff --git a/parlai/tasks/cbt/build.py b/parlai/tasks/cbt/build.py
--- a/parlai/tasks/cbt/build.py
+++ b/parlai/tasks/cbt/build.py
@@ -6,19 +6,19 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
-
+import os
def build(opt):
- dpath = opt['datapath'] + "/CBT/"
+ dpath = os.path.join(opt['datapath'], 'CBT')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "cbt.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/cbt/" + fname
+ fname = 'cbt.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/cbt/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/cornell_movie/agents.py b/parlai/tasks/cornell_movie/agents.py
--- a/parlai/tasks/cornell_movie/agents.py
+++ b/parlai/tasks/cornell_movie/agents.py
@@ -3,19 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return (opt['datapath'] + '/CornellMovie/' +
- '{type}.txt'.format(
- type=dt + filtered))
+ return os.path.join(opt['datapath'], 'CornellMovie',
+ dt + filtered + '.txt')
class DefaultTeacher(FbDialogTeacher):
diff --git a/parlai/tasks/cornell_movie/build.py b/parlai/tasks/cornell_movie/build.py
--- a/parlai/tasks/cornell_movie/build.py
+++ b/parlai/tasks/cornell_movie/build.py
@@ -5,18 +5,18 @@
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
-import codecs
import parlai.core.build_data as build_data
-
+import codecs
+import os
def create_fb_format(lines_file, convo_file, outpath):
- print("[building fbformat]")
- ftrain = open(outpath + 'train.txt', 'w')
- fvalid = open(outpath + 'valid.txt', 'w')
- ftest = open(outpath + 'test.txt', 'w')
+ print('[building fbformat]')
+ ftrain = open(os.path.join(outpath, 'train.txt'), 'w')
+ fvalid = open(os.path.join(outpath, 'valid.txt'), 'w')
+ ftest = open(os.path.join(outpath, 'test.txt'), 'w')
lines = {}
-
- codecs.register_error("strict", codecs.ignore_errors)
+
+ codecs.register_error('strict', codecs.ignore_errors)
with codecs.open(lines_file, 'r') as f:
for line in f:
l = line.split(' ')
@@ -49,22 +49,22 @@ def create_fb_format(lines_file, convo_file, outpath):
def build(opt):
- dpath = opt['datapath'] + "/CornellMovie/"
+ dpath = os.path.join(opt['datapath'], 'CornellMovie')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "cornell_movie_dialogs_corpus.zip"
- url = "http://www.mpi-sws.org/~cristian/data/" +fname
- build_data.download(dpath, url)
+ fname = 'cornell_movie_dialogs_corpus.zip'
+ url = 'http://www.mpi-sws.org/~cristian/data/' + fname
+ build_data.download_request(url, dpath, fname)
build_data.untar(dpath, fname)
- dpext = dpath + '/cornell movie-dialogs corpus/'
- create_fb_format(dpext + 'movie_lines.txt',
- dpext + 'movie_conversations.txt',
+ dpext = os.path.join(dpath, 'cornell movie-dialogs corpus')
+ create_fb_format(os.path.join(dpext, 'movie_lines.txt'),
+ os.path.join(dpext, 'movie_conversations.txt'),
dpath)
# Mark the data as built.
diff --git a/parlai/tasks/dbll_babi/agents.py b/parlai/tasks/dbll_babi/agents.py
--- a/parlai/tasks/dbll_babi/agents.py
+++ b/parlai/tasks/dbll_babi/agents.py
@@ -3,11 +3,13 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
tasks = {}
tasks[1] = 'rl1_pure_imitation'
tasks[2] = 'rl2_pos_neg'
@@ -27,34 +29,34 @@
}
-def _path(subdir, task, opt, dt=''):
+def _path(subdir, task, opt, dt=''):
build(opt)
if dt == '':
dt = opt['datatype'].split(':')[0]
- task_name = "%s_%s" % (task.split('_')[1],
+ task_name = '%s_%s' % (task.split('_')[1],
tasks[int(task.split('_')[0])])
- return (opt['datapath'] + '/DBLL/dbll/' +
- '{subdir}_{task}_{suffix}.txt'.format(
- subdir=subdir, task=task_name,
- suffix=_suffixes[dt]))
+ return os.path.join(opt['datapath'], 'DBLL', 'dbll',
+ '{subdir}_{task}_{suffix}.txt'.format(
+ subdir=subdir, task=task_name,
+ suffix=_suffixes[dt]))
class TaskTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
params = opt['task'].split(':')[2]
opt = copy.deepcopy(opt)
- opt['datafile'] = _path('babi/babi1', params, opt)
- opt['cands_datafile'] = _path('babi/babi1', params, opt, 'train')
+ opt['datafile'] = _path(os.path.join('babi', 'babi1'), params, opt)
+ opt['cands_datafile'] = _path(os.path.join('babi', 'babi1'), params,
+ opt, 'train')
super().__init__(opt, shared)
# Defaults to task 2 with p=0.5.
class DefaultTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
- task = "2_p0.5"
+ task = '2_p0.5'
opt = copy.deepcopy(opt)
- opt['datafile'] = _path('babi/babi1', task, opt)
- opt['cands_datafile'] = _path('babi/babi1', task, opt, 'train')
+ opt['datafile'] = _path(os.path.join('babi', 'babi1'), task, opt)
+ opt['cands_datafile'] = _path(os.path.join('babi', 'babi1'), task,
+ opt, 'train')
super().__init__(opt, shared)
-
-
diff --git a/parlai/tasks/dbll_babi/build.py b/parlai/tasks/dbll_babi/build.py
--- a/parlai/tasks/dbll_babi/build.py
+++ b/parlai/tasks/dbll_babi/build.py
@@ -6,19 +6,19 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
-
+import os
def build(opt):
- dpath = opt['datapath'] + "/DBLL/"
+ dpath = os.path.join(opt['datapath'], 'DBLL')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "dbll.tgz"
- url = "https://s3.amazonaws.com/fair-data/parlai/dbll/" + fname
+ fname = 'dbll.tgz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/dbll/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/dbll_movie/agents.py b/parlai/tasks/dbll_movie/agents.py
--- a/parlai/tasks/dbll_movie/agents.py
+++ b/parlai/tasks/dbll_movie/agents.py
@@ -3,11 +3,13 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
tasks = {}
tasks[1] = 'rl1_pure_imitation'
tasks[2] = 'rl2_pos_neg'
@@ -32,18 +34,18 @@ def _path(subdir, task, opt):
dt = opt['datatype'].split(':')[0]
task_name = "%s_%s" % (task.split('_')[1],
tasks[int(task.split('_')[0])])
- return (opt['datapath'] + '/DBLL/dbll/' +
- '{subdir}_{task}_{suffix}.txt'.format(
- subdir=subdir, task=task_name,
- suffix=_suffixes[dt]))
+ return os.path.join(opt['datapath'], 'DBLL', 'dbll',
+ '{subdir}_{task}_{suffix}.txt'.format(
+ subdir=subdir, task=task_name,
+ suffix=_suffixes[dt]))
# The knowledge base of facts that can be used to answer questions.
class KBTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
build(opt)
- opt['datafile'] = (opt['datapath'] + '/DBLL/dbll/movieqa-dbll/' +
- 'movie_kb.txt')
+ opt['datafile'] = os.path.join(
+ opt['datapath'], 'DBLL', 'dbll', 'movieqa-dbll', 'movie_kb.txt')
super().__init__(opt, shared)
@@ -52,10 +54,11 @@ class TaskTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
params = opt['task'].split(':')[2]
opt = copy.deepcopy(opt)
- opt['datafile'] = _path('movieqa-dbll/movieqa1', params, opt)
- opt['cands_datafile'] = (opt['datapath'] +
- '/WikiMovies/movieqa/' +
- 'knowledge_source/entities.txt')
+ opt['datafile'] = _path(os.path.join('movieqa-dbll', 'movieqa1'),
+ params, opt)
+ opt['cands_datafile'] = os.path.join(opt['datapath'], 'WikiMovies',
+ 'movieqa', 'knowledge_source',
+ 'entities.txt')
super().__init__(opt, shared)
@@ -64,9 +67,10 @@ class DefaultTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
task = "2_p0.5"
opt = copy.deepcopy(opt)
- opt['datafile'] = _path('movieqa-dbll/movieqa1', task, opt)
- opt['cands_datafile'] = (opt['datapath'] +
- '/WikiMovies/movieqa/' +
- 'knowledge_source/entities.txt')
+ opt['datafile'] = _path(os.path.join('movieqa-dbll', 'movieqa1'),
+ task, opt)
+ opt['cands_datafile'] = os.path.join(opt['datapath'], 'WikiMovies',
+ 'movieqa', 'knowledge_source',
+ 'entities.txt')
super().__init__(opt, shared)
self.defaultPosReward = 1
diff --git a/parlai/tasks/dbll_movie/build.py b/parlai/tasks/dbll_movie/build.py
--- a/parlai/tasks/dbll_movie/build.py
+++ b/parlai/tasks/dbll_movie/build.py
@@ -8,21 +8,22 @@
import parlai.core.build_data as build_data
import parlai.tasks.wikimovies.build as wikimovies_build
+import os
def build(opt):
# Depends upon another dataset, wikimovies, build that first.
wikimovies_build.build(opt)
- dpath = opt['datapath'] + "/DBLL/"
+ dpath = os.path.join(opt['datapath'], 'DBLL')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "dbll.tgz"
- url = "https://s3.amazonaws.com/fair-data/parlai/dbll/" + fname
+ fname = 'dbll.tgz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/dbll/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/dialog_babi/agents.py b/parlai/tasks/dialog_babi/agents.py
--- a/parlai/tasks/dialog_babi/agents.py
+++ b/parlai/tasks/dialog_babi/agents.py
@@ -3,12 +3,13 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
+import copy
+import os
tasks = {}
tasks[1] = 'dialog-babi-task1-API-calls'
@@ -29,18 +30,17 @@ def _path(task, opt):
suffix = 'tst'
elif dt == 'valid':
suffix = 'dev'
- return (opt['datapath'] + '/dialog-bAbI/dialog-bAbI-tasks/' +
- '{tsk}-{type}.txt'.format(
- tsk=tasks[int(task)], type=suffix))
+ return os.path.join(opt['datapath'], 'dialog-bAbI', 'dialog-bAbI-tasks',
+ '{tsk}-{type}.txt'.format(tsk=tasks[int(task)], type=suffix))
# The knowledge base of facts that can be used to answer questions.
class KBTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
build(opt)
- opt['datafile'] = (opt['datapath'] +
- '/dialog-bAbI/dialog-bAbI-tasks/' +
- 'dialog-babi-kb-all.txt')
+ opt['datafile'] = os.path.join(opt['datapath'], 'dialog-bAbI',
+ 'dialog-bAbI-tasks',
+ 'dialog-babi-kb-all.txt')
super().__init__(opt, shared)
@@ -57,7 +57,7 @@ def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['task'] = ','.join('dialog_babi:Task:%d' % (i + 1)
for i in range(6))
- opt['cands_datafile'] = (opt['datapath'] +
- '/dialog-bAbI/dialog-bAbI-tasks/' +
- 'dialog-babi-candidates.txt')
+ opt['cands_datafile'] = os.path.join(opt['datapath'], 'dialog-bAbI',
+ 'dialog-bAbI-tasks',
+ 'dialog-babi-candidates.txt')
super().__init__(opt, shared)
diff --git a/parlai/tasks/dialog_babi/build.py b/parlai/tasks/dialog_babi/build.py
--- a/parlai/tasks/dialog_babi/build.py
+++ b/parlai/tasks/dialog_babi/build.py
@@ -6,19 +6,20 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def build(opt):
- dpath = opt['datapath'] + "/dialog-bAbI/"
+ dpath = os.path.join(opt['datapath'], 'dialog-bAbI')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "dialog_babi.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/dialog_babi/" + fname
+ fname = 'dialog_babi.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/dialog_babi/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/mctest/agents.py b/parlai/tasks/mctest/agents.py
--- a/parlai/tasks/mctest/agents.py
+++ b/parlai/tasks/mctest/agents.py
@@ -3,17 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return '{}/MCTest/{}{}.txt'.format(opt['datapath'], dt, filtered)
+ return os.path.join(opt['datapath'], 'MCTest', dt + filtered + '.txt')
class Task160Teacher(FbDialogTeacher):
diff --git a/parlai/tasks/mctest/build.py b/parlai/tasks/mctest/build.py
--- a/parlai/tasks/mctest/build.py
+++ b/parlai/tasks/mctest/build.py
@@ -6,11 +6,11 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
-
+import os
def create_fb_format(outpath, dtype, inpath, inpath2):
- print("building fbformat:" + dtype)
- fout = open(outpath + dtype + '.txt', 'w')
+ print('building fbformat:' + dtype)
+ fout = open(os.path.join(outpath, dtype + '.txt'), 'w')
with open(inpath + '.tsv') as f:
lines = [line.strip('\n') for line in f]
if inpath2 is None:
@@ -41,28 +41,34 @@ def create_fb_format(outpath, dtype, inpath, inpath2):
fout.close()
def build(opt):
- dpath = opt['datapath'] + "/MCTest/"
+ dpath = os.path.join(opt['datapath'], 'MCTest')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "mctest.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/mctest/" + fname
+ fname = 'mctest.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/mctest/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
- dpext = dpath + 'mctest/'
- create_fb_format(dpath, 'train160', dpext + 'MCTest/mc160.train', None)
- create_fb_format(dpath, 'valid160', dpext + 'MCTest/mc160.dev', None)
- create_fb_format(dpath, 'test160', dpext + 'MCTest/mc160.test',
- dpext + 'MCTestAnswers/mc160.test.ans')
- create_fb_format(dpath, 'train500', dpext + 'MCTest/mc500.train', None)
- create_fb_format(dpath, 'valid500', dpext + 'MCTest/mc500.dev', None)
- create_fb_format(dpath, 'test500', dpext + 'MCTest/mc500.test',
- dpext + 'MCTestAnswers/mc500.test.ans')
+ dpext = os.path.join(dpath, 'mctest')
+ create_fb_format(dpath, 'train160',
+ os.path.join(dpext, 'MCTest', 'mc160.train'), None)
+ create_fb_format(dpath, 'valid160',
+ os.path.join(dpext, 'MCTest', 'mc160.dev'), None)
+ create_fb_format(dpath, 'test160',
+ os.path.join(dpext, 'MCTest', 'mc160.test'),
+ os.path.join(dpext, 'MCTestAnswers', 'mc160.test.ans'))
+ create_fb_format(dpath, 'train500',
+ os.path.join(dpext, 'MCTest', 'mc500.train'), None)
+ create_fb_format(dpath, 'valid500',
+ os.path.join(dpext, 'MCTest', 'mc500.dev'), None)
+ create_fb_format(dpath, 'test500',
+ os.path.join(dpext, 'MCTest', 'mc500.test'),
+ os.path.join(dpext, 'MCTestAnswers', 'mc500.test.ans'))
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/parlai/tasks/moviedialog/agents.py b/parlai/tasks/moviedialog/agents.py
--- a/parlai/tasks/moviedialog/agents.py
+++ b/parlai/tasks/moviedialog/agents.py
@@ -3,17 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from parlai.core.agents import MultiTaskTeacher
from .build import build
+import copy
+import os
+
tasks = {}
-tasks[1] = 'task1_qa/task1_qa_'
-tasks[2] = 'task2_recs/task2_recs_'
-tasks[3] = 'task3_qarecs/task3_qarecs_'
-tasks[4] = 'task4_reddit/task4_reddit/task4_reddit_'
+tasks[1] = os.path.join('task1_qa', 'task1_qa_')
+tasks[2] = os.path.join('task2_recs', 'task2_recs_')
+tasks[3] = os.path.join('task3_qarecs', 'task3_qarecs_')
+tasks[4] = os.path.join('task4_reddit', 'task4_reddit', 'task4_reddit_')
def _path(task, opt):
# Build the data if it doesn't exist.
@@ -27,8 +29,9 @@ def _path(task, opt):
elif dt == 'valid':
suffix = 'dev'
- datafile = (opt['datapath'] + '/MovieDialog/movie_dialog_dataset/' +
- tasks[int(task)] + suffix + '.txt')
+ datafile = os.path.join(opt['datapath'], 'MovieDialog',
+ 'movie_dialog_dataset',
+ tasks[int(task)] + suffix + '.txt')
if int(task) == 4:
if dt == 'train':
candpath = None
@@ -36,8 +39,8 @@ def _path(task, opt):
candpath = datafile.replace(suffix + '.txt',
'cand-' + dt + '.txt')
else:
- candpath = (opt['datapath'] +
- '/MovieDialog/movie_dialog_dataset/entities.txt')
+ candpath = os.path.join(opt['datapath'], 'MovieDialog',
+ 'movie_dialog_dataset', 'entities.txt')
return datafile, candpath
@@ -45,9 +48,8 @@ def _path(task, opt):
class KBTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
build(opt)
- opt['datafile'] = (opt['datapath'] +
- 'MovieDialog/movie_dialog_dataset/' +
- 'movie_kb.txt')
+ opt['datafile'] = os.path.join(opt['datapath'], 'MovieDialog',
+ 'movie_dialog_dataset', 'movie_kb.txt')
super().__init__(opt, shared)
diff --git a/parlai/tasks/moviedialog/build.py b/parlai/tasks/moviedialog/build.py
--- a/parlai/tasks/moviedialog/build.py
+++ b/parlai/tasks/moviedialog/build.py
@@ -6,28 +6,28 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
-
+import os
def build(opt):
- dpath = opt['datapath'] + "/MovieDialog/"
+ dpath = os.path.join(opt['datapath'], 'MovieDialog')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "moviedialog.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/moviedialog/" + fname
+ fname = 'moviedialog.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/moviedialog/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
- dpath2 = dpath + "/movie_dialog_dataset/task4_reddit/"
- fname2a = dpath2 + "p6tyohj"
- fname2b = dpath2 + "p6tyohj.tgz"
- url2 = "http://tinyurl.com/" + "p6tyohj"
+ dpath2 = os.path.join(dpath, 'movie_dialog_dataset', 'task4_reddit')
+ fname2a = os.path.join(dpath2, 'p6tyohj')
+ fname2b = os.path.join(dpath2, 'p6tyohj.tgz')
+ url2 = 'http://tinyurl.com/' + 'p6tyohj'
build_data.download(dpath2, url2)
build_data.move(fname2a, fname2b)
- build_data.untar(dpath2, "p6tyohj.tgz")
+ build_data.untar(dpath2, 'p6tyohj.tgz')
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/parlai/tasks/mturkwikimovies/agents.py b/parlai/tasks/mturkwikimovies/agents.py
--- a/parlai/tasks/mturkwikimovies/agents.py
+++ b/parlai/tasks/mturkwikimovies/agents.py
@@ -3,11 +3,12 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
def _path(opt, filtered):
# Build the data if it doesn't exist. It depends on wikimovies.
@@ -15,15 +16,15 @@ def _path(opt, filtered):
dt = opt['datatype'].split(':')[0]
if dt == 'valid':
dt = 'dev'
- return (opt['datapath'] + '/MTurkWikiMovies/mturkwikimovies/' +
- 'qa-{type}.txt'.format(type=dt))
+ return os.path.join(opt['datapath'], 'MTurkWikiMovies', 'mturkwikimovies',
+ 'qa-{type}.txt'.format(type=dt))
class DefaultTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['datafile'] = _path(opt, '')
- opt['cands_datafile'] = (opt['datapath'] +
- '/WikiMovies/movieqa/' +
- 'knowledge_source/entities.txt')
+ opt['cands_datafile'] = os.path.join(opt['datapath'], 'WikiMovies',
+ 'movieqa', 'knowledge_source',
+ 'entities.txt')
super().__init__(opt, shared)
diff --git a/parlai/tasks/mturkwikimovies/build.py b/parlai/tasks/mturkwikimovies/build.py
--- a/parlai/tasks/mturkwikimovies/build.py
+++ b/parlai/tasks/mturkwikimovies/build.py
@@ -7,21 +7,22 @@
import parlai.core.build_data as build_data
import parlai.tasks.wikimovies.build as wikimovies_build
+import os
def build(opt):
# Depends upon another dataset, wikimovies, build that first.
wikimovies_build.build(opt)
- dpath = opt['datapath'] + "/MTurkWikiMovies/"
+ dpath = os.path.join(opt['datapath'], 'MTurkWikiMovies')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "mturkwikimovies.tar.gz"
- url = ("https://s3.amazonaws.com/fair-data/parlai/mturkwikimovies/"
+ fname = 'mturkwikimovies.tar.gz'
+ url = ('https://s3.amazonaws.com/fair-data/parlai/mturkwikimovies/'
+ fname)
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/opensubtitles/agents.py b/parlai/tasks/opensubtitles/agents.py
--- a/parlai/tasks/opensubtitles/agents.py
+++ b/parlai/tasks/opensubtitles/agents.py
@@ -3,19 +3,20 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return (opt['datapath'] + '/OpenSubtitles/' +
- '{type}.txt'.format(
- type=dt + filtered))
+ return os.path.join(opt['datapath'], 'OpenSubtitles',
+ dt + filtered + '.txt')
class DefaultTeacher(FbDialogTeacher):
diff --git a/parlai/tasks/opensubtitles/build.py b/parlai/tasks/opensubtitles/build.py
--- a/parlai/tasks/opensubtitles/build.py
+++ b/parlai/tasks/opensubtitles/build.py
@@ -5,57 +5,54 @@
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
+import parlai.core.build_data as build_data
import codecs
import gzip
-import parlai.core.build_data as build_data
+import os
def create_fb_format(inpath, outpath):
- print("[building fbformat]")
- ftrain = open(outpath + 'train.txt', 'w')
- fvalid = open(outpath + 'valid.txt', 'w')
- ftest = open(outpath + 'test.txt', 'w')
+ print('[building fbformat]')
+ ftrain = open(os.path.join(outpath, 'train.txt'), 'w')
+ fvalid = open(os.path.join(outpath, 'valid.txt'), 'w')
+ ftest = open(os.path.join(outpath, 'test.txt'), 'w')
- # find all the files.
- import subprocess
- result = subprocess.run(['find', inpath],
- stdout=subprocess.PIPE)
- list = str(result.stdout).split('\\n')
-
conv_id = 0
- for f in list:
- if f[-3:] == '.gz':
- dialog = ''
- conv_id = conv_id + 1
- with gzip.open(f, 'r') as f1:
- print(str(conv_id) + ": " + f)
- words = ''
- line_id = 1
- turn_id = 1
- for line in f1:
- line = str(line)
- if line.find('<s id="') != -1:
- # new sentence
- if len(words) > 0:
- if (turn_id % 2) == 0:
- dialog = dialog + (str(line_id) + ' ' + words)
- else:
- dialog = dialog + ('\t' + words + '\n')
- line_id = line_id + 1
- turn_id = turn_id + 1
- words = ''
- else:
- i1 = line.find('<w id="')
- if i1 >= 0:
- line = line[i1:]
- word = line[line.find('>')+1:line.find('</w')]
- words = words + ' ' + word.replace('\t', ' ')
- handle = ftrain
- if (conv_id % 10) == 0:
- handle = ftest
- if (conv_id % 10) == 1:
- handle = fvalid
- handle.write(dialog + '\n')
+ # find all the files.
+ for root, subfolder, files in os.walk(inpath):
+ for f in files:
+ if f.endswith('.gz'):
+ dialog = ''
+ conv_id = conv_id + 1
+ with gzip.open(os.path.join(root, f), 'r') as f1:
+ # print(str(conv_id) + ': ' + f)
+ words = ''
+ line_id = 1
+ turn_id = 1
+ for line in f1:
+ line = str(line)
+ if line.find('<s id="') != -1:
+ # new sentence
+ if len(words) > 0:
+ if (turn_id % 2) == 0:
+ dialog += str(line_id) + ' ' + words
+ else:
+ dialog += '\t' + words + '\n'
+ line_id += 1
+ turn_id = turn_id + 1
+ words = ''
+ else:
+ i1 = line.find('<w id="')
+ if i1 >= 0:
+ line = line[i1:]
+ word = line[line.find('>')+1:line.find('</w')]
+ words = words + ' ' + word.replace('\t', ' ')
+ handle = ftrain
+ if (conv_id % 10) == 0:
+ handle = ftest
+ if (conv_id % 10) == 1:
+ handle = fvalid
+ handle.write(dialog + '\n')
ftrain.close()
fvalid.close()
@@ -63,20 +60,20 @@ def create_fb_format(inpath, outpath):
def build(opt):
- dpath = opt['datapath'] + "/OpenSubtitles/"
+ dpath = os.path.join(opt['datapath'], 'OpenSubtitles')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "download.php?f=OpenSubtitles/en.tar.gz"
- url = ("http://opus.lingfil.uu.se/" + fname)
- build_data.download(dpath, url)
- build_data.untar(dpath, 'download.php?f=OpenSubtitles%2Fen.tar.gz')
+ fname = 'download.php?f=OpenSubtitles/en.tar.gz'
+ url = ('http://opus.lingfil.uu.se/' + fname)
+ build_data.download(os.path.join(dpath, 'OpenSubtitles.tar.gz'), url)
+ build_data.untar(dpath, 'OpenSubtitles.tar.gz')
- create_fb_format(dpath + '/OpenSubtitles/en/', dpath)
+ create_fb_format(os.path.join(dpath, 'OpenSubtitles', 'en'), dpath)
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/parlai/tasks/qacnn/agents.py b/parlai/tasks/qacnn/agents.py
--- a/parlai/tasks/qacnn/agents.py
+++ b/parlai/tasks/qacnn/agents.py
@@ -3,17 +3,18 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
def _path(opt):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return '{}/QACNN/{}.txt'.format(opt['datapath'], dt)
+ return os.path.join(opt['datapath'], 'QACNN', dt + '.txt')
class DefaultTeacher(FbDialogTeacher):
diff --git a/parlai/tasks/qacnn/build.py b/parlai/tasks/qacnn/build.py
--- a/parlai/tasks/qacnn/build.py
+++ b/parlai/tasks/qacnn/build.py
@@ -6,6 +6,7 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def _process(fname, fout):
@@ -26,17 +27,15 @@ def _process(fname, fout):
def create_fb_format(outpath, dtype, inpath):
print('building fbformat:' + dtype)
- import os
- fout = open(outpath + dtype + '.txt', 'w')
- for file in os.listdir(inpath):
- if file.endswith('.question'):
- fname = os.path.join(inpath, file)
- _process(fname, fout)
- fout.close()
+ with open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
+ for f in os.listdir(inpath):
+ if f.endswith('.question'):
+ fname = os.path.join(inpath, f)
+ _process(fname, fout)
def build(opt):
- dpath = opt['datapath'] + '/QACNN/'
+ dpath = os.path.join(opt['datapath'], 'QACNN')
if not build_data.built(dpath):
print('[building data: ' + dpath + ']')
@@ -46,12 +45,15 @@ def build(opt):
# Download the data.
fname = 'cnn.tgz'
gd_id = '0BwmD_VLjROrfTTljRDVZMFJnVWM'
- build_data.download_file_from_google_drive(gd_id, dpath + fname)
+ build_data.download_from_google_drive(gd_id, os.path.join(dpath, fname))
build_data.untar(dpath, fname)
- create_fb_format(dpath, 'train', dpath + 'cnn/questions/training/')
- create_fb_format(dpath, 'valid', dpath + 'cnn/questions/validation/')
- create_fb_format(dpath, 'test', dpath + 'cnn/questions/test/')
+ create_fb_format(dpath, 'train',
+ os.path.join(dpath, 'cnn', 'questions', 'training'))
+ create_fb_format(dpath, 'valid',
+ os.path.join(dpath, 'cnn', 'questions', 'validation'))
+ create_fb_format(dpath, 'test',
+ os.path.join(dpath, 'cnn', 'questions', 'test'))
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/parlai/tasks/qadailymail/agents.py b/parlai/tasks/qadailymail/agents.py
--- a/parlai/tasks/qadailymail/agents.py
+++ b/parlai/tasks/qadailymail/agents.py
@@ -3,17 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return '{}/QADailyMail/{}.txt'.format(opt['datapath'], dt)
+ return os.path.join(opt['datapath'], 'QADailyMail' , dt + '.txt')
class DefaultTeacher(FbDialogTeacher):
diff --git a/parlai/tasks/qadailymail/build.py b/parlai/tasks/qadailymail/build.py
--- a/parlai/tasks/qadailymail/build.py
+++ b/parlai/tasks/qadailymail/build.py
@@ -6,6 +6,7 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def _process(fname, fout):
@@ -26,17 +27,15 @@ def _process(fname, fout):
def create_fb_format(outpath, dtype, inpath):
print('building fbformat:' + dtype)
- import os
- fout = open(outpath + dtype + '.txt', 'w')
- for file in os.listdir(inpath):
- if file.endswith('.question'):
- fname = os.path.join(inpath, file)
- _process(fname, fout)
- fout.close()
+ with open(os.path.join(outpath, dtype + '.txt'), 'w') as fout:
+ for f in os.listdir(inpath):
+ if f.endswith('.question'):
+ fname = os.path.join(inpath, f)
+ _process(fname, fout)
def build(opt):
- dpath = opt['datapath'] + '/QADailyMail/'
+ dpath = os.path.join(opt['datapath'], 'QADailyMail')
if not build_data.built(dpath):
print('[building data: ' + dpath + ']')
@@ -46,13 +45,13 @@ def build(opt):
# Download the data.
fname = 'qadailymail.tar.gz'
gd_id = '0BwmD_VLjROrfN0xhTDVteGQ3eG8'
- build_data.download_file_from_google_drive(gd_id, dpath + fname)
+ build_data.download_from_google_drive(gd_id, os.path.join(dpath, fname))
build_data.untar(dpath, fname)
- ext = 'dailymail/questions/'
- create_fb_format(dpath, 'train', dpath + ext + 'training/')
- create_fb_format(dpath, 'valid', dpath + ext + 'validation/')
- create_fb_format(dpath, 'test', dpath + ext + 'test/')
+ ext = os.path.join('dailymail', 'questions')
+ create_fb_format(dpath, 'train', os.path.join(dpath, ext, 'training'))
+ create_fb_format(dpath, 'valid', os.path.join(dpath, ext, 'validation'))
+ create_fb_format(dpath, 'test', os.path.join(dpath, ext, 'test'))
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/parlai/tasks/simplequestions/agents.py b/parlai/tasks/simplequestions/agents.py
--- a/parlai/tasks/simplequestions/agents.py
+++ b/parlai/tasks/simplequestions/agents.py
@@ -3,17 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return '{}/SimpleQuestions/sq/{}.txt'.format(opt['datapath'], dt)
+ return os.path.join(opt['datapath'], 'SimpleQuestions', 'sq', dt + '.txt')
class DefaultTeacher(FbDialogTeacher):
diff --git a/parlai/tasks/simplequestions/build.py b/parlai/tasks/simplequestions/build.py
--- a/parlai/tasks/simplequestions/build.py
+++ b/parlai/tasks/simplequestions/build.py
@@ -6,19 +6,20 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def build(opt):
- dpath = opt['datapath'] + "/SimpleQuestions/"
+ dpath = os.path.join(opt['datapath'], 'SimpleQuestions')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "simplequestions.tar.gz"
- url = ("https://s3.amazonaws.com/fair-data/parlai/simplequestions/"
+ fname = 'simplequestions.tar.gz'
+ url = ('https://s3.amazonaws.com/fair-data/parlai/simplequestions/'
+ fname)
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/squad/agents.py b/parlai/tasks/squad/agents.py
--- a/parlai/tasks/squad/agents.py
+++ b/parlai/tasks/squad/agents.py
@@ -3,12 +3,15 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import json
-import random
+
from parlai.core.agents import Teacher
from parlai.core.dialog_teacher import DialogTeacher
from .build import build
+import json
+import random
+import os
+
class HandwrittenTeacher(Teacher):
"""Hand-written SQuAD teacher, which loads the json squad data and
@@ -24,9 +27,7 @@ def __init__(self, opt, shared=None):
suffix = 'train'
else:
suffix = 'dev'
- datapath = (
- opt['datapath'] + 'SQuAD/' +
- suffix + '-v1.1.json')
+ datapath = os.path.join(opt['datapath'], 'SQuAD', suffix + '-v1.1.json')
self.data = self._setup_data(datapath)
self.episode_idx = -1
super().__init__(opt, shared)
@@ -89,10 +90,8 @@ def __init__(self, opt, shared=None):
suffix = 'train'
else:
suffix = 'dev'
- opt['datafile'] = (
- opt['datapath'] + 'SQuAD/' +
- suffix + '-v1.1.json'
- )
+ opt['datafile'] = os.path.join(opt['datapath'], 'SQuAD',
+ suffix + '-v1.1.json')
self.id = 'squad'
super().__init__(opt, shared)
diff --git a/parlai/tasks/squad/build.py b/parlai/tasks/squad/build.py
--- a/parlai/tasks/squad/build.py
+++ b/parlai/tasks/squad/build.py
@@ -6,20 +6,21 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def build(opt):
- dpath = opt['datapath'] + "/SQuAD/"
+ dpath = os.path.join(opt['datapath'], 'SQuAD')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname1 = "train-v1.1.json"
- fname2 = "dev-v1.1.json"
- url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
+ fname1 = 'train-v1.1.json'
+ fname2 = 'dev-v1.1.json'
+ url = 'https://rajpurkar.github.io/SQuAD-explorer/dataset/'
build_data.download(dpath, url + fname1)
build_data.download(dpath, url + fname2)
diff --git a/parlai/tasks/ubuntu/agents.py b/parlai/tasks/ubuntu/agents.py
--- a/parlai/tasks/ubuntu/agents.py
+++ b/parlai/tasks/ubuntu/agents.py
@@ -4,10 +4,12 @@
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import csv
from parlai.core.dialog_teacher import DialogTeacher
from .build import build
+import csv
+import os
+
class DefaultTeacher(DialogTeacher):
"""This teacher inherits from the core Dialog Teacher, which just
@@ -19,10 +21,8 @@ class DefaultTeacher(DialogTeacher):
def __init__(self, opt, shared=None):
self.datatype = opt['datatype']
build(opt)
- opt['datafile'] = (
- opt['datapath'] + 'Ubuntu/' +
- opt['datatype'].split(':')[0] + '.csv'
- )
+ opt['datafile'] = os.path.join(opt['datapath'], 'Ubuntu',
+ opt['datatype'].split(':')[0] + '.csv')
super().__init__(opt, shared)
def setup_data(self, path):
diff --git a/parlai/tasks/ubuntu/build.py b/parlai/tasks/ubuntu/build.py
--- a/parlai/tasks/ubuntu/build.py
+++ b/parlai/tasks/ubuntu/build.py
@@ -11,7 +11,7 @@
def build(opt):
- dpath = opt['datapath'] + '/Ubuntu/'
+ dpath = os.path.join(opt['datapath'], 'Ubuntu')
if not build_data.built(dpath):
print('[building data: ' + dpath + ']')
diff --git a/parlai/tasks/vqa_coco2014/agents.py b/parlai/tasks/vqa_coco2014/agents.py
--- a/parlai/tasks/vqa_coco2014/agents.py
+++ b/parlai/tasks/vqa_coco2014/agents.py
@@ -3,41 +3,40 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import json
-import random
-from PIL import Image
from parlai.core.agents import Teacher
from .build import build, buildImage
+from PIL import Image
+import json
+import random
+import os
+
def _path(opt):
build(opt)
dt = opt['datatype'].split(':')[0]
- if dt == "train":
+ if dt == 'train':
ques_suffix = 'MultipleChoice_mscoco_train2014'
annotation_suffix = 'mscoco_train2014'
- img_suffix = 'train2014/COCO_train2014_'
- elif dt == "valid":
+ img_suffix = os.path.join('train2014', 'COCO_train2014_')
+ elif dt == 'valid':
ques_suffix = 'MultipleChoice_mscoco_val2014'
annotation_suffix = 'mscoco_val2014'
- img_suffix = 'val2014/COCO_val2014_'
+ img_suffix = os.path.join('val2014', 'COCO_val2014_')
else:
ques_suffix = 'MultipleChoice_mscoco_test2015'
annotation_suffix = 'None'
- img_suffix = 'test2014/COCO_test2014_'
+ img_suffix = os.path.join('test2014', 'COCO_test2014_')
- data_path = (
- opt['datapath'] + "/VQA-COCO2014/" +
+ data_path = os.path.join(opt['datapath'], 'VQA-COCO2014',
ques_suffix + '_questions.json')
- annotation_path = (
- opt['datapath'] + "/VQA-COCO2014/" +
+ annotation_path = os.path.join(opt['datapath'], 'VQA-COCO2014',
annotation_suffix + '_annotations.json')
- image_path = (
- opt['datapath'] + "/VQA-COCO2014/" + img_suffix)
+ image_path = os.path.join(opt['datapath'], 'VQA-COCO2014', img_suffix)
return data_path, annotation_path, image_path
@@ -67,7 +66,6 @@ def __len__(self):
# return state/action dict based upon passed state
def act(self):
-
if self.datatype == 'train':
self.episode_idx = random.randrange(self.len)
else:
@@ -94,12 +92,12 @@ def act(self):
}
def _setup_data(self, data_path, annotation_path, image_path):
- print("loading: " + data_path)
+ print('loading: ' + data_path)
with open(data_path) as data_file:
self.ques = json.load(data_file)
if self.datatype != 'test':
- print("loading: " + annotation_path)
+ print('loading: ' + annotation_path)
with open(annotation_path) as data_file:
self.annotation = json.load(data_file)
@@ -119,12 +117,12 @@ def __init__(self, opt, shared=None):
data_path, annotation_path, image_path = _path(opt)
self._setup_data(data_path, annotation_path, image_path)
self.episode_idx = -1
+
def __len__(self):
return self.len
# return state/action dict based upon passed state
def act(self):
-
if self.datatype == 'train':
self.episode_idx = random.randrange(self.len)
else:
@@ -153,13 +151,12 @@ def act(self):
}
def _setup_data(self, data_path, annotation_path, image_path):
-
- print("loading: " + data_path)
+ print('loading: ' + data_path)
with open(data_path) as data_file:
self.ques = json.load(data_file)
if self.datatype != 'test':
- print("loading: " + annotation_path)
+ print('loading: ' + annotation_path)
with open(annotation_path) as data_file:
self.annotation = json.load(data_file)
diff --git a/parlai/tasks/vqa_coco2014/build.py b/parlai/tasks/vqa_coco2014/build.py
--- a/parlai/tasks/vqa_coco2014/build.py
+++ b/parlai/tasks/vqa_coco2014/build.py
@@ -6,21 +6,22 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def buildImage(dpath):
- print("[building image data: " + dpath + "]")
+ print('[building image data: ' + dpath + ']')
# download the image data.
- fname1 = "train2014.zip"
- fname2 = "val2014.zip"
- fname3 = "test2014.zip"
+ fname1 = 'train2014.zip'
+ fname2 = 'val2014.zip'
+ fname3 = 'test2014.zip'
- url = "http://msvocds.blob.core.windows.net/coco2014/"
+ url = 'http://msvocds.blob.core.windows.net/coco2014/'
build_data.download(dpath, url + fname1)
build_data.download(dpath, url + fname2)
build_data.download(dpath, url + fname3)
-
+
build_data.untar(dpath, fname1, False)
build_data.untar(dpath, fname2, False)
build_data.untar(dpath, fname3, False)
@@ -28,22 +29,22 @@ def buildImage(dpath):
def build(opt):
- dpath = opt['datapath'] + "/VQA-COCO2014/"
+ dpath = os.path.join(opt['datapath'], 'VQA-COCO2014')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname1 = "Questions_Train_mscoco.zip"
- fname2 = "Questions_Val_mscoco.zip"
- fname3 = "Questions_Test_mscoco.zip"
+ fname1 = 'Questions_Train_mscoco.zip'
+ fname2 = 'Questions_Val_mscoco.zip'
+ fname3 = 'Questions_Test_mscoco.zip'
- fname4 = "Annotations_Val_mscoco.zip"
- fname5 = "Annotations_Train_mscoco.zip"
+ fname4 = 'Annotations_Val_mscoco.zip'
+ fname5 = 'Annotations_Train_mscoco.zip'
- url = "http://visualqa.org/data/mscoco/vqa/"
+ url = 'http://visualqa.org/data/mscoco/vqa/'
build_data.download(dpath, url + fname1)
build_data.download(dpath, url + fname2)
build_data.download(dpath, url + fname3)
@@ -61,7 +62,3 @@ def build(opt):
# Mark the data as built.
build_data.mark_done(dpath)
-
-
-
-
diff --git a/parlai/tasks/webquestions/agents.py b/parlai/tasks/webquestions/agents.py
--- a/parlai/tasks/webquestions/agents.py
+++ b/parlai/tasks/webquestions/agents.py
@@ -3,17 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return '{}/WebQuestions/{}.txt'.format(opt['datapath'], dt)
+ return os.path.join(opt['datapath'], 'WebQuestions', dt + '.txt')
class DefaultTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
diff --git a/parlai/tasks/webquestions/build.py b/parlai/tasks/webquestions/build.py
--- a/parlai/tasks/webquestions/build.py
+++ b/parlai/tasks/webquestions/build.py
@@ -5,9 +5,10 @@
# of patent rights can be found in the PATENTS file in the same directory.
# Download and build the data if it does not exist.
-import re
-import json
import parlai.core.build_data as build_data
+import json
+import os
+import re
def parse_ans(a):
@@ -18,11 +19,12 @@ def parse_ans(a):
ans = ans + '|' + a
return ans.lstrip('|')
+
def create_fb_format(outpath, dtype, inpath):
- print("building fbformat:" + dtype)
+ print('building fbformat:' + dtype)
with open(inpath) as data_file:
data = json.load(data_file)
- fout = open(outpath + dtype + '.txt', 'w')
+ fout = open(os.path.join(outpath, dtype + '.txt'), 'w')
for i in range(len(data)):
q = data[i]['utterance']
a = parse_ans(data[i]['targetValue'])
@@ -32,27 +34,25 @@ def create_fb_format(outpath, dtype, inpath):
def build(opt):
- dpath = opt['datapath'] + "/WebQuestions/"
+ dpath = os.path.join(opt['datapath'], 'WebQuestions')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- url = ("https://worksheets.codalab.org/rest/bundles/" +
- "0x4a763f8cde224c2da592b75f29e2f5c2/contents/blob/")
- build_data.download(dpath, url)
- build_data.move(dpath + 'index.html', dpath + 'train.json')
-
- url = ("https://worksheets.codalab.org/rest/bundles/" +
- "0xe7bac352fce7448c9ef238fb0a297ec2/contents/blob/")
- build_data.download(dpath, url)
- build_data.move(dpath + 'index.html', dpath + 'test.json')
-
- create_fb_format(dpath, 'train', dpath + 'train.json')
- create_fb_format(dpath, 'valid', dpath + 'train.json')
- create_fb_format(dpath, 'test', dpath + 'test.json')
+ url = ('https://worksheets.codalab.org/rest/bundles/' +
+ '0x4a763f8cde224c2da592b75f29e2f5c2/contents/blob/')
+ build_data.download(os.path.join(dpath, 'train.json'), url)
+
+ url = ('https://worksheets.codalab.org/rest/bundles/' +
+ '0xe7bac352fce7448c9ef238fb0a297ec2/contents/blob/')
+ build_data.download(os.path.join(dpath, 'test.json'), url)
+
+ create_fb_format(dpath, 'train', os.path.join(dpath, 'train.json'))
+ create_fb_format(dpath, 'valid', os.path.join(dpath, 'train.json'))
+ create_fb_format(dpath, 'test', os.path.join(dpath, 'test.json'))
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/parlai/tasks/wikimovies/agents.py b/parlai/tasks/wikimovies/agents.py
--- a/parlai/tasks/wikimovies/agents.py
+++ b/parlai/tasks/wikimovies/agents.py
@@ -3,11 +3,13 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt):
build(opt)
@@ -19,10 +21,9 @@ def _path(opt):
suffix = 'test'
elif dt == 'valid':
suffix = 'dev'
- return (opt['datapath'] + '/WikiMovies/' +
- 'movieqa/questions/wiki_entities/' +
- 'wiki-entities_qa_{suffix}.txt'.format(
- suffix=suffix))
+ return os.path.join(opt['datapath'], 'WikiMovies', 'movieqa', 'questions',
+ 'wiki_entities',
+ 'wiki-entities_qa_{suffix}.txt'.format(suffix=suffix))
# The knowledge base of facts that can be used to answer questions.
@@ -32,11 +33,11 @@ def __init__(self, opt, shared=None):
task = opt.get('task', 'wikimovies:KB:kb')
kb = task.split(':')[2]
kbs = {}
- kbs['kb'] = 'wiki_entities/wiki_entities_kb.txt'
+ kbs['kb'] = os.path.join('wiki_entities', 'wiki_entities_kb.txt')
kbs['wiki'] = 'wiki.txt'
kbs['ie'] = 'wiki_ie.txt'
- opt['datafile'] = (opt['datapath'] + '/WikiMovies/movieqa/' +
- 'knowledge_source/' + kbs[kb])
+ opt['datafile'] = os.path.join(opt['datapath'], 'WikiMovies', 'movieqa',
+ 'knowledge_source', kbs[kb])
super().__init__(opt, shared)
@@ -46,7 +47,7 @@ def __init__(self, opt, shared=None):
build(opt)
opt = copy.deepcopy(opt)
opt['datafile'] = _path(opt)
- opt['cands_datafile'] = (opt['datapath'] +
- '/WikiMovies/movieqa/' +
- 'knowledge_source/entities.txt')
+ opt['cands_datafile'] = os.path.join(opt['datapath'], 'WikiMovies',
+ 'movieqa', 'knowledge_source',
+ 'entities.txt')
super().__init__(opt, shared)
diff --git a/parlai/tasks/wikimovies/build.py b/parlai/tasks/wikimovies/build.py
--- a/parlai/tasks/wikimovies/build.py
+++ b/parlai/tasks/wikimovies/build.py
@@ -6,19 +6,20 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def build(opt):
- dpath = opt['datapath'] + "/WikiMovies/"
+ dpath = os.path.join(opt['datapath'], 'WikiMovies')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "wikimovies.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/wikimovies/" + fname
+ fname = 'wikimovies.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/wikimovies/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
diff --git a/parlai/tasks/wikiqa/agents.py b/parlai/tasks/wikiqa/agents.py
--- a/parlai/tasks/wikiqa/agents.py
+++ b/parlai/tasks/wikiqa/agents.py
@@ -3,18 +3,19 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-import copy
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
+import copy
+import os
+
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
- return (opt['datapath'] + '/WikiQA/' +
- '{type}.txt'.format(type=dt + filtered))
+ return os.path.join(opt['datapath'], 'WikiQA', dt + filtered + '.txt')
class FilteredTeacher(FbDialogTeacher):
diff --git a/parlai/tasks/wikiqa/build.py b/parlai/tasks/wikiqa/build.py
--- a/parlai/tasks/wikiqa/build.py
+++ b/parlai/tasks/wikiqa/build.py
@@ -6,11 +6,12 @@
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
+import os
def create_fb_format(outpath, dtype, inpath):
- print("building fbformat:" + dtype)
- fout = open(outpath + dtype + '.txt', 'w')
+ print('building fbformat:' + dtype)
+ fout = open(os.path.join(outpath, dtype + '.txt'), 'w')
with open(inpath) as f:
lines = [line.strip('\n') for line in f]
lastqid = None
@@ -37,26 +38,32 @@ def create_fb_format(outpath, dtype, inpath):
def build(opt):
- dpath = opt['datapath'] + "/WikiQA/"
+ dpath = os.path.join(opt['datapath'], 'WikiQA')
if not build_data.built(dpath):
- print("[building data: " + dpath + "]")
+ print('[building data: ' + dpath + ']')
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
- fname = "wikiqa.tar.gz"
- url = "https://s3.amazonaws.com/fair-data/parlai/wikiqa/" + fname
+ fname = 'wikiqa.tar.gz'
+ url = 'https://s3.amazonaws.com/fair-data/parlai/wikiqa/' + fname
build_data.download(dpath, url)
build_data.untar(dpath, fname)
- dpext = dpath + 'WikiQACorpus/'
- create_fb_format(dpath, 'train', dpext + 'WikiQA-train.tsv')
- create_fb_format(dpath, 'valid', dpext + 'WikiQA-dev.tsv')
- create_fb_format(dpath, 'test', dpext + 'WikiQA-test.tsv')
- create_fb_format(dpath, 'train-filtered', dpext + 'WikiQA-train.tsv')
- create_fb_format(dpath, 'valid-filtered', dpext + 'WikiQA-dev.tsv')
- create_fb_format(dpath, 'test-filtered', dpext + 'WikiQA-test.tsv')
+ dpext = os.path.join(dpath, 'WikiQACorpus')
+ create_fb_format(dpath, 'train',
+ os.path.join(dpext, 'WikiQA-train.tsv'))
+ create_fb_format(dpath, 'valid',
+ os.path.join(dpext, 'WikiQA-dev.tsv'))
+ create_fb_format(dpath, 'test',
+ os.path.join(dpext, 'WikiQA-test.tsv'))
+ create_fb_format(dpath, 'train-filtered',
+ os.path.join(dpext, 'WikiQA-train.tsv'))
+ create_fb_format(dpath, 'valid-filtered',
+ os.path.join(dpext, 'WikiQA-dev.tsv'))
+ create_fb_format(dpath, 'test-filtered',
+ os.path.join(dpext, 'WikiQA-test.tsv'))
# Mark the data as built.
build_data.mark_done(dpath)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -12,6 +12,9 @@
with open('LICENSE') as f:
license = f.read()
+with open('requirements.txt') as f:
+ reqs = f.read()
+
setup(
name='parlai',
version='0.1.0',
@@ -20,6 +23,6 @@
url='http://parl.ai/',
license=license,
packages=find_packages(exclude=(
- 'data', 'docs', 'downloads', 'examples', 'tests')),
- install_requires=['nltk'],
+ 'data', 'docs', 'downloads', 'examples', 'logs', 'tests')),
+ install_requires=reqs.strip().split('\n'),
)
| diff --git a/tests/check_examples.sh b/tests/check_examples.sh
--- a/tests/check_examples.sh
+++ b/tests/check_examples.sh
@@ -1,6 +1,12 @@
#!/bin/bash
-set -e # exit if any command fails
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+set -e # stop if any tests fail
cd ../examples/
python display_data.py -t babi:task1k:1
python base_train.py -t babi:task1k:1
diff --git a/tests/run_tests_long.sh b/tests/run_tests_long.sh
--- a/tests/run_tests_long.sh
+++ b/tests/run_tests_long.sh
@@ -6,4 +6,5 @@
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
+set -e # stop if any tests fail
python test_data.py
diff --git a/tests/run_tests_short.sh b/tests/run_tests_short.sh
--- a/tests/run_tests_short.sh
+++ b/tests/run_tests_short.sh
@@ -6,6 +6,7 @@
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
+set -e # stop if any tests fail
python test_import.py
python test_dict.py
python test_threadutils.py
| Path handling in tasks
Hello,
Thanks a lot for your work and for sharing it with the world. I just started to play with the repository and would like to know your opinion on the following point: the paths are hard-coded with a slash inside the strings.
This has led to a few hiccups in the download scripts, for example Cornell Movie and OpenSubtitles (which could also be updated to the 2016 version).
I would suggest replacing string concatenation of paths with the elegant `os.path.join` built-in function, which handles the separator in a system-independent way.
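As a minimal sketch of the suggestion (the `data/WikiQA` base directory and file names are placeholders for this example, not the actual task code):

```python
import os

dpath = os.path.join('data', 'WikiQA')  # placeholder base directory

# Hard-coded separators are not portable and silently break when the
# base path has no trailing slash:
bad = dpath + 'WikiQACorpus/' + 'WikiQA-train.tsv'

# os.path.join inserts the correct separator for the current system:
good = os.path.join(dpath, 'WikiQACorpus', 'WikiQA-train.tsv')

print(bad)   # data/WikiQAWikiQACorpus/WikiQA-train.tsv
print(good)  # data/WikiQA/WikiQACorpus/WikiQA-train.tsv (on POSIX)
```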
| 2017-05-06T03:20:13Z | [] | [] |
|
facebookresearch/ParlAI | 476 | facebookresearch__ParlAI-476 | [
"474"
] | f16622ff0c36faa22b24a9eb372717067dca3036 | diff --git a/parlai/tasks/multinli/__init__.py b/parlai/tasks/multinli/__init__.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/multinli/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
diff --git a/parlai/tasks/multinli/agents.py b/parlai/tasks/multinli/agents.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/multinli/agents.py
@@ -0,0 +1,73 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+from parlai.core.teachers import DialogTeacher
+from .build import build
+
+import os
+import copy
+import json
+
+
+MULTINLI = 'MultiNLI'
+MULTINLI_VERSION = '1.0'
+MULTINLI_PREFIX = 'multinli_'
+MULTINLI_PREMISE_PREFIX = 'Premise: '
+MULTINLI_HYPO_PREFIX = 'Hypothesis: '
+MULTINLI_LABELS = ['entailment', 'contradiction', 'neutral']
+MULTINLI_PREMISE_KEY = 'sentence1'
+MULTINLI_HYPO_KEY = 'sentence2'
+MULTINLI_ANSWER_KEY = 'gold_label'
+
+
+def _path(opt):
+ build(opt)
+
+ dt = opt['datatype'].split(':')[0]
+
+ if dt == 'train':
+ suffix = 'train'
+ # Using matched set as valid and mismatched set as test
+ elif dt == 'valid':
+ suffix = 'dev_matched'
+ elif dt == 'test':
+ suffix = 'dev_mismatched'
+ else:
+ raise RuntimeError('Not valid datatype.')
+
+ data_path = os.path.join(opt['datapath'], MULTINLI,
+ MULTINLI_PREFIX + MULTINLI_VERSION,
+ MULTINLI_PREFIX + MULTINLI_VERSION +
+ '_' + suffix + '.jsonl')
+
+ return data_path
+
+
+class DefaultTeacher(DialogTeacher):
+ def __init__(self, opt, shared=None):
+ opt = copy.deepcopy(opt)
+ data_path = _path(opt)
+ opt['datafile'] = data_path
+ self.id = 'MultiNLI'
+
+ super().__init__(opt, shared)
+
+ def setup_data(self, path):
+ print('loading: ' + path)
+
+ with open(path, 'r') as data_file:
+ for pair_line in data_file:
+ pair = json.loads(pair_line)
+ premise = MULTINLI_PREMISE_PREFIX + pair[MULTINLI_PREMISE_KEY]
+ hypo = MULTINLI_HYPO_PREFIX + pair[MULTINLI_HYPO_KEY]
+ answer = [pair[MULTINLI_ANSWER_KEY]]
+
+ if answer == '-':
+ continue
+
+ question = premise + '\n' + hypo
+
+ yield (question, answer, None, MULTINLI_LABELS), True
diff --git a/parlai/tasks/multinli/build.py b/parlai/tasks/multinli/build.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/multinli/build.py
@@ -0,0 +1,37 @@
+
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+import parlai.core.build_data as build_data
+import os
+
+
+MULTINLI_BASE_URL = 'https://www.nyu.edu/projects/bowman/multinli/'
+
+
+def build(opt):
+ dpath = os.path.join(opt['datapath'], 'MultiNLI')
+ version = '1.0'
+
+ if not build_data.built(dpath, version_string=version):
+ print('[building data: ' + dpath + ']')
+
+ if build_data.built(dpath):
+ # an older version exists, so remove these outdated files.
+ build_data.remove_dir(dpath)
+ build_data.make_dir(dpath)
+
+ # download the data.
+ fname = 'multinli_' + version + '.zip'
+ # dataset URL
+ url = MULTINLI_BASE_URL + fname
+ build_data.download(url, dpath, fname)
+
+ # uncompress it
+ build_data.untar(dpath, fname)
+
+ # mark the data as built
+ build_data.mark_done(dpath, version_string=version)
diff --git a/parlai/tasks/task_list.py b/parlai/tasks/task_list.py
--- a/parlai/tasks/task_list.py
+++ b/parlai/tasks/task_list.py
@@ -143,6 +143,13 @@
"tags": [ "All", "QA" ],
"description": "Closed-domain QA dataset asking MTurk-derived questions about movies, answerable from Wikipedia. From Li et al. '16. Link: https://arxiv.org/abs/1611.09823"
},
+ {
+ "id": "MultiNLI",
+ "display_name": "MultiNLI",
+ "task": "multinli",
+ "tags": [ "All", "Entailment" ],
+ "description": "A dataset designed for use in the development and evaluation of machine learning models for sentence understanding. Each example contains a premise and hypothesis. Model has to predict whether premise and hypothesis entail, contradict or are neutral to each other. From Williams et al. '17. Link: https://arxiv.org/abs/1704.05426"
+ },
{
"id": "OpenSubtitles",
"display_name": "Open Subtitles",
| diff --git a/tests/test_downloads.py b/tests/test_downloads.py
--- a/tests/test_downloads.py
+++ b/tests/test_downloads.py
@@ -449,6 +449,29 @@ def test_ms_marco(self):
shutil.rmtree(self.TMP_PATH)
+ def test_multinli(self):
+ from parlai.core.params import ParlaiParser
+ from parlai.tasks.multinli.agents import DefaultTeacher
+
+ opt = ParlaiParser().parse_args(args=self.args)
+
+ for dt in ['train', 'valid', 'test']:
+ opt['datatype'] = dt
+
+ teacher = DefaultTeacher(opt)
+ reply = teacher.act()
+ check(opt, reply)
+ assert len(reply.get('label_candidates')) == 3
+ assert reply.get('text').find('Premise') != -1
+ assert reply.get('text').find('Hypothesis') != -1
+
+ if dt == 'train':
+ assert reply.get('labels')[0] in ['entailment',
+ 'contradiction',
+ 'neutral']
+
+ shutil.rmtree(self.TMP_PATH)
+
if __name__ == '__main__':
# clean out temp dir first
| MultiNLI support
Do we want support for MultiNLI? https://www.nyu.edu/projects/bowman/multinli/
I can open up a PR for this.
| Yes, sure! | 2018-01-03T19:06:50Z | [] | [] |
facebookresearch/ParlAI | 482 | facebookresearch__ParlAI-482 | [
"481"
] | f9ee62e2c444dac631382cbf51de7d002c5a986d | diff --git a/parlai/tasks/multinli/agents.py b/parlai/tasks/multinli/agents.py
--- a/parlai/tasks/multinli/agents.py
+++ b/parlai/tasks/multinli/agents.py
@@ -46,6 +46,24 @@ def _path(opt):
return data_path
+def setup_data(path):
+ print('loading: ' + path)
+
+ with open(path, 'r') as data_file:
+ for pair_line in data_file:
+ pair = json.loads(pair_line)
+ premise = MULTINLI_PREMISE_PREFIX + pair[MULTINLI_PREMISE_KEY]
+ hypo = MULTINLI_HYPO_PREFIX + pair[MULTINLI_HYPO_KEY]
+ answer = [pair[MULTINLI_ANSWER_KEY]]
+
+ if answer == '-':
+ continue
+
+ question = premise + '\n' + hypo
+
+ yield (question, answer, None, MULTINLI_LABELS), True
+
+
class DefaultTeacher(DialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
@@ -56,18 +74,4 @@ def __init__(self, opt, shared=None):
super().__init__(opt, shared)
def setup_data(self, path):
- print('loading: ' + path)
-
- with open(path, 'r') as data_file:
- for pair_line in data_file:
- pair = json.loads(pair_line)
- premise = MULTINLI_PREMISE_PREFIX + pair[MULTINLI_PREMISE_KEY]
- hypo = MULTINLI_HYPO_PREFIX + pair[MULTINLI_HYPO_KEY]
- answer = [pair[MULTINLI_ANSWER_KEY]]
-
- if answer == '-':
- continue
-
- question = premise + '\n' + hypo
-
- yield (question, answer, None, MULTINLI_LABELS), True
+ return setup_data(path)
diff --git a/parlai/tasks/snli/__init__.py b/parlai/tasks/snli/__init__.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/snli/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
diff --git a/parlai/tasks/snli/agents.py b/parlai/tasks/snli/agents.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/snli/agents.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+from parlai.core.teachers import DialogTeacher
+from parlai.tasks.multinli.agents import setup_data
+from .build import build
+
+import os
+import copy
+import json
+
+
+SNLI = 'SNLI'
+SNLI_VERSION = '1.0'
+SNLI_PREFIX = 'snli_'
+
+
+def _path(opt):
+ build(opt)
+
+ dt = opt['datatype'].split(':')[0]
+
+ if dt == 'train':
+ suffix = 'train'
+ elif dt == 'valid':
+ suffix = 'dev'
+ elif dt == 'test':
+ suffix = 'test'
+ else:
+ raise RuntimeError('Not valid datatype.')
+
+ data_path = os.path.join(opt['datapath'], SNLI,
+ SNLI_PREFIX + SNLI_VERSION,
+ SNLI_PREFIX + SNLI_VERSION +
+ '_' + suffix + '.jsonl')
+
+ return data_path
+
+
+class DefaultTeacher(DialogTeacher):
+ def __init__(self, opt, shared=None):
+ opt = copy.deepcopy(opt)
+ data_path = _path(opt)
+ opt['datafile'] = data_path
+ self.id = 'SNLI'
+ super().__init__(opt, shared)
+
+ def setup_data(self, path):
+ return setup_data(path)
diff --git a/parlai/tasks/snli/build.py b/parlai/tasks/snli/build.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/snli/build.py
@@ -0,0 +1,37 @@
+
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+import parlai.core.build_data as build_data
+import os
+
+
+SNLI_BASE_URL = 'https://nlp.stanford.edu/projects/snli/'
+
+
+def build(opt):
+ dpath = os.path.join(opt['datapath'], 'SNLI')
+ version = '1.0'
+
+ if not build_data.built(dpath, version_string=version):
+ print('[building data: ' + dpath + ']')
+
+ if build_data.built(dpath):
+ # an older version exists, so remove these outdated files.
+ build_data.remove_dir(dpath)
+ build_data.make_dir(dpath)
+
+ # download the data.
+ fname = 'snli_' + version + '.zip'
+ # dataset URL
+ url = SNLI_BASE_URL + fname
+ build_data.download(url, dpath, fname)
+
+ # uncompress it
+ build_data.untar(dpath, fname)
+
+ # mark the data as built
+ build_data.mark_done(dpath, version_string=version)
diff --git a/parlai/tasks/task_list.py b/parlai/tasks/task_list.py
--- a/parlai/tasks/task_list.py
+++ b/parlai/tasks/task_list.py
@@ -192,6 +192,13 @@
"tags": [ "All", "QA" ],
"description": "Open-domain QA dataset based on Freebase triples from Bordes et al. '15. Link: https://arxiv.org/abs/1506.02075"
},
+ {
+ "id": "SNLI",
+ "display_name": "The Stanford Natural Language Inference (SNLI) Corpus",
+ "task": "snli",
+ "tags": [ "All", "Entailment" ],
+ "description": "The SNLI corpus (version 1.0) is a collection of 570k human-written English sentence pairs manually labeled for balanced classification with the labels entailment, contradiction, and neutral, supporting the task of natural language inference (NLI), also known as recognizing textual entailment (RTE). See https://nlp.stanford.edu/projects/snli/"
+ },
{
"id": "SQuAD",
"display_name": "SQuAD",
| diff --git a/tests/test_downloads.py b/tests/test_downloads.py
--- a/tests/test_downloads.py
+++ b/tests/test_downloads.py
@@ -472,6 +472,29 @@ def test_multinli(self):
shutil.rmtree(self.TMP_PATH)
+ def test_snli(self):
+ from parlai.core.params import ParlaiParser
+ from parlai.tasks.snli.agents import DefaultTeacher
+
+ opt = ParlaiParser().parse_args(args=self.args)
+
+ for dt in ['train', 'valid', 'test']:
+ opt['datatype'] = dt
+
+ teacher = DefaultTeacher(opt)
+ reply = teacher.act()
+ check(opt, reply)
+ assert len(reply.get('label_candidates')) == 3
+ assert reply.get('text').find('Premise') != -1
+ assert reply.get('text').find('Hypothesis') != -1
+
+ if dt == 'train':
+ assert reply.get('labels')[0] in ['entailment',
+ 'contradiction',
+ 'neutral']
+
+ shutil.rmtree(self.TMP_PATH)
+
if __name__ == '__main__':
# clean out temp dir first
| SNLI support
https://nlp.stanford.edu/projects/snli/
| 2018-01-04T10:12:29Z | [] | [] |
|
facebookresearch/ParlAI | 487 | facebookresearch__ParlAI-487 | [
"475"
] | 2fbd477ad66a5b33439203273e850b6419c77188 | diff --git a/parlai/tasks/narrative_qa/__init__.py b/parlai/tasks/narrative_qa/__init__.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/narrative_qa/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
diff --git a/parlai/tasks/narrative_qa/agents.py b/parlai/tasks/narrative_qa/agents.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/narrative_qa/agents.py
@@ -0,0 +1,135 @@
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+from parlai.core.teachers import DialogTeacher
+from .build import build
+
+import os
+import copy
+import csv
+import glob
+
+
+def _path(opt):
+ build(opt)
+
+ dt = opt['datatype'].split(':')[0]
+
+ if not (dt == 'train' or dt == 'valid' or dt == 'test'):
+ raise RuntimeError('Not valid datatype.')
+
+ suffix = dt
+
+ data_path = os.path.join(opt['datapath'], 'NarrativeQA',
+ 'narrative_qa', suffix)
+
+ return data_path
+
+
+class SummariesTeacher(DialogTeacher):
+ def __init__(self, opt, shared=None):
+ opt = copy.deepcopy(opt)
+ data_path = _path(opt)
+ opt['datafile'] = data_path
+ self.id = 'NarrativeQA'
+
+ super().__init__(opt, shared)
+
+ def setup_data(self, path):
+ print('loading data from: ' + path)
+
+ qa_path = os.path.join(path, 'qaps.csv')
+ summaries_path = os.path.join(path, 'summaries.csv')
+
+ qa_pairs = dict()
+
+ with open(qa_path, 'r') as f:
+ reader = csv.DictReader(f)
+ for row in reader:
+ if row['document_id'] not in qa_pairs:
+ qa_pairs[row['document_id']] = []
+ qa_pairs[row['document_id']].append(row)
+
+ with open(summaries_path, 'r') as f:
+ reader = csv.DictReader(f)
+
+ for row in reader:
+ info = 'Summary: %s' % row['summary_tokenized']
+
+ for i, qa in enumerate(qa_pairs[row['document_id']]):
+ question = qa['question_tokenized']
+ answer1 = qa['answer1_tokenized']
+ answer2 = qa['answer2_tokenized']
+
+ if i == 0:
+ # Prepend start info in first question
+ yield (info + '\n' + question,
+ [answer1, answer2]), True
+ else:
+ yield (question, [answer1, answer2]), False
+
+
+class DefaultTeacher(DialogTeacher):
+ def __init__(self, opt, shared=None):
+ opt = copy.deepcopy(opt)
+ data_path = _path(opt)
+ opt['datafile'] = data_path
+ self.id = 'NarrativeQA'
+
+ super().__init__(opt, shared)
+
+ def setup_data(self, path):
+ print('loading data from: ' + path)
+
+ qa_path = os.path.join(path, 'qaps.csv')
+ documents_path = os.path.join(path, 'documents.csv')
+
+ stories_base_path = os.path.join(path, '..', 'stories')
+ qa_pairs = dict()
+
+ print("%s stories found." %
+ len(glob.glob(os.path.join(stories_base_path, "*.content"))))
+
+ with open(qa_path, 'r') as f:
+ reader = csv.DictReader(f)
+ for row in reader:
+ if row['document_id'] not in qa_pairs:
+ qa_pairs[row['document_id']] = []
+ qa_pairs[row['document_id']].append(row)
+
+ with open(documents_path, 'r') as f:
+ reader = csv.DictReader(f)
+
+ for row in reader:
+ story_path = os.path.join(stories_base_path,
+ row['document_id'] + '.content')
+
+ if not os.path.exists(story_path):
+ continue
+
+ story = None
+ with open(story_path, 'r', encoding='utf-8',
+ errors='ignore') as f:
+ story = f.read().strip()
+
+ info = 'Title: %s' % row['wiki_title']
+ info += '\nKind: %s' % row['kind']
+ info += '\nStory url: %s' % row['story_url']
+ info += '\nStory start: %s' % row['story_start']
+ info += '\nStory end: %s' % row['story_end']
+ info += '\nStory: %s' % story
+
+ for i, qa in enumerate(qa_pairs[row['document_id']]):
+ question = qa['question_tokenized']
+ answer1 = qa['answer1_tokenized']
+ answer2 = qa['answer2_tokenized']
+
+ if i == 0:
+ # Prepend start info in first question
+ yield (info + '\n' + question, [
+ answer1, answer2]), True
+ else:
+ yield (question, [answer1, answer2]), False
diff --git a/parlai/tasks/narrative_qa/build.py b/parlai/tasks/narrative_qa/build.py
new file mode 100644
--- /dev/null
+++ b/parlai/tasks/narrative_qa/build.py
@@ -0,0 +1,186 @@
+
+# Copyright (c) 2017-present, Facebook, Inc.
+# All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+import parlai.core.build_data as build_data
+import os
+import subprocess
+import shutil
+import csv
+import stat
+import time
+import gzip
+
+
+NARRATIVE_QA_DOWNLOAD_URL = 'https://github.com/deepmind/narrativeqa/archive/master.zip'
+
+
+def get_rows_for_set(reader, req_set):
+ selected_rows = [row for row in reader if row['set'].strip() == req_set]
+ return selected_rows
+
+
+def read_csv_to_dict_list(filepath):
+ f = open(filepath, 'r')
+ return csv.DictReader(f, delimiter=','), f
+
+
+def write_dict_list_to_csv(dict_list, filepath):
+ keys = list(dict_list[0].keys())
+
+ with open(filepath, 'w') as f:
+ writer = csv.DictWriter(f, fieldnames=keys)
+ writer.writeheader()
+
+ for row in dict_list:
+ writer.writerow(row)
+
+
+def divide_csv_into_sets(csv_filepath, sets=['train', 'valid', 'test']):
+ reader, fh = read_csv_to_dict_list(csv_filepath)
+
+ base_filename = os.path.basename(csv_filepath).split('.')[0]
+ base_path = os.path.dirname(csv_filepath)
+
+ for s in sets:
+ path = os.path.join(base_path,
+ base_filename + '_' + s + '.csv')
+ fh.seek(0)
+ rows = get_rows_for_set(reader, s)
+ write_dict_list_to_csv(rows, path)
+
+ fh.close()
+
+
+def make_folders(base_path, sets=['train', 'valid', 'test']):
+ for s in sets:
+ path = os.path.join(base_path, s)
+ if not os.path.exists(path):
+ os.mkdir(path)
+
+
+def move_files(base_path, sets=['train', 'valid', 'test']):
+ source = os.listdir(base_path)
+
+ for f in source:
+ for s in sets:
+ if f.endswith('_' + s + '.csv'):
+ final_name = f[:-(len('_' + s + '.csv'))] + '.csv'
+ f = os.path.join(base_path, f)
+ shutil.move(f, os.path.join(base_path, s, final_name))
+
+
+# Returns false unless the story was already downloaded and
+# has appropriate size
+def try_downloading(directory, row):
+ document_id, kind, story_url, story_size = row['document_id'], \
+ row['kind'], row['story_url'], row['story_file_size']
+ story_path = os.path.join(directory, document_id + '.content')
+
+ actual_story_size = 0
+ if os.path.exists(story_path):
+ with open(story_path, 'rb') as f:
+ actual_story_size = len(f.read())
+
+ if actual_story_size <= 19000:
+ if kind == 'gutenberg':
+ time.sleep(2)
+
+ build_data.download(story_url, directory,
+ document_id + '.content')
+ else:
+ return True
+
+ file_type = subprocess.check_output(['file', '-b', story_path])
+ file_type = file_type.decode('utf-8')
+
+ if 'gzip compressed' in file_type:
+ gz_path = os.path.join(directory,
+ document_id + '.content.gz')
+ shutil.move(story_path, gz_path)
+ build_tools.untar(gz_path)
+
+ return False
+
+
+def download_stories(path):
+ documents_csv = os.path.join(path, 'documents.csv')
+ tmp_dir = os.path.join(path, 'tmp')
+ build_data.make_dir(tmp_dir)
+
+ with open(documents_csv, 'r') as f:
+ reader = csv.DictReader(f, delimiter=',')
+ for row in reader:
+ print("Downloading %s (%s)" % (row['wiki_title'],
+ row['document_id']))
+ finished = try_downloading(tmp_dir, row)
+ count = 0
+ while not finished and count < 5:
+ if count != 0:
+ print("Retrying (%d retries left)" % (5 - count - 1))
+ finished = try_downloading(tmp_dir, row)
+ count += 1
+
+
+def build(opt):
+ dpath = os.path.join(opt['datapath'], 'NarrativeQA')
+ version = None
+
+ if not build_data.built(dpath, version_string=version):
+ print('[building data: ' + dpath + ']')
+
+ if build_data.built(dpath):
+ # an older version exists, so remove these outdated files.
+ build_data.remove_dir(dpath)
+ build_data.make_dir(dpath)
+
+ # download the data.
+ fname = 'narrative_qa.zip'
+ # dataset URL
+ url = NARRATIVE_QA_DOWNLOAD_URL
+ build_data.download(url, dpath, fname)
+
+ # uncompress it
+ build_data.untar(dpath, fname)
+
+ print('downloading stories now')
+ base_path = os.path.join(dpath, 'narrativeqa-master')
+
+ download_stories(base_path)
+
+ # move from tmp to stories
+ tmp_stories_path = os.path.join(base_path,
+ 'tmp')
+ new_stories_path = os.path.join(base_path,
+ 'stories')
+ shutil.move(tmp_stories_path, new_stories_path)
+
+ # divide into train, valid and test for summaries
+ summaries_csv_path = os.path.join(base_path, 'third_party',
+ 'wikipedia', 'summaries.csv')
+ new_path = os.path.join(base_path, 'summaries.csv')
+ shutil.move(summaries_csv_path, new_path)
+
+ divide_csv_into_sets(new_path)
+
+ # divide into sets for questions
+ questions_path = os.path.join(base_path, 'qaps.csv')
+ divide_csv_into_sets(questions_path)
+
+ # divide into sets for documents
+ documents_path = os.path.join(base_path, 'documents.csv')
+ divide_csv_into_sets(documents_path)
+
+ # move specific set's files into their set's folder
+ make_folders(base_path)
+ move_files(base_path)
+
+ # move narrativeqa-master to narrative_qa
+ new_path = os.path.join(dpath, 'narrative_qa')
+ shutil.move(base_path, new_path)
+
+ # mark the data as built
+ build_data.mark_done(dpath, version_string=version)
diff --git a/parlai/tasks/task_list.py b/parlai/tasks/task_list.py
--- a/parlai/tasks/task_list.py
+++ b/parlai/tasks/task_list.py
@@ -150,6 +150,14 @@
"tags": [ "All", "Entailment" ],
"description": "A dataset designed for use in the development and evaluation of machine learning models for sentence understanding. Each example contains a premise and hypothesis. Model has to predict whether premise and hypothesis entail, contradict or are neutral to each other. From Williams et al. '17. Link: https://arxiv.org/abs/1704.05426"
},
+ {
+ "id": "NarrativeQA",
+ "display_name": "NarrativeQA",
+ "task": "narrative_qa",
+ "tags": [ "All", "QA" ],
+ "description": "A dataset and set of tasks in which the reader must answer questions about stories by reading entire books or movie scripts. From Kočiský et. al. '17. Link: https://arxiv.org/abs/1712.07040'",
+ "notes": "You can access summaries only task for NarrativeQA by using task 'narrative_qa:summaries'. By default, only stories are provided."
+ },
{
"id": "OpenSubtitles",
"display_name": "Open Subtitles",
| diff --git a/tests/test_downloads.py b/tests/test_downloads.py
--- a/tests/test_downloads.py
+++ b/tests/test_downloads.py
@@ -211,6 +211,29 @@ def test_mturkwikimovies(self):
shutil.rmtree(self.TMP_PATH)
+ def test_narrative_qa(self):
+ from parlai.core.params import ParlaiParser
+ from parlai.tasks.narrative_qa.agents import DefaultTeacher, SummariesTeacher
+
+ opt = ParlaiParser().parse_args(args=self.args)
+ for dt in ['train', 'valid', 'test']:
+ opt['datatype'] = dt
+
+ teacher = DefaultTeacher(opt)
+ reply = teacher.act()
+ check(opt, reply)
+
+ shutil.rmtree(self.TMP_PATH)
+
+ for dt in ['train', 'valid', 'test']:
+ opt['datatype'] = dt
+
+ teacher = SummariesTeacher(opt)
+ reply = teacher.act()
+ check(opt, reply)
+
+ shutil.rmtree(self.TMP_PATH)
+
def test_opensubtitles(self):
from parlai.core.params import ParlaiParser
from parlai.tasks.opensubtitles.agents import DefaultTeacher
| NarrativeQA support
https://arxiv.org/abs/1712.07040 and https://github.com/deepmind/narrativeqa.
I can work on this one as well if we want support.
| sure! that'd be awesome
How would we like to approach this?
We will have to choose one of the following approaches:
- Download all of the stories in one go while building the dataset.
- Read a story interactively each time by making an HTTP request at training time.
I would like to go with the first approach, since it allows offline development at a later time; a rough sketch of it follows below.
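A hypothetical sketch of that first approach (build-time download of every story, with a simple retry loop); the CSV columns follow the NarrativeQA `documents.csv` format, while the helper names and retry count are made up for illustration:

```python
import csv
import os
import urllib.request


def download_story(url, dest_path, retries=5):
    """Fetch a single story file, retrying a few times on network errors."""
    for attempt in range(retries):
        try:
            urllib.request.urlretrieve(url, dest_path)
            return True
        except OSError:
            print('download failed, retries left: %d' % (retries - attempt - 1))
    return False


def download_all_stories(documents_csv, out_dir):
    """Download every story listed in documents.csv during the dataset build."""
    os.makedirs(out_dir, exist_ok=True)
    with open(documents_csv, 'r') as f:
        for row in csv.DictReader(f):
            dest = os.path.join(out_dir, row['document_id'] + '.content')
            if not os.path.exists(dest):
                download_story(row['story_url'], dest)
```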
| 2018-01-05T19:08:23Z | [] | [] |
facebookresearch/ParlAI | 876 | facebookresearch__ParlAI-876 | [
"826"
] | f623dd5b4990fc2cb77cec278af9d7bb4f6e01a9 | diff --git a/parlai/agents/fairseq/fairseq.py b/parlai/agents/fairseq/fairseq.py
--- a/parlai/agents/fairseq/fairseq.py
+++ b/parlai/agents/fairseq/fairseq.py
@@ -4,346 +4,438 @@
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
try:
- from .fairseq_py.fairseq import models
+ from fairseq import models, optim
except ImportError:
- raise RuntimeError("Please run 'python setup.py build' and "
- "'python setup.py develop' from the fairseq_py directory")
-from .fairseq_py.fairseq.models import fconv
-from .fairseq_py.fairseq.multiprocessing_trainer import MultiprocessingTrainer
-from .fairseq_py.fairseq import criterions
-from .fairseq_py.fairseq import dictionary
-from .fairseq_py.fairseq.sequence_generator import SequenceGenerator
-from .fairseq_py.fairseq import options
-
-from torch.autograd import Variable
-
-from collections import deque
+ raise RuntimeError(
+ "Please run \"pip install -U 'git+https://github.com/pytorch/"
+ "[email protected]#egg=fairseq'\""
+ )
+from fairseq import trainer, fp16_trainer
+from fairseq.criterions.cross_entropy import CrossEntropyCriterion
+from fairseq.sequence_generator import SequenceGenerator
+from fairseq import options
+from fairseq.tasks.fairseq_task import FairseqTask
+from fairseq.utils import convert_padding_direction
+
+from parlai.core.torch_agent import TorchAgent
+from parlai.core.build_data import modelzoo_path
+
import argparse
-import numpy as np
-import random
import torch
import os
+import numpy as np
+import pickle
+
+# If a model file is loaded, these arguments may NOT be overridden in the
+# command line:
+NON_OVERRIDABLE_ARGS = {
+ 'arch',
+ 'encoder_embed_dim',
+ 'encoder_layers',
+ 'decoder_embed_dim',
+ 'decoder_layers',
+ 'decoder_out_embed_dim',
+ 'decoder_attention',
+}
+
+
+def _fairseq_opt_wrapper(opt):
+ """
+ Marshalls from a dict to a argparse.Namespace object for API compatibility.
+ Also does some necessary post-processing needed for fairseq-py.
-def OptWrapper(opt):
+ :param opt: dict. ParlAI options passed around from everywhere.
+ :return: an argparse.Namespace object for use in fairseq-py.
+ """
args = argparse.Namespace()
+
+ # first set args according to ParlAI options
for key in opt:
if opt[key] is not None:
setattr(args, key, opt[key])
- args.model = models.arch_model_map[args.arch]
- getattr(models, args.model).parse_arch(args)
- return args
+ # at this point the user *must* have specified an arch
+ if not hasattr(args, "arch"):
+ raise ValueError("--arch/-a must be specified")
+ # fill in default options from the model
+ models.ARCH_CONFIG_REGISTRY[args.arch](args)
-def _make_fairseq_dict(parlai_dict):
- fairseq_dict = dictionary.Dictionary()
- for i in range(len(parlai_dict)):
- fairseq_dict.add_symbol(parlai_dict[i])
- return fairseq_dict
+ # post processing of args. See
+ # https://github.com/pytorch/fairseq/blob/v0.5.0/fairseq/options.py#L95
+ if hasattr(args, "lr"):
+ args.lr = options.eval_str_list(args.lr, type=float)
+ if hasattr(args, "update_freq"):
+ args.update_freq = options.eval_str_list(args.update_freq, int)
+ if hasattr(args, "max_sentences_valid"):
+ args.max_sentences_valid = args.max_sentences
+ if getattr(args, "truncate") == -1:
+ # some torch agents use positional embeddings, which must have a max length
+ setattr(args, "truncate", 1024)
+ if not hasattr(args, "max_source_positions"):
+ # fairseq uses a different name for this CLI parameter
+ # Sometimes it's set in model defaults, but not for all models
+ setattr(args, "max_source_positions", getattr(args, "truncate"))
+ # if we don't have source lengths, we don't have target lengths
+ setattr(args, "max_target_positions", getattr(args, "truncate"))
+ # handle modelzoo if possible
+ for k in ("encoder_embed_path", "decoder_embed_path"):
+ if hasattr(args, k) and getattr(args, k) is not None:
+ setattr(args, k, modelzoo_path(opt.get("datapath"), getattr(args, k)))
-class FairseqAgent(Agent):
- """Agent which takes an input sequence and produces an output sequence.
+ # Here we hardcode a few options that we currently do not support
+ # turn off distributed training
+ args.distributed_world_size = 1
+ args.distributed_rank = 0
- For more information, see Convolutional Sequence to Sequence Learning
- `(Gehring et al. 2017) <https://arxiv.org/abs/1705.03122>`_.
- """
+ return args, vars(args)
+
+
+class _FairseqDictionary(DictionaryAgent):
+ """Skeleton dictionary class needed for interaction with fairseq-py"""
+
+ def pad(self):
+ return self.pad_index
+
+ def eos(self):
+ return self[self.end_token]
+
+ def unk(self):
+ return self[self.unk_token]
+
+ @property
+ def pad_index(self):
+ return self[self.null_token]
+
+ @property
+ def eos_index(self):
+ return self[self.end_token]
+
+ @property
+ def unk_index(self):
+ return self[self.unk_token]
+
+ def add_symbol(self):
+ raise NotImplementedError("This is a fake class")
+
+ @property
+ def symbols(self):
+ return self.tok2ind.keys()
+
+
+class _ParlaiTask(FairseqTask):
+ """Skeleton task class needed for interaction with fairseq-py."""
+
+ def __init__(self, dictionary):
+ self.dict = dictionary
+
+ @property
+ def target_dictionary(self):
+ return self.dict
+
+ @property
+ def source_dictionary(self):
+ return self.dict
+
+
+class FairseqAgent(TorchAgent):
+ """Generic wrapper around fairseq for use in ParlAI"""
+
+ metrics = {}
+ # TODO: merge with TorchAgent.add_cmdline_args
@staticmethod
def add_cmdline_args(argparser):
"""Add command-line arguments specifically for this agent."""
+ # first we need to add the general torch agent operations
+ TorchAgent.add_cmdline_args(argparser)
+
agent = argparser.add_argument_group('Fairseq Arguments')
- agent.add_argument(
- '-tr', '--truncate',
- type=int, default=-1,
- help='truncate input & output lengths to speed up training (may '
- 'reduce accuracy). This fixes all input and output to have a '
- 'maximum length. This reduces the total amount of padding in '
- 'the batches.')
- agent.add_argument(
- '--max-positions',
- default=1024,
- type=int,
- metavar='N',
- help='max number of tokens in the sequence')
agent.add_argument(
'--seed',
default=1,
type=int,
metavar='N',
- help='pseudo random number generator seed')
+ help='pseudo random number generator seed'
+ )
+ agent.add_argument(
+ '--skip-generation',
+ default=False,
+ type=bool,
+ metavar='BOOL',
+ help='Skips test time beam search. Much faster if you only need PPL',
+ )
+
+ # Dictionary construction stuff. Using the subclass in case we end up
+ # needing any fairseq specific things
+ _FairseqDictionary.add_cmdline_args(argparser)
+
+ # Optimization and learning rate schedule specific arguments
options.add_optimization_args(argparser)
+ known_args = argparser.parse_known_args(nohelp=True)[0]
+ if hasattr(known_args, "optimizer"):
+ optimizer = known_args.optimizer
+ opt_group = argparser.add_argument_group(
+ '{} optimizer arguments'.format(optimizer)
+ )
+ optim.OPTIMIZER_REGISTRY[optimizer].add_args(opt_group)
+ if hasattr(known_args, "lr_scheduler"):
+ lr_scheduler = known_args.lr_scheduler
+ lr_group = argparser.add_argument_group(
+ '{} scheduler arguments'.format(lr_scheduler)
+ )
+ optim.lr_scheduler.LR_SCHEDULER_REGISTRY[lr_scheduler].add_args(lr_group)
+
+ # Generation arguments
options.add_generation_args(argparser)
- options.add_model_args(argparser)
- DictionaryAgent.add_cmdline_args(argparser)
+
+ # We need to find out the fairseq model-specific options, so grab the
+ # architecture stuff and look up its options
+ arch_group = options.add_model_args(argparser)
+ # Fairseq marks the arch flag as required, but it may be specified
+ # by a saved model cache, so we do some weird stuff to undo that
+ for a in arch_group._actions:
+ if a.dest == "arch":
+ a.required = False
+ a.default = None
+ break
+ known_args = argparser.parse_known_args(nohelp=True)[0]
+ if hasattr(known_args, "arch") and known_args.arch is not None:
+ arch = known_args.arch
+ arch_group = argparser.add_argument_group(
+ "{} architecture arguments".format(arch)
+ )
+ models.ARCH_MODEL_REGISTRY[arch].add_args(arch_group)
+
+ # Override a few defaults from within fairseq to more sensible defaults
+ argparser.set_defaults(
+ clip_norm=0.1,
+ adam_betas="(0.9,0.98)"
+ )
def __init__(self, opt, shared=None):
- # initialize defaults first
+ # In general use a basic TorchAgent wherever possible
super().__init__(opt, shared)
if not shared:
- # this is not a shared instance of this class, so do full
- # initialization. if shared is set, only set up shared members.
- saved_state = None
- if opt.get('model_file') and os.path.isfile(opt['model_file']):
- # load model parameters if available
- print('Loading existing model params from ' +
- opt['model_file'])
- new_opt, saved_state = self.load(opt['model_file'])
- # override options with stored ones
- opt = self._override_opt(new_opt)
-
- self.args = OptWrapper(opt)
- self.parlai_dict = DictionaryAgent(opt)
- self.fairseq_dict = _make_fairseq_dict(self.parlai_dict)
- self.id = 'Fairseq'
- self.truncate = opt['truncate'] if opt['truncate'] > 0 else None
-
- self.EOS = self.fairseq_dict[self.fairseq_dict.eos()]
- self.EOS_TENSOR = (torch.LongTensor(1, 1)
- .fill_(self.fairseq_dict.eos()))
- self.NULL_IDX = self.fairseq_dict.pad()
-
- encoder = fconv.FConvEncoder(
- self.fairseq_dict,
- embed_dim=self.args.encoder_embed_dim,
- convolutions=eval(self.args.encoder_layers),
- dropout=self.args.dropout,
- max_positions=self.args.max_positions)
- decoder = fconv.FConvDecoder(
- self.fairseq_dict,
- embed_dim=self.args.decoder_embed_dim,
- convolutions=eval(self.args.decoder_layers),
- out_embed_dim=self.args.decoder_out_embed_dim,
- attention=eval(self.args.decoder_attention),
- dropout=self.args.dropout,
- max_positions=self.args.max_positions)
- self.model = fconv.FConvModel(encoder, decoder)
-
- # from fairseq's build_criterion()
- if self.args.label_smoothing > 0:
- self.criterion = criterions.LabelSmoothedCrossEntropyCriterion(
- self.args.label_smoothing, self.NULL_IDX)
+ # this is not a shared instance of this class, so do full initialization
+
+ # fairseq expects options to be in argparse format, instead of a dict
+ # We also need to do some argument postprocessing and whatnot
+ self.args, self.opt = _fairseq_opt_wrapper(opt)
+
+ # seed the RNG
+ torch.manual_seed(self.args.seed)
+
+ # Just some identifying info
+ self.id = "fairseq:{}".format(self.args.arch)
+
+ # construct dictionaries for parlai frontend and fairseq backend
+ self.dict = _FairseqDictionary(self.opt)
+
+ # We need a placeholder task for fairseq
+ self.task = _ParlaiTask(self.dict)
+
+ # actually construct the model and generator
+ model_class = models.ARCH_MODEL_REGISTRY[self.args.arch]
+ self.model = model_class.build_model(self.args, self.task)
+ self.generator = SequenceGenerator(
+ [self.model],
+ tgt_dict=self.dict,
+ beam_size=self.args.beam,
+ stop_early=(not self.args.no_early_stop),
+ normalize_scores=(not self.args.unnormalized),
+ len_penalty=self.args.lenpen,
+ )
+ # set up the grader and the trainer
+ # TODO: maybe support label smoothing here
+ self.criterion = CrossEntropyCriterion(self.args, self.task)
+
+ if self.args.fp16:
+ self.trainer = fp16_trainer.FP16Trainer(
+ self.args, self.task, self.model, self.criterion
+ )
else:
- self.criterion = criterions.CrossEntropyCriterion(
- self.args, self.fairseq_dict)
-
- self.trainer = MultiprocessingTrainer(self.args, self.model, self.criterion)
- if saved_state is not None:
- self.set_states(saved_state)
+ # TODO: we might choose to add a --no-fp16 opt in the future to
+ # explicitly disable fp16 instead
+ if torch.cuda.get_device_capability(0)[0] >= 7:
+ print("Heads up: using --fp16 could be a lot faster!")
+ self.trainer = trainer.Trainer(
+ self.args, self.task, self.model, self.criterion
+ )
+
+ # if the model already existed, let's preload it and the trainer
+ if self.opt.get('model_file') and os.path.isfile(self.opt['model_file']):
+ print('Loading existing model params from ' + self.opt['model_file'])
+ self.load(self.opt.get('model_file'))
+
+ # move things to the GPU if possible
+ if self.use_cuda:
+ self.model = self.model.cuda()
+ self.generator = self.generator.cuda()
+
+ # Start things off clean
self.reset()
- def _override_opt(self, new_opt):
- """Set overridable opts from loaded opt file.
-
- Print out each added key and each overriden key.
- Only override args specific to the model.
- """
- model_args = {
- 'arch',
- 'encoder-embed-dim',
- 'encoder-layers',
- 'decoder-embed-dim',
- 'decoder-layers',
- 'decoder-out-embed-dim',
- 'decoder-attention',
- }
-
- for k, v in new_opt.items():
- if k not in model_args:
- # skip non-model args
+ def _check_opts_unchanged(self, saved_opts, current_opts):
+ """Verify that critical options do not differ in command line vs saved model"""
+ for k in NON_OVERRIDABLE_ARGS:
+ if k not in saved_opts or k not in current_opts:
+ # if it's not an option needed by this fairseq model, don't stress
continue
- if k not in self.opt:
- print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))
- elif self.opt[k] != v:
- print('Overriding option [ {k}: {old} => {v}]'.format(
- k=k, old=self.opt[k], v=v))
- self.opt[k] = v
- return self.opt
+ if saved_opts[k] != current_opts[k]:
+ raise ValueError(
+ '{} cannot be overridden when --model-file is specified'.format(k)
+ )
+
+ def save(self, path):
+ """Save using fairseq's checkpointing."""
+ if not path:
+ return
+ self.trainer.save_checkpoint(path, {'opt': self.opt, 'epoch': 0})
+ # Parlai expects options to also be saved
+ with open(path + ".opt", 'wb') as handle:
+ # overridden options shouldn't be stored, only the main ones
+ if 'override' in self.opt:
+ del self.opt['override']
+ pickle.dump(self.opt, handle, protocol=pickle.HIGHEST_PROTOCOL)
+
+ def load(self, path):
+ """Load using fairseq's checkpointing."""
+ old_options = self.trainer.load_checkpoint(path)
+ self._check_opts_unchanged(old_options, self.opt)
+
+ def shutdown(self):
+ if not hasattr(self, 'trainer'):
+ # looks like this is a "fake" model that isn't actually used for batch_act.
+ # we don't need to save this one.
+ return
+ super().shutdown()
def reset(self):
"""Reset observation and episode_done."""
- self.observation = None
- self.episode_done = True
-
- def observe(self, observation):
- # shallow copy observation (deep copy can be expensive)
- observation = observation.copy()
- if not self.episode_done and not observation.get('preprocessed', False):
- # if the last example wasn't the end of an episode, then we need to
- # recall what was said in that example
- prev_dialogue = self.observation['text']
- observation['text'] = prev_dialogue + '\n' + observation['text']
- self.observation = observation
- self.episode_done = observation['episode_done']
- return observation
-
- def act(self):
- # call batch_act with this batch of one
- return self.batch_act([self.observation])[0]
+ super().reset()
+ self.reset_metrics()
def batch_act(self, observations):
bsz = len(observations)
# initialize a table of replies with this agent's id
- batch_reply = [{'id': self.getID()} for _ in range(bsz)]
-
- # convert the observations into batches of inputs and targets
- # valid_inds tells us the indices of all valid examples
- # e.g. for input [{}, {'text': 'hello'}, {}, {}], valid_inds is [1]
- # since the other three elements had no 'text' field
+ batch_reply = [{"id": self.getID()} for _ in range(bsz)]
- # also, split observations into sub-batches based on number of gpus
- obs_split = np.array_split(observations, self.trainer.num_replicas)
- samples = [self.batchify(obs) for obs in obs_split]
- samples = [s for s in samples if s[0] is not None]
- any_valid = any(len(s[0]) > 0 for s in samples)
-
- if not any_valid:
- # no valid examples, just return the empty responses we set up
+ # torchagent boilerplate
+ self.is_training = any(["labels" in obs for obs in observations])
+ vec_obs = [self.vectorize(obs) for obs in observations]
+ xs, _, ys, _, valid_inds = self.map_valid(vec_obs)
+ if xs is None:
return batch_reply
- # produce predictions if testing; otherwise, train
- has_targets = any(s[1] is not None for s in samples)
- if not has_targets:
- offset = 0
- for s in samples:
- xs = s[0]
- valid_inds = s[2]
-
- predictions = self._generate(self.args, xs)
- for i in range(len(predictions)):
- # map the predictions back to non-empty examples in the batch
- batch_reply[valid_inds[i] + offset]['text'] = predictions[i]
- if i == 0:
- print('prediction:', predictions[i])
- offset += len(valid_inds)
- else:
- loss = self._train(samples)
+ # here begins fairseq specific stuff
+ samples = self._make_sample(xs, ys)
- batch_reply[0]['metrics'] = {}
- for k, v in loss.items():
- batch_reply[0]['metrics'][k] = v * bsz
+ if self.is_training:
+ self.model.train()
+ self.trainer.train_step(samples)
+ else:
+ # grade the evaluation label
+ self.model.eval()
+ if ys is not None:
+ # Interactive mode won't have a gold label
+ self.trainer.valid_step(samples)
+
+ # Grade each of the candidate sequences
+ # TODO: grade everything in observations[i]['label_candidates']
+
+ # Next generate freely to create our response
+ if self.args.skip_generation:
+ # skip the generation step
+ for i in valid_inds:
+ batch_reply[i]["text"] = ""
+ else:
+ # actually do the generation
+ for i, response in zip(valid_inds, self._generate(samples)):
+ batch_reply[i]["text"] = response
return batch_reply
- def parse(self, string):
- return [self.fairseq_dict.index(word)
- for word in self.parlai_dict.tokenize(string)]
-
- def batchify(self, observations):
- """Convert a list of observations into input & target tensors."""
- # valid examples
- exs = [ex for ex in observations if 'text' in ex]
- # the indices of the valid (non-empty) tensors
- valid_inds = [i for i, ex in enumerate(observations) if 'text' in ex]
-
- # set up the input tensors
- batchsize = len(exs)
- if batchsize == 0:
- return None, None, None
- # tokenize the text
- parsed_x = [deque(maxlen=self.truncate) for _ in exs]
- for dq, ex in zip(parsed_x, exs):
- dq += self.parse(ex['text'])
- # parsed = [self.parse(ex['text']) for ex in exs]
- max_x_len = max((len(x) for x in parsed_x))
- for x in parsed_x:
- # left pad with zeros
- x.extendleft([self.fairseq_dict.pad()] * (max_x_len - len(x)))
- xs = torch.LongTensor(parsed_x)
-
- # set up the target tensors
- ys = None
- if 'labels' in exs[0]:
- # randomly select one of the labels to update on, if multiple
- labels = [random.choice(ex.get('labels', [''])) for ex in exs]
- parsed_y = [deque(maxlen=self.truncate) for _ in labels]
- for dq, y in zip(parsed_y, labels):
- dq.extendleft(reversed(self.parse(y)))
- for y in parsed_y:
- y.append(self.fairseq_dict.eos())
- # append EOS to each label
- max_y_len = max(len(y) for y in parsed_y)
- for y in parsed_y:
- y += [self.fairseq_dict.pad()] * (max_y_len - len(y))
- ys = torch.LongTensor(parsed_y)
- return xs, ys, valid_inds
-
- def _positions_for_tokens(self, tokens):
- size = tokens.size()
- not_pad = tokens.ne(self.fairseq_dict.pad()).long()
- new_pos = tokens.new(size).fill_(self.fairseq_dict.pad())
- new_pos += not_pad
- for i in range(1, size[1]):
- new_pos[:, i] += new_pos[:, i-1] - 1
- return new_pos
+ def _generate(self, samples):
+ src_tokens = samples["net_input"]["src_tokens"]
+ src_lengths = samples["net_input"]["src_lengths"]
+ gens = self.generator.generate(src_tokens, src_lengths, maxlen=64)
+ responses = []
+ for i in range(len(src_tokens)):
+ beams = gens[i]
+ selected = max(beams, key=lambda x: x["score"])
+ response = []
+ for t in selected["tokens"]:
+ t = t.item()
+ if t == self.dict.eos:
+ break
+ response.append(self.dict[t])
+ responses.append(" ".join(response))
+ return responses
+
+ def report(self):
+ # if we haven't initialized yet, just return a dummy object
+ if not hasattr(self, "trainer"):
+ return {}
+
+ # These are the metrics we'll pass up the way, and their new names
+ train_metrics = {"train_loss", "ups", "wps", "gnorm", "clip"}
+ valid_metrics = {"valid_loss"}
+
+ metrics = train_metrics if self.is_training else valid_metrics
+
+ output = {k: self.trainer.meters[k].avg for k in metrics}
+
+ # additionally output perplexity. note that fairseq models use base 2
+ # in cross_entropy:
+ # github.com/pytorch/fairseq/blob/master/fairseq/criterions/cross_entropy.py#L55
+ if "train_loss" in output:
+ output["train_ppl"] = np.exp2(output["train_loss"])
+ if "valid_loss" in output:
+ output["ppl"] = np.exp2(output["valid_loss"])
+
+ return output
+
+ def reset_metrics(self):
+ if not hasattr(self, "trainer"):
+ # We haven't initialized the trainer yet, so we don't have any metrics
+ return
+ # We need to reset everything
+ for k in self.trainer.meters:
+ self.trainer.meters[k].reset()
+
+ def receive_metrics(self, metrics_dict):
+ """Used to update lr scheduler."""
+ self.trainer.lr_step(-1, metrics_dict["valid_loss"])
+
+ # Helper functions
+ def _seq_length(self, xs):
+ """Computes length of the sequence (non-padded size)"""
+ return xs.ne(self.dict.pad_index).long().sum(dim=-1)
def _right_shifted_ys(self, ys):
+ """Replaces first token with EOS and shifts the remaining tokens right one."""
result = torch.LongTensor(ys.size())
- result[:, 0] = self.fairseq_dict.index(self.EOS)
+ result[:, 0] = self.dict.eos_index
result[:, 1:] = ys[:, :-1]
return result
- def _generate(self, opt, src_tokens):
- if not hasattr(self, 'translator'):
- self.translator = SequenceGenerator(
- [self.trainer.get_model()],
- beam_size=opt.beam,
- stop_early=(not opt.no_early_stop),
- normalize_scores=(not opt.unnormalized),
- len_penalty=opt.lenpen)
- self.translator.cuda()
- tokens = src_tokens.cuda(async=True)
- translations = self.translator.generate(Variable(tokens))
- results = [t[0] for t in translations]
- output_lines = [[] for _ in range(len(results))]
- for i in range(len(results)):
- output_lines[i] = ' '.join(self.fairseq_dict[idx]
- for idx in results[i]['tokens'][:-1])
- return output_lines
-
- def _train(self, samples):
- """Update the model using the targets."""
- for i, sample in enumerate(samples):
- # add extra info to samples
- sample = {
- 'src_tokens': sample[0],
- 'input_tokens': self._right_shifted_ys(sample[1]),
- 'target': sample[1],
- 'id': None
- }
- sample['ntokens'] = sum(len(t) for t in sample['target'])
- sample['src_positions'] = self._positions_for_tokens(
- sample['src_tokens'])
- sample['input_positions'] = self._positions_for_tokens(
- sample['input_tokens'])
- samples[i] = sample
- return self.trainer.train_step(samples)
-
- def save(self, path=None):
- path = self.opt.get('model_file', None) if path is None else path
- if path and hasattr(self, 'trainer'):
- model = {}
- model['state_dict'] = self.trainer.get_model().state_dict()
- model['opt'] = self.opt
- with open(path, 'wb') as write:
- torch.save(model, write)
-
- def shutdown(self):
- """Save the state of the model when shutdown."""
- path = self.opt.get('model_file', None)
- if path is not None:
- self.save(path + '.shutdown_state')
- super().shutdown()
-
- def load(self, path):
- """Return opt and model states."""
- model = torch.load(path, map_location=lambda cpu, _: cpu)
- return model['opt'], model['state_dict']
-
- def set_states(self, state_dict):
- """Set the state dict of the model from saved states."""
- self.trainer.get_model().load_state_dict(state_dict)
+ def _make_sample(self, xs, ys):
+ """Generates a sample object that Fairseq expects."""
+ # add extra info to samples
+ # TODO: should the right/left padding thing be in torch agent?
+ repadded = convert_padding_direction(xs, self.dict.pad(), right_to_left=True)
+ sample = {}
+ sample["net_input"] = {
+ "src_tokens": repadded,
+ "src_lengths": self._seq_length(xs),
+ }
+ if ys is not None:
+ sample["target"] = ys
+ sample["ntokens"] = sum(self._seq_length(ys)).item()
+ sample["net_input"]["prev_output_tokens"] = self._right_shifted_ys(ys)
+ return sample
diff --git a/parlai/agents/fairseq/fairseq_py/__init__.py b/parlai/agents/fairseq/fairseq_py/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/__init__.py b/parlai/agents/fairseq/fairseq_py/fairseq/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from .multiprocessing_pdb import pdb
-
-__all__ = ['pdb']
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/bleu.py b/parlai/agents/fairseq/fairseq_py/fairseq/bleu.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/bleu.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import ctypes
-import math
-import torch
-
-try:
- from fairseq import libbleu
-except ImportError as e:
- import sys
- sys.stderr.write('ERROR: missing libbleu.so. run `python setup.py install`\n')
- raise e
-
-
-C = ctypes.cdll.LoadLibrary(libbleu.__file__)
-
-
-class BleuStat(ctypes.Structure):
- _fields_ = [
- ('reflen', ctypes.c_size_t),
- ('predlen', ctypes.c_size_t),
- ('match1', ctypes.c_size_t),
- ('count1', ctypes.c_size_t),
- ('match2', ctypes.c_size_t),
- ('count2', ctypes.c_size_t),
- ('match3', ctypes.c_size_t),
- ('count3', ctypes.c_size_t),
- ('match4', ctypes.c_size_t),
- ('count4', ctypes.c_size_t),
- ]
-
-
-class Scorer(object):
- def __init__(self, pad, eos, unk):
- self.stat = BleuStat()
- self.pad = pad
- self.eos = eos
- self.unk = unk
- self.reset()
-
- def reset(self, one_init=False):
- if one_init:
- C.bleu_one_init(ctypes.byref(self.stat))
- else:
- C.bleu_zero_init(ctypes.byref(self.stat))
-
- def add(self, ref, pred):
- if not isinstance(ref, torch.IntTensor):
- raise TypeError('ref must be a torch.IntTensor (got {})'
- .format(type(ref)))
- if not isinstance(pred, torch.IntTensor):
- raise TypeError('pred must be a torch.IntTensor(got {})'
- .format(type(pred)))
-
- assert self.unk > 0, 'unknown token index must be >0'
- rref = ref.clone()
- rref.apply_(lambda x: x if x != self.unk else -x)
-
- rref = rref.contiguous().view(-1)
- pred = pred.contiguous().view(-1)
-
- C.bleu_add(
- ctypes.byref(self.stat),
- ctypes.c_size_t(rref.size(0)),
- ctypes.c_void_p(rref.data_ptr()),
- ctypes.c_size_t(pred.size(0)),
- ctypes.c_void_p(pred.data_ptr()),
- ctypes.c_int(self.pad),
- ctypes.c_int(self.eos))
-
- def score(self, order=4):
- psum = sum(math.log(p) if p > 0 else float('-Inf')
- for p in self.precision()[:order])
- return self.brevity() * math.exp(psum / order) * 100
-
- def precision(self):
- def ratio(a, b):
- return a / b if b > 0 else 0
-
- return [
- ratio(self.stat.match1, self.stat.count1),
- ratio(self.stat.match2, self.stat.count2),
- ratio(self.stat.match3, self.stat.count3),
- ratio(self.stat.match4, self.stat.count4),
- ]
-
- def brevity(self):
- r = self.stat.reflen / self.stat.predlen
- return min(1, math.exp(1 - r))
-
- def result_string(self, order=4):
- assert order <= 4, "BLEU scores for order > 4 aren't supported"
- fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
- for _ in range(1, order):
- fmt += '/{:2.1f}'
- fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
- bleup = [p * 100 for p in self.precision()[:order]]
- return fmt.format(order, self.score(order=order), *bleup,
- self.brevity(), self.stat.reflen/self.stat.predlen,
- self.stat.predlen, self.stat.reflen)
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/__init__.py b/parlai/agents/fairseq/fairseq_py/fairseq/criterions/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from .cross_entropy import CrossEntropyCriterion
-from .label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
-
-__all__ = [
- 'CrossEntropyCriterion',
- 'LabelSmoothedCrossEntropyCriterion',
-]
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/cross_entropy.py b/parlai/agents/fairseq/fairseq_py/fairseq/criterions/cross_entropy.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/cross_entropy.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import math
-import torch.nn.functional as F
-
-from .fairseq_criterion import FairseqCriterion
-
-
-class CrossEntropyCriterion(FairseqCriterion):
-
- def __init__(self, args, dst_dict):
- super().__init__(args, dst_dict)
-
- def forward(self, model, sample):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss, as a Variable
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- net_output = model(**sample['net_input'])
- input = net_output.view(-1, net_output.size(-1))
- target = sample['target'].view(-1)
- loss = F.cross_entropy(input, target, size_average=False, ignore_index=self.padding_idx)
- sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
- logging_output = {
- 'loss': loss.data[0],
- 'sample_size': sample_size,
- }
- return loss, sample_size, logging_output
-
- @staticmethod
- def aggregate_logging_outputs(logging_outputs):
- """Aggregate logging outputs from data parallel training."""
- sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
- return {
- 'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
- }
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/fairseq_criterion.py b/parlai/agents/fairseq/fairseq_py/fairseq/criterions/fairseq_criterion.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/fairseq_criterion.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from torch.nn.modules.loss import _Loss
-
-
-class FairseqCriterion(_Loss):
-
- def __init__(self, args, dst_dict):
- super().__init__()
- self.args = args
- self.padding_idx = dst_dict.pad()
-
- def forward(self, model, sample):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss, as a Variable
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- raise NotImplementedError
-
- @staticmethod
- def aggregate_logging_outputs(logging_outputs):
- """Aggregate logging outputs from data parallel training."""
- raise NotImplementedError
-
- @staticmethod
- def grad_denom(sample_sizes):
- """Compute the gradient denominator for a set of sample sizes."""
- return sum(sample_sizes)
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/label_smoothed_cross_entropy.py b/parlai/agents/fairseq/fairseq_py/fairseq/criterions/label_smoothed_cross_entropy.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/criterions/label_smoothed_cross_entropy.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import math
-import torch
-from torch.autograd.variable import Variable
-import torch.nn.functional as F
-
-from .fairseq_criterion import FairseqCriterion
-
-
-class LabelSmoothedNLLLoss(torch.autograd.Function):
-
- @staticmethod
- def forward(ctx, input, target, eps, padding_idx, weights):
- grad_input = input.new(input.size()).zero_()
- target = target.view(target.size(0), 1)
- grad_input = grad_input.scatter_(grad_input.dim() - 1, target, eps - 1)
-
- norm = grad_input.size(-1)
- if weights is not None:
- norm = weights.sum()
- grad_input.mul(weights.view(1, weights.size(0)).expand_as(grad_input))
-
- if padding_idx is not None:
- norm -= 1 if weights is None else weights[padding_idx]
- grad_input.select(grad_input.dim() - 1, padding_idx).fill_(0)
-
- grad_input = grad_input.add(-eps / norm)
-
- ctx.grad_input = grad_input
- return input.new([grad_input.view(-1).dot(input.view(-1))])
-
- @staticmethod
- def backward(ctx, grad):
- return Variable(ctx.grad_input, volatile=True) * grad, None, None, None, None
-
-
-class LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
-
- def __init__(self, args, dst_dict, weights=None):
- super().__init__(args, dst_dict)
- self.eps = args.label_smoothing
- self.weights = weights
-
- def forward(self, model, sample):
- """Compute the loss for the given sample.
-
- Returns a tuple with three elements:
- 1) the loss, as a Variable
- 2) the sample size, which is used as the denominator for the gradient
- 3) logging outputs to display while training
- """
- net_output = model(**sample['net_input'])
- input = F.log_softmax(net_output.view(-1, net_output.size(-1)))
- target = sample['target'].view(-1)
- loss = LabelSmoothedNLLLoss.apply(input, target, self.eps, self.padding_idx, self.weights)
- sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
- logging_output = {
- 'loss': loss.data[0],
- 'sample_size': sample_size,
- }
- return loss, sample_size, logging_output
-
- @staticmethod
- def aggregate_logging_outputs(logging_outputs):
- """Aggregate logging outputs from data parallel training."""
- sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
- return {
- 'loss': sum(log.get('loss', 0) for log in logging_outputs) / sample_size / math.log(2),
- }
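The custom autograd Function above constructs the gradient tensor directly and recovers the loss as a dot product with the log-probabilities. Numerically that is the usual label-smoothing objective: cross-entropy against a target distribution that keeps 1 - eps on the gold label and spreads eps uniformly over the vocabulary. A NumPy sketch of that equivalent form (padding handling and per-class weights omitted; names are mine):

import numpy as np

def log_softmax(logits):
    # numerically stable log-softmax over the last axis
    z = logits - logits.max(axis=-1, keepdims=True)
    return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))

def label_smoothed_nll(logits, targets, eps=0.1):
    # (1 - eps) * standard NLL  +  eps * uniform smoothing over the vocab
    logp = log_softmax(logits)                      # (batch, vocab)
    nll = -logp[np.arange(len(targets)), targets]
    smooth = -logp.mean(axis=-1)
    return ((1.0 - eps) * nll + eps * smooth).sum()

rng = np.random.default_rng(0)
print(label_smoothed_nll(rng.normal(size=(4, 10)), np.array([1, 3, 3, 7])))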
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/data.py b/parlai/agents/fairseq/fairseq_py/fairseq/data.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/data.py
+++ /dev/null
@@ -1,375 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import contextlib
-import itertools
-import glob
-import numbers
-import numpy as np
-import os
-import torch
-import torch.utils.data
-
-from fairseq.dictionary import Dictionary
-from fairseq.indexed_dataset import IndexedDataset, IndexedInMemoryDataset, IndexedRawTextDataset
-
-
-def has_binary_files(data_dir, splits):
- for split in splits:
- if len(glob.glob(os.path.join(data_dir, '{}.*-*.*.bin'.format(split)))) < 2:
- return False
- return True
-
-
-def infer_language_pair(path, splits):
- """Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
- src, dst = None, None
- for filename in os.listdir(path):
- parts = filename.split('.')
- for split in splits:
- if parts[0] == split and parts[-1] == 'idx':
- src, dst = parts[1].split('-')
- break
- return src, dst
-
-
-def load_dictionaries(path, src_lang, dst_lang):
- """Load dictionaries for a given language pair."""
- src_dict = Dictionary.load(os.path.join(path, 'dict.{}.txt'.format(src_lang)))
- dst_dict = Dictionary.load(os.path.join(path, 'dict.{}.txt'.format(dst_lang)))
- return src_dict, dst_dict
-
-
-def load_dataset(path, load_splits, src=None, dst=None):
- """Loads specified data splits (e.g., test, train or valid) from the
- specified folder and check that files exist."""
- if src is None and dst is None:
- # find language pair automatically
- src, dst = infer_language_pair(path, load_splits)
- assert src is not None and dst is not None, 'Source and target languages should be provided'
-
- src_dict, dst_dict = load_dictionaries(path, src, dst)
- dataset = LanguageDatasets(src, dst, src_dict, dst_dict)
-
- # Load dataset from binary files
- def all_splits_exist(src, dst):
- for split in load_splits:
- filename = '{0}.{1}-{2}.{1}.idx'.format(split, src, dst)
- if not os.path.exists(os.path.join(path, filename)):
- return False
- return True
-
- # infer langcode
- if all_splits_exist(src, dst):
- langcode = '{}-{}'.format(src, dst)
- elif all_splits_exist(dst, src):
- langcode = '{}-{}'.format(dst, src)
- else:
- raise Exception('Dataset cannot be loaded from path: ' + path)
-
- def fmt_path(fmt, *args):
- return os.path.join(path, fmt.format(*args))
-
- for split in load_splits:
- for k in itertools.count():
- prefix = "{}{}".format(split, k if k > 0 else '')
- src_path = fmt_path('{}.{}.{}', prefix, langcode, src)
- dst_path = fmt_path('{}.{}.{}', prefix, langcode, dst)
-
- if not IndexedInMemoryDataset.exists(src_path):
- break
-
- dataset.splits[prefix] = LanguagePairDataset(
- IndexedInMemoryDataset(src_path),
- IndexedInMemoryDataset(dst_path),
- pad_idx=dataset.src_dict.pad(),
- eos_idx=dataset.src_dict.eos(),
- )
-
- return dataset
-
-
-def load_raw_text_dataset(path, load_splits, src=None, dst=None):
- """Loads specified data splits (e.g., test, train or valid) from raw text
- files in the specified folder."""
- if src is None and dst is None:
- # find language pair automatically
- src, dst = infer_language_pair(path, load_splits)
- assert src is not None and dst is not None, 'Source and target languages should be provided'
-
- src_dict, dst_dict = load_dictionaries(path, src, dst)
- dataset = LanguageDatasets(src, dst, src_dict, dst_dict)
-
- # Load dataset from raw text files
- for split in load_splits:
- src_path = os.path.join(path, '{}.{}'.format(split, src))
- dst_path = os.path.join(path, '{}.{}'.format(split, dst))
- dataset.splits[split] = LanguagePairDataset(
- IndexedRawTextDataset(src_path, src_dict),
- IndexedRawTextDataset(dst_path, dst_dict),
- pad_idx=dataset.src_dict.pad(),
- eos_idx=dataset.src_dict.eos(),
- )
- return dataset
-
-
-class LanguageDatasets(object):
- def __init__(self, src, dst, src_dict, dst_dict):
- self.src = src
- self.dst = dst
- self.src_dict = src_dict
- self.dst_dict = dst_dict
- self.splits = {}
-
- assert self.src_dict.pad() == self.dst_dict.pad()
- assert self.src_dict.eos() == self.dst_dict.eos()
- assert self.src_dict.unk() == self.dst_dict.unk()
-
- def train_dataloader(self, split, num_workers=0, max_tokens=None,
- max_sentences=None, max_positions=(1024, 1024),
- seed=None, epoch=1, sample_without_replacement=0,
- sort_by_source_size=False):
- dataset = self.splits[split]
- with numpy_seed(seed):
- batch_sampler = shuffled_batches_by_size(
- dataset.src, dataset.dst, max_tokens=max_tokens,
- max_sentences=max_sentences, epoch=epoch,
- sample=sample_without_replacement, max_positions=max_positions,
- sort_by_source_size=sort_by_source_size)
- return torch.utils.data.DataLoader(
- dataset, num_workers=num_workers, collate_fn=dataset.collater,
- batch_sampler=batch_sampler)
-
- def eval_dataloader(self, split, num_workers=0, max_tokens=None,
- max_sentences=None, max_positions=(1024, 1024),
- skip_invalid_size_inputs_valid_test=False,
- descending=False):
- dataset = self.splits[split]
- batch_sampler = list(batches_by_size(
- dataset.src, dataset.dst, max_tokens, max_sentences,
- max_positions=max_positions,
- ignore_invalid_inputs=skip_invalid_size_inputs_valid_test,
- descending=descending))
- return torch.utils.data.DataLoader(
- dataset, num_workers=num_workers, collate_fn=dataset.collater,
- batch_sampler=batch_sampler)
-
-
-def skip_group_enumerator(it, ngpus, offset=0):
- res = []
- idx = 0
- for i, sample in enumerate(it):
- if i < offset:
- continue
- res.append(sample)
- if len(res) >= ngpus:
- yield (i, res)
- res = []
- idx = i + 1
- if len(res) > 0:
- yield (idx, res)
-
-
-class LanguagePairDataset(object):
-
- # padding constants
- LEFT_PAD_SOURCE = True
- LEFT_PAD_TARGET = False
-
- def __init__(self, src, dst, pad_idx, eos_idx):
- self.src = src
- self.dst = dst
- self.pad_idx = pad_idx
- self.eos_idx = eos_idx
-
- def __getitem__(self, i):
- # subtract 1 for 0-based indexing
- source = self.src[i].long() - 1
- target = self.dst[i].long() - 1
- return {
- 'id': i,
- 'source': source,
- 'target': target,
- }
-
- def __len__(self):
- return len(self.src)
-
- def collater(self, samples):
- return LanguagePairDataset.collate(samples, self.pad_idx, self.eos_idx)
-
- @staticmethod
- def collate(samples, pad_idx, eos_idx):
-
- def merge(key, left_pad, move_eos_to_beginning=False):
- return LanguagePairDataset.collate_tokens(
- [s[key] for s in samples], pad_idx, eos_idx, left_pad, move_eos_to_beginning)
-
- return {
- 'id': torch.LongTensor([s['id'].item() for s in samples]),
- 'src_tokens': merge('source', left_pad=LanguagePairDataset.LEFT_PAD_SOURCE),
- # we create a shifted version of targets for feeding the previous
- # output token(s) into the next decoder step
- 'input_tokens': merge('target', left_pad=LanguagePairDataset.LEFT_PAD_TARGET,
- move_eos_to_beginning=True),
- 'target': merge('target', left_pad=LanguagePairDataset.LEFT_PAD_TARGET),
- 'ntokens': sum(len(s['target']) for s in samples),
- }
-
- @staticmethod
- def collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning):
- size = max(v.size(0) for v in values)
- res = values[0].new(len(values), size).fill_(pad_idx)
-
- def copy_tensor(src, dst):
- assert dst.numel() == src.numel()
- if move_eos_to_beginning:
- assert src[-1] == eos_idx
- dst[0] = eos_idx
- dst[1:] = src[:-1]
- else:
- dst.copy_(src)
-
- for i, v in enumerate(values):
- if left_pad:
- copy_tensor(v, res[i][size-len(v):])
- else:
- copy_tensor(v, res[i][:len(v)])
- return res
-
-
-def _valid_size(src_size, dst_size, max_positions):
- if isinstance(max_positions, numbers.Number):
- max_src_positions, max_dst_positions = max_positions, max_positions
- else:
- max_src_positions, max_dst_positions = max_positions
- if src_size < 2 or src_size > max_src_positions:
- return False
- if dst_size is not None and (dst_size < 2 or dst_size > max_dst_positions):
- return False
- return True
-
-
-def _make_batches(src, dst, indices, max_tokens, max_sentences, max_positions,
- ignore_invalid_inputs=False, allow_different_src_lens=False):
- batch = []
-
- def yield_batch(next_idx, num_tokens):
- if len(batch) == 0:
- return False
- if len(batch) == max_sentences:
- return True
- if num_tokens > max_tokens:
- return True
- if not allow_different_src_lens and \
- (src.sizes[batch[0]] != src.sizes[next_idx]):
- return True
- return False
-
- sample_len = 0
- ignored = []
- for idx in indices:
- if not _valid_size(src.sizes[idx], dst.sizes[idx], max_positions):
- if ignore_invalid_inputs:
- ignored.append(idx)
- continue
- raise Exception(
- "Unable to handle input id {} of size {} / {}.".format(
- idx, src.sizes[idx], dst.sizes[idx]))
-
- sample_len = max(sample_len, src.sizes[idx], dst.sizes[idx])
- num_tokens = (len(batch) + 1) * sample_len
- if yield_batch(idx, num_tokens):
- yield batch
- batch = []
- sample_len = max(src.sizes[idx], dst.sizes[idx])
-
- batch.append(idx)
-
- if len(batch) > 0:
- yield batch
-
- if len(ignored) > 0:
- print("Warning! {} samples are either too short or too long "
- "and will be ignored, first few sample ids={}".format(len(ignored), ignored[:10]))
-
-
-def batches_by_size(src, dst, max_tokens=None, max_sentences=None,
- max_positions=(1024, 1024), ignore_invalid_inputs=False,
- descending=False):
- """Returns batches of indices sorted by size. Sequences with different
- source lengths are not allowed in the same batch."""
- assert isinstance(src, IndexedDataset) and isinstance(dst, IndexedDataset)
- if max_tokens is None:
- max_tokens = float('Inf')
- if max_sentences is None:
- max_sentences = float('Inf')
- indices = np.argsort(src.sizes, kind='mergesort')
- if descending:
- indices = np.flip(indices, 0)
- return _make_batches(
- src, dst, indices, max_tokens, max_sentences, max_positions,
- ignore_invalid_inputs, allow_different_src_lens=False)
-
-
-def shuffled_batches_by_size(src, dst, max_tokens=None, max_sentences=None,
- epoch=1, sample=0, max_positions=(1024, 1024),
- sort_by_source_size=False):
- """Returns batches of indices, bucketed by size and then shuffled. Batches
- may contain sequences of different lengths."""
- assert isinstance(src, IndexedDataset) and isinstance(dst, IndexedDataset)
- if max_tokens is None:
- max_tokens = float('Inf')
- if max_sentences is None:
- max_sentences = float('Inf')
-
- indices = np.random.permutation(len(src))
-
- # sort by sizes
- indices = indices[np.argsort(dst.sizes[indices], kind='mergesort')]
- indices = indices[np.argsort(src.sizes[indices], kind='mergesort')]
-
- batches = list(_make_batches(
- src, dst, indices, max_tokens, max_sentences, max_positions,
- ignore_invalid_inputs=True, allow_different_src_lens=True))
-
- if not sort_by_source_size:
- np.random.shuffle(batches)
-
- if sample:
- offset = (epoch - 1) * sample
- while offset > len(batches):
- np.random.shuffle(batches)
- offset -= len(batches)
-
- result = batches[offset:(offset + sample)]
- while len(result) < sample:
- np.random.shuffle(batches)
- result += batches[:(sample - len(result))]
-
- assert len(result) == sample, \
- "batch length is not correct {}".format(len(result))
-
- batches = result
-
- return batches
-
-
[email protected]
-def numpy_seed(seed):
- """Context manager which seeds the NumPy PRNG with the specified seed and
- restores the state afterward"""
- if seed is None:
- yield
- return
- state = np.random.get_state()
- np.random.seed(seed)
- try:
- yield
- finally:
- np.random.set_state(state)
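The `_make_batches`/`batches_by_size` helpers above group length-sorted indices under two limits: a sentence cap and a token budget computed as (batch size) x (longest sample in the batch). A standalone sketch of that policy on plain Python lists (the equal-source-length constraint and the IndexedDataset plumbing are left out; names are mine):

def batches_by_token_budget(lengths, max_tokens=12, max_sentences=4):
    # visit samples shortest-first, starting a new batch whenever adding
    # the next sample would exceed either limit
    order = sorted(range(len(lengths)), key=lambda i: lengths[i])
    batches, batch, longest = [], [], 0
    for idx in order:
        longest = max(longest, lengths[idx])
        over_cap = len(batch) + 1 > max_sentences
        over_budget = (len(batch) + 1) * longest > max_tokens
        if batch and (over_cap or over_budget):
            batches.append(batch)
            batch, longest = [], lengths[idx]
        batch.append(idx)
    if batch:
        batches.append(batch)
    return batches

print(batches_by_token_budget([3, 9, 2, 5, 4, 7, 2]))  # [[2, 6, 0], [4, 3], [5], [1]]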
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/dictionary.py b/parlai/agents/fairseq/fairseq_py/fairseq/dictionary.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/dictionary.py
+++ /dev/null
@@ -1,140 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import math
-import torch
-
-
-class Dictionary(object):
- """A mapping from symbols to consecutive integers"""
- def __init__(self, pad='<pad>', eos='</s>', unk='<unk>'):
- self.unk_word, self.pad_word, self.eos_word = unk, pad, eos
- self.symbols = []
- self.count = []
- self.indices = {}
- # dictionary indexing starts at 1 for consistency with Lua
- self.add_symbol('<Lua heritage>')
- self.pad_index = self.add_symbol(pad)
- self.eos_index = self.add_symbol(eos)
- self.unk_index = self.add_symbol(unk)
- self.nspecial = len(self.symbols)
-
- def __getitem__(self, idx):
- if idx < len(self.symbols):
- return self.symbols[idx]
- return self.unk_word
-
- def __len__(self):
- """Returns the number of symbols in the dictionary"""
- return len(self.symbols)
-
- def index(self, sym):
- """Returns the index of the specified symbol"""
- if sym in self.indices:
- return self.indices[sym]
- return self.unk_index
-
- def string(self, tensor, bpe_symbol=None, escape_unk=False):
- """Helper for converting a tensor of token indices to a string.
-
- Can optionally remove BPE symbols or escape <unk> words.
- """
- if torch.is_tensor(tensor) and tensor.dim() == 2:
- return '\n'.join(self.string(t) for t in tensor)
-
- def token_string(i):
- if i == self.unk():
- return self.unk_string(escape_unk)
- else:
- return self[i]
-
- sent = ' '.join(token_string(i) for i in tensor if i != self.eos())
- if bpe_symbol is not None:
- sent = sent.replace(bpe_symbol, '')
- return sent
-
- def unk_string(self, escape=False):
- """Return unknown string, optionally escaped as: <<unk>>"""
- if escape:
- return '<{}>'.format(self.unk_word)
- else:
- return self.unk_word
-
- def add_symbol(self, word, n=1):
- """Adds a word to the dictionary"""
- if word in self.indices:
- idx = self.indices[word]
- self.count[idx] = self.count[idx] + n
- return idx
- else:
- idx = len(self.symbols)
- self.indices[word] = idx
- self.symbols.append(word)
- self.count.append(n)
- return idx
-
- def finalize(self):
- """Sort symbols by frequency in descending order, ignoring special ones."""
- self.count, self.symbols = zip(
- *sorted(zip(self.count, self.symbols),
- key=(lambda x: math.inf if self.indices[x[1]] < self.nspecial else x[0]),
- reverse=True)
- )
-
- def pad(self):
- """Helper to get index of pad symbol"""
- return self.pad_index
-
- def eos(self):
- """Helper to get index of end-of-sentence symbol"""
- return self.eos_index
-
- def unk(self):
- """Helper to get index of unk symbol"""
- return self.unk_index
-
- @staticmethod
- def load(f):
- """Loads the dictionary from a text file with the format:
-
- ```
- <symbol0> <count0>
- <symbol1> <count1>
- ...
- ```
- """
-
- if isinstance(f, str):
- try:
- with open(f, 'r', encoding='utf-8') as fd:
- return Dictionary.load(fd)
- except:
- raise Exception("Incorrect encoding detected in {}, please "
- "rebuild the dataset".format(f))
-
- d = Dictionary()
- for line in f.readlines():
- idx = line.rfind(' ')
- word = line[:idx]
- count = int(line[idx+1:])
- d.indices[word] = len(d.symbols)
- d.symbols.append(word)
- d.count.append(count)
- return d
-
- def save(self, f, threshold=3, nwords=-1):
- """Stores dictionary into a text file"""
- if isinstance(f, str):
- with open(f, 'w', encoding='utf-8') as fd:
- return self.save(fd, threshold, nwords)
- cnt = 0
- for i, t in enumerate(zip(self.symbols, self.count)):
- if i >= self.nspecial and t[1] >= threshold \
- and (nwords < 0 or cnt < nwords):
- print('{} {}'.format(t[0], t[1]), file=f)
- cnt += 1
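`Dictionary.load`/`save` above persist the vocabulary as one `<symbol> <count>` pair per line and split on the *last* space when reading, so a symbol may itself contain spaces. A minimal sketch of that parsing rule (toy lines and counts, not a real fairseq dictionary):

def parse_vocab_lines(lines):
    vocab = {}
    for line in lines:
        entry = line.rstrip('\n')
        cut = entry.rfind(' ')          # split on the last space only
        symbol, count = entry[:cut], int(entry[cut + 1:])
        vocab[symbol] = count
    return vocab

# the multi-word symbol survives because only the final space is a separator
print(parse_vocab_lines(['hello 120\n', 'new york 7\n']))
# {'hello': 120, 'new york': 7}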
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/indexed_dataset.py b/parlai/agents/fairseq/fairseq_py/fairseq/indexed_dataset.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/indexed_dataset.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import numpy as np
-import os
-import struct
-import torch
-
-from fairseq.tokenizer import Tokenizer
-
-
-def read_longs(f, n):
- a = np.empty(n, dtype=np.int64)
- f.readinto(a)
- return a
-
-
-def write_longs(f, a):
- f.write(np.array(a, dtype=np.int64))
-
-
-dtypes = {
- 1: np.uint8,
- 2: np.int8,
- 3: np.int16,
- 4: np.int32,
- 5: np.int64,
- 6: np.float,
- 7: np.double,
-}
-
-
-def code(dtype):
- for k in dtypes.keys():
- if dtypes[k] == dtype:
- return k
-
-
-class IndexedDataset(object):
- """Loader for TorchNet IndexedDataset"""
-
- def __init__(self, path):
- with open(path + '.idx', 'rb') as f:
- magic = f.read(8)
- assert magic == b'TNTIDX\x00\x00'
- version = f.read(8)
- assert struct.unpack('<Q', version) == (1,)
- code, self.element_size = struct.unpack('<QQ', f.read(16))
- self.dtype = dtypes[code]
- self.size, self.s = struct.unpack('<QQ', f.read(16))
- self.dim_offsets = read_longs(f, self.size + 1)
- self.data_offsets = read_longs(f, self.size + 1)
- self.sizes = read_longs(f, self.s)
- self.read_data(path)
-
- def read_data(self, path):
- self.data_file = open(path + '.bin', 'rb', buffering=0)
-
- def check_index(self, i):
- if i < 0 or i >= self.size:
- raise IndexError('index out of range')
-
- def __del__(self):
- self.data_file.close()
-
- def __getitem__(self, i):
- self.check_index(i)
- tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
- a = np.empty(tensor_size, dtype=self.dtype)
- self.data_file.seek(self.data_offsets[i] * self.element_size)
- self.data_file.readinto(a)
- return torch.from_numpy(a)
-
- def __len__(self):
- return self.size
-
- @staticmethod
- def exists(path):
- return os.path.exists(path + '.idx')
-
-
-class IndexedInMemoryDataset(IndexedDataset):
- """Loader for TorchNet IndexedDataset, keeps all the data in memory"""
-
- def read_data(self, path):
- self.data_file = open(path + '.bin', 'rb')
- self.buffer = np.empty(self.data_offsets[-1], dtype=self.dtype)
- self.data_file.readinto(self.buffer)
- self.data_file.close()
-
- def __del__(self):
- pass
-
- def __getitem__(self, i):
- self.check_index(i)
- tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]]
- a = np.empty(tensor_size, dtype=self.dtype)
- np.copyto(a, self.buffer[self.data_offsets[i]:self.data_offsets[i + 1]])
- return torch.from_numpy(a)
-
-
-class IndexedRawTextDataset(IndexedDataset):
- """Takes a text file as input and binarizes it in memory at instantiation.
- Original lines are also kept in memory"""
-
- def __init__(self, path, dictionary):
- self.tokens_list = []
- self.lines = []
- self.sizes = []
- self.read_data(path, dictionary)
- self.size = len(self.tokens_list)
-
- def read_data(self, path, dictionary):
- with open(path, 'r') as f:
- for line in f:
- self.lines.append(line.strip('\n'))
- # +1 for Lua compatibility
- tokens = Tokenizer.tokenize(line, dictionary, add_if_not_exist=False) + 1
- self.tokens_list.append(tokens)
- self.sizes.append(len(tokens))
- self.sizes = np.array(self.sizes)
-
- def __getitem__(self, i):
- self.check_index(i)
- return self.tokens_list[i]
-
- def get_original_text(self, i):
- self.check_index(i)
- return self.lines[i]
-
- def __del__(self):
- pass
-
- def __len__(self):
- return self.size
-
-
-class IndexedDatasetBuilder(object):
-
- element_sizes = {
- np.uint8: 1,
- np.int8: 1,
- np.int16: 2,
- np.int32: 4,
- np.int64: 8,
- np.float: 4,
- np.double: 8
- }
-
- def __init__(self, out_file, dtype=np.int32):
- self.out_file = open(out_file, 'wb')
- self.dtype = dtype
- self.data_offsets = [0]
- self.dim_offsets = [0]
- self.sizes = []
- self.element_size = self.element_sizes[self.dtype]
-
- def add_item(self, tensor):
- # +1 for Lua compatibility
- bytes = self.out_file.write(np.array(tensor.numpy() + 1, dtype=self.dtype))
- self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
- for s in tensor.size():
- self.sizes.append(s)
- self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
-
- def finalize(self, index_file):
- self.out_file.close()
- index = open(index_file, 'wb')
- index.write(b'TNTIDX\x00\x00')
- index.write(struct.pack('<Q', 1))
- index.write(struct.pack('<QQ', code(self.dtype),
- self.element_size))
- index.write(struct.pack('<QQ', len(self.data_offsets) - 1,
- len(self.sizes)))
- write_longs(index, self.dim_offsets)
- write_longs(index, self.data_offsets)
- write_longs(index, self.sizes)
- index.close()
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/meters.py b/parlai/agents/fairseq/fairseq_py/fairseq/meters.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/meters.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import time
-
-
-class AverageMeter(object):
- """Computes and stores the average and current value"""
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.val = 0
- self.avg = 0
- self.sum = 0
- self.count = 0
-
- def update(self, val, n=1):
- self.val = val
- self.sum += val * n
- self.count += n
- self.avg = self.sum / self.count
-
-
-class TimeMeter(object):
- """Computes the average occurrence of some event per second"""
- def __init__(self):
- self.reset()
-
- def reset(self):
- self.start = time.time()
- self.n = 0
-
- def update(self, val=1):
- self.n += val
-
- @property
- def avg(self):
- delta = time.time() - self.start
- return self.n / delta
-
- @property
- def elapsed_time(self):
- return time.time() - self.start
-
-
-class StopwatchMeter(object):
- """Computes the sum/avg duration of some event in seconds"""
- def __init__(self):
- self.reset()
-
- def start(self):
- self.start_time = time.time()
-
- def stop(self, n=1):
- if self.start_time is not None:
- delta = time.time() - self.start_time
- self.sum += delta
- self.n += n
- self.start_time = None
-
- def reset(self):
- self.sum = 0
- self.n = 0
- self.start_time = None
-
- @property
- def avg(self):
- return self.sum / self.n
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/__init__.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from .fairseq_decoder import FairseqDecoder
-from .fairseq_encoder import FairseqEncoder
-from .fairseq_incremental_decoder import FairseqIncrementalDecoder
-from .fairseq_model import FairseqModel
-
-from . import fconv, lstm
-
-
-__all__ = ['fconv', 'lstm']
-
-arch_model_map = {}
-for model in __all__:
- archs = locals()[model].get_archs()
- for arch in archs:
- assert arch not in arch_model_map, 'Duplicate model architecture detected: {}'.format(arch)
- arch_model_map[arch] = model
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_decoder.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_decoder.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_decoder.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch.nn as nn
-
-
-class FairseqDecoder(nn.Module):
- """Base class for decoders."""
-
- def __init__(self):
- super().__init__()
-
- def max_positions(self):
- """Maximum input length supported by the decoder."""
- raise NotImplementedError
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_encoder.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_encoder.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_encoder.py
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch.nn as nn
-
-
-class FairseqEncoder(nn.Module):
- """Base class for encoders."""
-
- def __init__(self):
- super().__init__()
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- raise NotImplementedError
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_incremental_decoder.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_incremental_decoder.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_incremental_decoder.py
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from . import FairseqDecoder
-
-
-class FairseqIncrementalDecoder(FairseqDecoder):
- """Base class for incremental decoders."""
-
- def __init__(self):
- super().__init__()
- self._is_incremental_eval = False
- self._incremental_state = {}
-
- def forward(self, tokens, encoder_out):
- if self._is_incremental_eval:
- raise NotImplementedError
- else:
- raise NotImplementedError
-
- def incremental_inference(self):
- """Context manager for incremental inference.
-
- This provides an optimized forward pass for incremental inference
- (i.e., it predicts one time step at a time). If the input order changes
- between time steps, call reorder_incremental_state to update the
- relevant buffers. To generate a fresh sequence, first call
- clear_incremental_state.
-
- Usage:
- ```
- with model.decoder.incremental_inference():
- for step in range(maxlen):
- out, _ = model.decoder(tokens[:, :step], encoder_out)
- probs = torch.nn.functional.log_softmax(out[:, -1, :])
- ```
- """
- class IncrementalInference(object):
- def __init__(self, decoder):
- self.decoder = decoder
-
- def __enter__(self):
- self.decoder.incremental_eval(True)
-
- def __exit__(self, *args):
- self.decoder.incremental_eval(False)
- return IncrementalInference(self)
-
- def incremental_eval(self, mode=True):
- """Sets the decoder and all children in incremental evaluation mode."""
- assert self._is_incremental_eval != mode, \
- 'incremental_eval already set to mode {}'.format(mode)
-
- self._is_incremental_eval = mode
- if mode:
- self.clear_incremental_state()
-
- def apply_incremental_eval(module):
- if module != self and hasattr(module, 'incremental_eval'):
- module.incremental_eval(mode)
- self.apply(apply_incremental_eval)
-
- def get_incremental_state(self, key):
- """Return cached state or None if not in incremental inference mode."""
- if self._is_incremental_eval and key in self._incremental_state:
- return self._incremental_state[key]
- return None
-
- def set_incremental_state(self, key, value):
- """Cache state needed for incremental inference mode."""
- if self._is_incremental_eval:
- self._incremental_state[key] = value
- return value
-
- def clear_incremental_state(self):
- """Clear all state used for incremental generation.
-
- **For incremental inference only**
-
- This should be called before generating a fresh sequence.
- beam_size is required if using BeamableMM.
- """
- if self._is_incremental_eval:
- self._incremental_state = {}
-
- def apply_clear_incremental_state(module):
- if module != self and hasattr(module, 'clear_incremental_state'):
- module.clear_incremental_state()
- self.apply(apply_clear_incremental_state)
-
- def reorder_incremental_state(self, new_order):
- """Reorder buffered internal state (for incremental generation).
-
- **For incremental inference only**
-
- This should be called when the order of the input has changed from the
- previous time step. A typical use case is beam search, where the input
- order changes between time steps based on the choice of beams.
- """
- if self._is_incremental_eval:
- def apply_reorder_incremental_state(module):
- if module != self and hasattr(module, 'reorder_incremental_state'):
- module.reorder_incremental_state(new_order)
- self.apply(apply_reorder_incremental_state)
-
- def set_beam_size(self, beam_size):
- """Sets the beam size in the decoder and all children."""
- def apply_set_beam_size(module):
- if module != self and hasattr(module, 'set_beam_size'):
- module.set_beam_size(beam_size)
- self.apply(apply_set_beam_size)
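The contract documented above is: clear the cache before generating a fresh sequence, cache whatever each step needs, and reorder the cached rows whenever beam search permutes the batch. A torch-free toy that follows the same contract (the "state" here is just a per-row token history, purely for illustration):

class ToyIncrementalDecoder:
    def __init__(self):
        self._state = {}

    def clear_incremental_state(self):
        # call before generating a fresh sequence
        self._state = {}

    def reorder_incremental_state(self, new_order):
        # mirror index_select: row i of the new state is old row new_order[i]
        for key, rows in self._state.items():
            self._state[key] = [rows[i] for i in new_order]

    def step(self, tokens):
        # pretend the cached "hidden state" is the running history per row
        prev = self._state.get('history', [[] for _ in tokens])
        self._state['history'] = [h + [t] for h, t in zip(prev, tokens)]
        return self._state['history']

dec = ToyIncrementalDecoder()
dec.clear_incremental_state()
dec.step(['a', 'x'])
dec.step(['b', 'y'])
dec.reorder_incremental_state([1, 0])   # beam search swapped the two rows
print(dec.step(['c', 'z']))             # [['x', 'y', 'c'], ['a', 'b', 'z']]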
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_model.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_model.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/fairseq_model.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch.nn as nn
-
-from . import FairseqDecoder, FairseqEncoder
-
-
-class FairseqModel(nn.Module):
- """Base class for encoder-decoder models."""
-
- def __init__(self, encoder, decoder):
- super().__init__()
-
- self.encoder = encoder
- self.decoder = decoder
- assert isinstance(self.encoder, FairseqEncoder)
- assert isinstance(self.decoder, FairseqDecoder)
-
- self.src_dict = encoder.dictionary
- self.dst_dict = decoder.dictionary
- assert self.src_dict.pad() == self.dst_dict.pad()
- assert self.src_dict.eos() == self.dst_dict.eos()
- assert self.src_dict.unk() == self.dst_dict.unk()
-
- self._is_generation_fast = False
-
- def forward(self, src_tokens, input_tokens):
- encoder_out = self.encoder(src_tokens)
- decoder_out, _ = self.decoder(input_tokens, encoder_out)
- return decoder_out.view(-1, decoder_out.size(-1))
-
- def max_encoder_positions(self):
- """Maximum input length supported by the encoder."""
- return self.encoder.max_positions()
-
- def max_decoder_positions(self):
- """Maximum output length supported by the decoder."""
- return self.decoder.max_positions()
-
- def make_generation_fast_(self, **kwargs):
- """Optimize model for faster generation."""
- if self._is_generation_fast:
- return # only apply once
- self._is_generation_fast = True
-
- # remove weight norm from all modules in the network
- def apply_remove_weight_norm(module):
- try:
- nn.utils.remove_weight_norm(module)
- except ValueError: # this module didn't have weight norm
- return
- self.apply(apply_remove_weight_norm)
-
- def train(mode):
- if mode:
- raise RuntimeError('cannot train after make_generation_fast')
-
- # this model should no longer be used for training
- self.eval()
- self.train = train
-
- def apply_make_generation_fast_(module):
- if module != self and hasattr(module, 'make_generation_fast_'):
- module.make_generation_fast_(**kwargs)
- self.apply(apply_make_generation_fast_)
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/fconv.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/fconv.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/fconv.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import math
-import torch
-from torch.autograd import Variable
-import torch.nn as nn
-import torch.nn.functional as F
-
-from fairseq.data import LanguagePairDataset
-from fairseq.modules import BeamableMM, GradMultiply, LinearizedConvolution
-
-from . import FairseqEncoder, FairseqIncrementalDecoder, FairseqModel
-
-
-def make_positions(tokens, padding_idx, left_pad, offset=0):
- seqlen = tokens.size(1)
- if not hasattr(make_positions, 'range'):
- make_positions.range = tokens.new()
- if make_positions.range.numel() < offset + seqlen:
- # offset positions by the padding index
- torch.arange(padding_idx + 1, padding_idx + 1 + offset + seqlen,
- out=make_positions.range)
- mask = tokens.ne(padding_idx)
- positions = make_positions.range[offset:offset+seqlen].expand_as(tokens)
- if left_pad:
- positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
- return tokens.clone().masked_scatter_(mask, positions[mask])
-
-
-class FConvModel(FairseqModel):
- def __init__(self, encoder, decoder):
- super().__init__(encoder, decoder)
- self.encoder.num_attention_layers = sum(layer is not None for layer in decoder.attention)
-
-
-class FConvEncoder(FairseqEncoder):
- """Convolutional encoder"""
- def __init__(self, dictionary, embed_dim=512, max_positions=1024,
- convolutions=((512, 3),) * 20, dropout=0.1):
- super().__init__()
- self.dictionary = dictionary
- self.dropout = dropout
- self.num_attention_layers = None
-
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
- self.embed_positions = Embedding(max_positions, embed_dim, padding_idx)
-
- in_channels = convolutions[0][0]
- self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
- self.projections = nn.ModuleList()
- self.convolutions = nn.ModuleList()
- for (out_channels, kernel_size) in convolutions:
- pad = (kernel_size - 1) // 2
- self.projections.append(Linear(in_channels, out_channels)
- if in_channels != out_channels else None)
- self.convolutions.append(
- ConvTBC(in_channels, out_channels * 2, kernel_size, padding=pad,
- dropout=dropout))
- in_channels = out_channels
- self.fc2 = Linear(in_channels, embed_dim)
-
- def forward(self, src_tokens):
- positions = Variable(make_positions(src_tokens.data, self.dictionary.pad(),
- left_pad=LanguagePairDataset.LEFT_PAD_SOURCE))
-
- # embed tokens and positions
- x = self.embed_tokens(src_tokens) + self.embed_positions(positions)
- x = F.dropout(x, p=self.dropout, training=self.training)
- input_embedding = x
-
- # project to size of convolution
- x = self.fc1(x)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # temporal convolutions
- for proj, conv in zip(self.projections, self.convolutions):
- residual = x if proj is None else proj(x)
- x = F.dropout(x, p=self.dropout, training=self.training)
- x = conv(x)
- x = F.glu(x, dim=-1)
- x = (x + residual) * math.sqrt(0.5)
-
- # T x B x C -> B x T x C
- x = x.transpose(1, 0)
-
- # project back to size of embedding
- x = self.fc2(x)
-
- # scale gradients (this only affects backward, not forward)
- x = GradMultiply.apply(x, 1.0 / (2.0 * self.num_attention_layers))
-
- # add output to input embedding for attention
- y = (x + input_embedding) * math.sqrt(0.5)
-
- return x, y
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- return self.embed_positions.num_embeddings - self.dictionary.pad() - 1
-
-
-class AttentionLayer(nn.Module):
- def __init__(self, conv_channels, embed_dim, bmm=None):
- super().__init__()
- # projects from output of convolution to embedding dimension
- self.in_projection = Linear(conv_channels, embed_dim)
- # projects from embedding dimension to convolution size
- self.out_projection = Linear(embed_dim, conv_channels)
-
- self.bmm = bmm if bmm is not None else torch.bmm
-
- def forward(self, x, target_embedding, encoder_out):
- residual = x
-
- # attention
- x = (self.in_projection(x) + target_embedding) * math.sqrt(0.5)
- x = self.bmm(x, encoder_out[0])
-
- # softmax over last dim
- sz = x.size()
- x = F.softmax(x.view(sz[0] * sz[1], sz[2]))
- x = x.view(sz)
- attn_scores = x
-
- x = self.bmm(x, encoder_out[1])
-
- # scale attention output
- s = encoder_out[1].size(1)
- x = x * (s * math.sqrt(1.0 / s))
-
- # project back
- x = (self.out_projection(x) + residual) * math.sqrt(0.5)
- return x, attn_scores
-
- def make_generation_fast_(self, beamable_mm_beam_size=None, **kwargs):
- """Replace torch.bmm with BeamableMM."""
- if beamable_mm_beam_size is not None:
- self.bmm = BeamableMM(beamable_mm_beam_size)
-
-
-class FConvDecoder(FairseqIncrementalDecoder):
- """Convolutional decoder"""
- def __init__(self, dictionary, embed_dim=512, out_embed_dim=256,
- max_positions=1024, convolutions=((512, 3),) * 20,
- attention=True, dropout=0.1):
- super().__init__()
- self.dictionary = dictionary
- self.dropout = dropout
-
- in_channels = convolutions[0][0]
- if isinstance(attention, bool):
- # expand True into [True, True, ...] and do the same with False
- attention = [attention] * len(convolutions)
-
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
- self.embed_positions = Embedding(max_positions, embed_dim, padding_idx)
-
- self.fc1 = Linear(embed_dim, in_channels, dropout=dropout)
- self.projections = nn.ModuleList()
- self.convolutions = nn.ModuleList()
- self.attention = nn.ModuleList()
- for i, (out_channels, kernel_size) in enumerate(convolutions):
- pad = kernel_size - 1
- self.projections.append(Linear(in_channels, out_channels)
- if in_channels != out_channels else None)
- self.convolutions.append(
- LinearizedConv1d(in_channels, out_channels * 2, kernel_size,
- padding=pad, dropout=dropout))
- self.attention.append(AttentionLayer(out_channels, embed_dim)
- if attention[i] else None)
- in_channels = out_channels
- self.fc2 = Linear(in_channels, out_embed_dim)
- self.fc3 = Linear(out_embed_dim, num_embeddings, dropout=dropout)
-
- def forward(self, input_tokens, encoder_out):
- if self._is_incremental_eval:
- return self.incremental_forward(input_tokens, encoder_out)
- else:
- return self.batch_forward(input_tokens, encoder_out)
-
- def batch_forward(self, input_tokens, encoder_out):
- """Forward pass for decoding multiple time steps in batch mode."""
- positions = Variable(make_positions(input_tokens.data, self.dictionary.pad(),
- left_pad=LanguagePairDataset.LEFT_PAD_TARGET))
- return self._forward(input_tokens, positions, encoder_out)
-
- def incremental_forward(self, input_tokens, encoder_out):
- """Forward pass for one time step."""
- # positions is the same for every token when decoding a single step
- positions = Variable(input_tokens.data.new(1, 1).fill_(
- self.dictionary.pad() + input_tokens.size(1)))
-
- # keep only the last token for incremental forward pass
- return self._forward(input_tokens[:, -1:], positions, encoder_out)
-
- def _forward(self, input_tokens, positions, encoder_out):
- # split and transpose encoder outputs
- encoder_a, encoder_b = self._split_encoder_out(encoder_out)
-
- # embed tokens and positions
- x = self.embed_tokens(input_tokens) + self.embed_positions(positions)
- x = F.dropout(x, p=self.dropout, training=self.training)
- target_embedding = x
-
- # project to size of convolution
- x = self.fc1(x)
-
- # B x T x C -> T x B x C
- x = self._transpose_unless_incremental_eval(x)
-
- # temporal convolutions
- avg_attn_scores = None
- num_attn_layers = len(self.attention)
- for proj, conv, attention in zip(self.projections, self.convolutions, self.attention):
- residual = x if proj is None else proj(x)
-
- x = F.dropout(x, p=self.dropout, training=self.training)
- x = conv(x)
- x = conv.remove_future_timesteps(x)
- x = F.glu(x)
-
- # attention
- if attention is not None:
- x = self._transpose_unless_incremental_eval(x)
-
- x, attn_scores = attention(x, target_embedding, (encoder_a, encoder_b))
- attn_scores = attn_scores / num_attn_layers
- if avg_attn_scores is None:
- avg_attn_scores = attn_scores
- else:
- avg_attn_scores.add_(attn_scores)
-
- x = self._transpose_unless_incremental_eval(x)
-
- # residual
- x = (x + residual) * math.sqrt(0.5)
-
- # T x B x C -> B x T x C
- x = self._transpose_unless_incremental_eval(x)
-
- # project back to size of vocabulary
- x = self.fc2(x)
- x = F.dropout(x, p=self.dropout, training=self.training)
- x = self.fc3(x)
-
- return x, avg_attn_scores
-
- def reorder_incremental_state(self, new_order):
- """Reorder buffered internal state (for incremental generation)."""
- super().reorder_incremental_state(new_order)
-
- def max_positions(self):
- """Maximum output length supported by the decoder."""
- return self.embed_positions.num_embeddings - self.dictionary.pad() - 1
-
- def _split_encoder_out(self, encoder_out):
- """Split and transpose encoder outputs.
-
- This is cached when doing incremental inference.
- """
- cached_result = self.get_incremental_state('encoder_out')
- if cached_result:
- return cached_result
-
- # transpose only once to speed up attention layers
- encoder_a, encoder_b = encoder_out
- encoder_a = encoder_a.transpose(1, 2).contiguous()
- result = (encoder_a, encoder_b)
-
- return self.set_incremental_state('encoder_out', result)
-
- def _transpose_unless_incremental_eval(self, x):
- if self._is_incremental_eval:
- return x
- return x.transpose(0, 1)
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- m.weight.data.normal_(0, 0.1)
- return m
-
-
-def Linear(in_features, out_features, dropout=0):
- """Weight-normalized Linear layer (input: N x T x C)"""
- m = nn.Linear(in_features, out_features)
- m.weight.data.normal_(mean=0, std=math.sqrt((1 - dropout) / in_features))
- m.bias.data.zero_()
- return nn.utils.weight_norm(m)
-
-
-def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
- """Weight-normalized Conv1d layer optimized for decoding"""
- m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
- std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
- m.weight.data.normal_(mean=0, std=std)
- m.bias.data.zero_()
- return nn.utils.weight_norm(m)
-
-
-def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
- """Weight-normalized Conv1d layer"""
- from fairseq.modules import ConvTBC
- m = ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
- std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels))
- m.weight.data.normal_(mean=0, std=std)
- m.bias.data.zero_()
- return nn.utils.weight_norm(m, dim=2)
-
-
-def get_archs():
- return [
- 'fconv', 'fconv_iwslt_de_en', 'fconv_wmt_en_ro', 'fconv_wmt_en_de', 'fconv_wmt_en_fr',
- ]
-
-
-def _check_arch(args):
- """Check that the specified architecture is valid and not ambiguous."""
- if args.arch not in get_archs():
- raise ValueError('Unknown fconv model architecture: {}'.format(args.arch))
- if args.arch != 'fconv':
- # check that architecture is not ambiguous
- for a in ['encoder_embed_dim', 'encoder_layers', 'decoder_embed_dim', 'decoder_layers',
- 'decoder_out_embed_dim']:
- if hasattr(args, a):
- raise ValueError('--{} cannot be combined with --arch={}'.format(a, args.arch))
-
-
-def parse_arch(args):
- _check_arch(args)
-
- if args.arch == 'fconv_iwslt_de_en':
- args.encoder_embed_dim = 256
- args.encoder_layers = '[(256, 3)] * 4'
- args.decoder_embed_dim = 256
- args.decoder_layers = '[(256, 3)] * 3'
- args.decoder_out_embed_dim = 256
- elif args.arch == 'fconv_wmt_en_ro':
- args.encoder_embed_dim = 512
- args.encoder_layers = '[(512, 3)] * 20'
- args.decoder_embed_dim = 512
- args.decoder_layers = '[(512, 3)] * 20'
- args.decoder_out_embed_dim = 512
- elif args.arch == 'fconv_wmt_en_de':
- convs = '[(512, 3)] * 9' # first 9 layers have 512 units
- convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units
- convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions
- args.encoder_embed_dim = 768
- args.encoder_layers = convs
- args.decoder_embed_dim = 768
- args.decoder_layers = convs
- args.decoder_out_embed_dim = 512
- elif args.arch == 'fconv_wmt_en_fr':
- convs = '[(512, 3)] * 6' # first 6 layers have 512 units
- convs += ' + [(768, 3)] * 4' # next 4 layers have 768 units
- convs += ' + [(1024, 3)] * 3' # next 3 layers have 1024 units
- convs += ' + [(2048, 1)] * 1' # next 1 layer uses 1x1 convolutions
- convs += ' + [(4096, 1)] * 1' # final 1 layer uses 1x1 convolutions
- args.encoder_embed_dim = 768
- args.encoder_layers = convs
- args.decoder_embed_dim = 768
- args.decoder_layers = convs
- args.decoder_out_embed_dim = 512
- else:
- assert args.arch == 'fconv'
-
- # default architecture
- args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
- args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20')
- args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
- args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20')
- args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256)
- args.decoder_attention = getattr(args, 'decoder_attention', 'True')
- return args
-
-
-def build_model(args, src_dict, dst_dict):
- encoder = FConvEncoder(
- src_dict,
- embed_dim=args.encoder_embed_dim,
- convolutions=eval(args.encoder_layers),
- dropout=args.dropout,
- max_positions=args.max_source_positions,
- )
- decoder = FConvDecoder(
- dst_dict,
- embed_dim=args.decoder_embed_dim,
- convolutions=eval(args.decoder_layers),
- out_embed_dim=args.decoder_out_embed_dim,
- attention=eval(args.decoder_attention),
- dropout=args.dropout,
- max_positions=args.max_target_positions,
- )
- return FConvModel(encoder, decoder)
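For the usual contiguous padding, the effect of `make_positions` above is simply that non-pad tokens get consecutive positions starting at `padding_idx + 1`, left to right, while pad slots keep `padding_idx`. A plain-Python rendering of that convention (the real function also takes a `left_pad` flag and works on tensors, but the resulting numbering is the same):

def make_positions(rows, padding_idx):
    out = []
    for row in rows:
        pos, nxt = [], padding_idx + 1
        for tok in row:
            if tok == padding_idx:
                pos.append(padding_idx)   # pad slots keep the padding index
            else:
                pos.append(nxt)           # real tokens count up from it
                nxt += 1
        out.append(pos)
    return out

print(make_positions([[0, 0, 5, 6], [7, 8, 9, 0]], padding_idx=0))
# [[0, 0, 1, 2], [1, 2, 3, 0]]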
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/models/lstm.py b/parlai/agents/fairseq/fairseq_py/fairseq/models/lstm.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/models/lstm.py
+++ /dev/null
@@ -1,336 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-from torch.autograd import Variable
-import torch.nn as nn
-import torch.nn.functional as F
-
-from . import FairseqEncoder, FairseqIncrementalDecoder, FairseqModel
-
-
-class LSTMModel(FairseqModel):
- def __init__(self, encoder, decoder):
- super().__init__(encoder, decoder)
-
-
-class LSTMEncoder(FairseqEncoder):
- """LSTM encoder."""
- def __init__(self, dictionary, embed_dim=512, num_layers=1, dropout_in=0.1,
- dropout_out=0.1):
- super().__init__()
- self.dictionary = dictionary
- self.dropout_in = dropout_in
- self.dropout_out = dropout_out
-
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
-
- self.layers = nn.ModuleList([
- LSTMCell(embed_dim, embed_dim)
- for layer in range(num_layers)
- ])
-
- def forward(self, src_tokens):
- bsz, seqlen = src_tokens.size()
- num_layers = len(self.layers)
-
- # embed tokens
- x = self.embed_tokens(src_tokens)
- x = F.dropout(x, p=self.dropout_in, training=self.training)
- embed_dim = x.size(2)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- final_hiddens, final_cells = [], []
- outs = [x[j] for j in range(seqlen)]
- for i, rnn in enumerate(self.layers):
- hidden = Variable(x.data.new(bsz, embed_dim).zero_())
- cell = Variable(x.data.new(bsz, embed_dim).zero_())
- for j in range(seqlen):
- # recurrent cell
- hidden, cell = rnn(outs[j], (hidden, cell))
-
- # store the most recent hidden state in outs, either to be used
- # as the input for the next layer, or as the final output
- outs[j] = F.dropout(hidden, p=self.dropout_out, training=self.training)
-
- # save the final hidden and cell states for every layer
- final_hiddens.append(hidden)
- final_cells.append(cell)
-
- # collect outputs across time steps
- x = torch.cat(outs, dim=0).view(seqlen, bsz, embed_dim)
- final_hiddens = torch.cat(final_hiddens, dim=0).view(num_layers, bsz, embed_dim)
- final_cells = torch.cat(final_cells, dim=0).view(num_layers, bsz, embed_dim)
-
- return x, final_hiddens, final_cells
-
- def max_positions(self):
- """Maximum input length supported by the encoder."""
- return int(1e5) # an arbitrary large number
-
-
-class AttentionLayer(nn.Module):
- def __init__(self, input_embed_dim, output_embed_dim):
- super().__init__()
-
- self.input_proj = Linear(input_embed_dim, output_embed_dim, bias=False)
- self.output_proj = Linear(2*output_embed_dim, output_embed_dim, bias=False)
-
- def forward(self, input, source_hids):
- # input: bsz x input_embed_dim
- # source_hids: srclen x bsz x output_embed_dim
-
- # x: bsz x output_embed_dim
- x = self.input_proj(input)
-
- # compute attention
- attn_scores = (source_hids * x.unsqueeze(0)).sum(dim=2)
- attn_scores = F.softmax(attn_scores.t()).t() # srclen x bsz
-
- # sum weighted sources
- x = (attn_scores.unsqueeze(2) * source_hids).sum(dim=0)
-
- x = F.tanh(self.output_proj(torch.cat((x, input), dim=1)))
- return x, attn_scores
-
-
-class LSTMDecoder(FairseqIncrementalDecoder):
- """LSTM decoder."""
- def __init__(self, dictionary, encoder_embed_dim=512, embed_dim=512,
- out_embed_dim=512, num_layers=1, dropout_in=0.1,
- dropout_out=0.1, attention=True):
- super().__init__()
- self.dictionary = dictionary
- self.dropout_in = dropout_in
- self.dropout_out = dropout_out
-
- num_embeddings = len(dictionary)
- padding_idx = dictionary.pad()
- self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx)
-
- self.layers = nn.ModuleList([
- LSTMCell(encoder_embed_dim + embed_dim if layer == 0 else embed_dim, embed_dim)
- for layer in range(num_layers)
- ])
- self.attention = AttentionLayer(encoder_embed_dim, embed_dim)
- if embed_dim != out_embed_dim:
- self.additional_fc = Linear(embed_dim, out_embed_dim)
- self.fc_out = Linear(out_embed_dim, num_embeddings, dropout=dropout_out)
-
- def forward(self, input_tokens, encoder_out):
- if self._is_incremental_eval:
- input_tokens = input_tokens[:, -1:]
- return self._forward(input_tokens, encoder_out)
-
- def _forward(self, input_tokens, encoder_out):
- bsz, seqlen = input_tokens.size()
-
- # get outputs from encoder
- encoder_outs, _, _ = encoder_out
- srclen = encoder_outs.size(0)
-
- # embed tokens
- x = self.embed_tokens(input_tokens)
- x = F.dropout(x, p=self.dropout_in, training=self.training)
- embed_dim = x.size(2)
-
- # B x T x C -> T x B x C
- x = x.transpose(0, 1)
-
- # initialize previous states (or get from cache during incremental generation)
- prev_hiddens = self.get_incremental_state('prev_hiddens')
- if not prev_hiddens:
- # first time step, initialize previous states
- prev_hiddens, prev_cells = self._init_prev_states(input_tokens, encoder_out)
- input_feed = Variable(x.data.new(bsz, embed_dim).zero_())
- else:
- # previous states are cached
- prev_cells = self.get_incremental_state('prev_cells')
- input_feed = self.get_incremental_state('input_feed')
-
- attn_scores = Variable(x.data.new(srclen, seqlen, bsz).zero_())
- outs = []
- for j in range(seqlen):
- # input feeding: concatenate context vector from previous time step
- input = torch.cat((x[j, :, :], input_feed), dim=1)
-
- for i, rnn in enumerate(self.layers):
- # recurrent cell
- hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i]))
-
- # hidden state becomes the input to the next layer
- input = F.dropout(hidden, p=self.dropout_out, training=self.training)
-
- # save state for next time step
- prev_hiddens[i] = hidden
- prev_cells[i] = cell
-
- # apply attention using the last layer's hidden state
- out, attn_scores[:, j, :] = self.attention(hidden, encoder_outs)
- out = F.dropout(out, p=self.dropout_out, training=self.training)
-
- # input feeding
- input_feed = out
-
- # save final output
- outs.append(out)
-
- # cache previous states (no-op except during incremental generation)
- self.set_incremental_state('prev_hiddens', prev_hiddens)
- self.set_incremental_state('prev_cells', prev_cells)
- self.set_incremental_state('input_feed', input_feed)
-
- # collect outputs across time steps
- x = torch.cat(outs, dim=0).view(seqlen, bsz, embed_dim)
-
- # T x B x C -> B x T x C
- x = x.transpose(1, 0)
-
- # srclen x tgtlen x bsz -> bsz x tgtlen x srclen
- attn_scores = attn_scores.transpose(0, 2)
-
- # project back to size of vocabulary
- if hasattr(self, 'additional_fc'):
- x = self.additional_fc(x)
- x = F.dropout(x, p=self.dropout_out, training=self.training)
- x = self.fc_out(x)
-
- return x, attn_scores
-
- def reorder_incremental_state(self, new_order):
- """Reorder buffered internal state (for incremental generation)."""
- super().reorder_incremental_state(new_order)
- new_order = Variable(new_order)
-
- def reorder_state(key):
- old = self.get_incremental_state(key)
- if isinstance(old, list):
- new = [old_i.index_select(0, new_order) for old_i in old]
- else:
- new = old.index_select(0, new_order)
- self.set_incremental_state(key, new)
-
- reorder_state('prev_hiddens')
- reorder_state('prev_cells')
- reorder_state('input_feed')
-
- def max_positions(self):
- """Maximum output length supported by the decoder."""
- return int(1e5) # an arbitrary large number
-
- def _init_prev_states(self, input_tokens, encoder_out):
- _, encoder_hiddens, encoder_cells = encoder_out
- num_layers = len(self.layers)
- prev_hiddens = [encoder_hiddens[i] for i in range(num_layers)]
- prev_cells = [encoder_cells[i] for i in range(num_layers)]
- return prev_hiddens, prev_cells
-
-
-def Embedding(num_embeddings, embedding_dim, padding_idx):
- m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
- m.weight.data.uniform_(-0.1, 0.1)
- return m
-
-
-def LSTMCell(input_dim, hidden_dim, **kwargs):
- m = nn.LSTMCell(input_dim, hidden_dim, **kwargs)
- for name, param in m.named_parameters():
- if 'weight' in name or 'bias' in name:
- param.data.uniform_(-0.1, 0.1)
- return m
-
-
-def Linear(in_features, out_features, bias=True, dropout=0):
- """Weight-normalized Linear layer (input: N x T x C)"""
- m = nn.Linear(in_features, out_features, bias=bias)
- m.weight.data.uniform_(-0.1, 0.1)
- if bias:
- m.bias.data.uniform_(-0.1, 0.1)
- return m
-
-
-def get_archs():
- return [
- 'lstm', 'lstm_wiseman_iwslt_de_en', 'lstm_luong_wmt_en_de',
- ]
-
-
-def _check_arch(args):
- """Check that the specified architecture is valid and not ambiguous."""
- if args.arch not in get_archs():
- raise ValueError('Unknown LSTM model architecture: {}'.format(args.arch))
- if args.arch != 'lstm':
- # check that architecture is not ambiguous
- for a in ['encoder_embed_dim', 'encoder_layers', 'decoder_embed_dim', 'decoder_layers',
- 'decoder_out_embed_dim']:
- if hasattr(args, a):
- raise ValueError('--{} cannot be combined with --arch={}'.format(a, args.arch))
-
-
-def parse_arch(args):
- _check_arch(args)
-
- if args.arch == 'lstm_wiseman_iwslt_de_en':
- args.encoder_embed_dim = 256
- args.encoder_layers = 1
- args.encoder_dropout_in = 0
- args.encoder_dropout_out = 0
- args.decoder_embed_dim = 256
- args.decoder_layers = 1
- args.decoder_out_embed_dim = 256
- args.decoder_attention = True
- args.decoder_dropout_in = 0
- elif args.arch == 'lstm_luong_wmt_en_de':
- args.encoder_embed_dim = 1000
- args.encoder_layers = 4
- args.encoder_dropout_out = 0
- args.decoder_embed_dim = 1000
- args.decoder_layers = 4
- args.decoder_out_embed_dim = 1000
- args.decoder_attention = True
- args.decoder_dropout_out = 0
- else:
- assert args.arch == 'lstm'
-
- # default architecture
- args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
- args.encoder_layers = getattr(args, 'encoder_layers', 1)
- args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout)
- args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout)
- args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
- args.decoder_layers = getattr(args, 'decoder_layers', 1)
- args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512)
- args.decoder_attention = getattr(args, 'decoder_attention', True)
- args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout)
- args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout)
- return args
-
-
-def build_model(args, src_dict, dst_dict):
- encoder = LSTMEncoder(
- src_dict,
- embed_dim=args.encoder_embed_dim,
- num_layers=int(args.encoder_layers),
- dropout_in=args.encoder_dropout_in,
- dropout_out=args.encoder_dropout_out,
- )
- decoder = LSTMDecoder(
- dst_dict,
- encoder_embed_dim=args.encoder_embed_dim,
- embed_dim=args.decoder_embed_dim,
- out_embed_dim=args.decoder_out_embed_dim,
- num_layers=int(args.decoder_layers),
- attention=bool(args.decoder_attention),
- dropout_in=args.decoder_dropout_in,
- dropout_out=args.decoder_dropout_out,
- )
- return LSTMModel(encoder, decoder)
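
The deleted parse_arch above resolves hyperparameters in layers: a named --arch preset writes its values first, and getattr fallbacks then supply generic defaults for anything still unset. The standalone sketch below is illustrative only (resolve_arch and _PRESETS are invented names, not part of this patch, and only two of the preset fields are carried over):

from types import SimpleNamespace

_PRESETS = {
    'lstm_wiseman_iwslt_de_en': {'encoder_embed_dim': 256, 'decoder_embed_dim': 256},
    'lstm_luong_wmt_en_de': {'encoder_embed_dim': 1000, 'decoder_embed_dim': 1000},
}

def resolve_arch(args):
    # 1) the named preset writes its values onto the namespace
    for key, value in _PRESETS.get(args.arch, {}).items():
        setattr(args, key, value)
    # 2) generic defaults fill whatever is still missing
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
    return args

print(vars(resolve_arch(SimpleNamespace(arch='lstm'))))                  # both dims fall back to 512
print(vars(resolve_arch(SimpleNamespace(arch='lstm_luong_wmt_en_de'))))  # both dims come from the preset
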
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/modules/__init__.py b/parlai/agents/fairseq/fairseq_py/fairseq/modules/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/modules/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from .beamable_mm import BeamableMM
-from .conv_tbc import ConvTBC
-from .grad_multiply import GradMultiply
-from .linearized_convolution import LinearizedConvolution
-
-__all__ = [
- 'BeamableMM',
- 'ConvTBC',
- 'GradMultiply',
- 'LinearizedConvolution',
-]
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/modules/beamable_mm.py b/parlai/agents/fairseq/fairseq_py/fairseq/modules/beamable_mm.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/modules/beamable_mm.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-import torch.nn as nn
-
-
-class BeamableMM(nn.Module):
- """This module provides an optimized MM for beam decoding with attention.
-
-    It leverages the fact that the source-side of the input is replicated beam
- times and the target-side of the input is of width one. This layer speeds up
- inference by replacing the inputs {(bsz x 1 x nhu), (bsz x sz2 x nhu)}
- with smaller inputs {(bsz/beam x beam x nhu), (bsz/beam x sz2 x nhu)}.
- """
- def __init__(self, beam_size=None):
- super(BeamableMM, self).__init__()
- self.beam_size = beam_size
-
- def forward(self, input1, input2):
- if (
- not self.training and # test mode
- self.beam_size is not None and # beam size is set
- input1.dim() == 3 and # only support batched input
- input1.size(1) == 1 # single time step update
- ):
- bsz, beam = input1.size(0), self.beam_size
-
- # bsz x 1 x nhu --> bsz/beam x beam x nhu
- input1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)
-
- # bsz x sz2 x nhu --> bsz/beam x sz2 x nhu
- input2 = input2.unfold(0, beam, beam)[:, :, :, 0]
-
- # use non batched operation if bsz = beam
- if input1.size(0) == 1:
- output = torch.mm(input1[0, :, :], input2[0, :, :])
- else:
- output = input1.bmm(input2)
- return output.view(bsz, 1, -1)
- else:
- return input1.bmm(input2)
-
- def set_beam_size(self, beam_size):
- self.beam_size = beam_size
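
A hedged illustration (not part of the patch) of the reshaping trick BeamableMM relies on: because beam search replicates the source-side tensor beam times along the batch, the batch can be folded so each sentence's source is multiplied once against a beam-wide block. The toy sizes and names below are invented for the example.

import torch

beam, sents, srclen, nhu = 3, 2, 7, 4
bsz = sents * beam                                    # batch is sentence-major, beam-minor

input1 = torch.randn(bsz, 1, nhu)                     # one decoder state per beam
per_sent = torch.randn(sents, nhu, srclen)            # one source encoding per sentence
input2 = per_sent.repeat_interleave(beam, dim=0)      # replicated beam times, as in beam search

folded1 = input1[:, 0, :].unfold(0, beam, beam).transpose(2, 1)   # sents x beam x nhu
folded2 = input2.unfold(0, beam, beam)[:, :, :, 0]                # sents x nhu x srclen (one copy per sentence)

out = folded1.bmm(folded2).view(bsz, 1, -1)
print(torch.allclose(out, input1.bmm(input2), atol=1e-6))         # True: same result, fewer but larger matmuls
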
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/modules/conv_tbc.py b/parlai/agents/fairseq/fairseq_py/fairseq/modules/conv_tbc.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/modules/conv_tbc.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-from torch.autograd import Variable, Function
-from torch.nn.modules.utils import _single
-
-try:
- from fairseq import temporal_convolution_tbc
-except ImportError as e:
- import sys
- sys.stderr.write('ERROR: missing temporal_convolution_tbc, run `python setup.py install`\n')
- raise e
-
-
-class ConvTBC(torch.nn.Module):
- """1D convolution over an input of shape (time x batch x channel)
-
- The implementation uses gemm to perform the convolution. This implementation
- is faster than cuDNN for small kernel sizes.
- """
- def __init__(self, in_channels, out_channels, kernel_size, stride=1,
- padding=0):
- super(ConvTBC, self).__init__()
- self.in_channels = in_channels
- self.out_channels = out_channels
- self.kernel_size = _single(kernel_size)
- self.stride = _single(stride)
- self.padding = _single(padding)
- assert self.stride == (1,)
-
- self.weight = torch.nn.Parameter(torch.Tensor(
- self.kernel_size[0], in_channels, out_channels))
- self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
-
- def forward(self, input):
- return ConvTBCFunction.apply(
- input.contiguous(), self.weight, self.bias, self.padding[0])
-
- def __repr__(self):
- s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
- ', padding={padding}')
- if self.bias is None:
- s += ', bias=False'
- s += ')'
- return s.format(name=self.__class__.__name__, **self.__dict__)
-
-
-class ConvTBCFunction(Function):
- @staticmethod
- def forward(ctx, input, weight, bias, pad):
- input_size = input.size()
- weight_size = weight.size()
- kernel_size = weight_size[0]
-
- output = input.new(
- input_size[0] - kernel_size + 1 + pad * 2,
- input_size[1],
- weight_size[2])
-
- ctx.input_size = input_size
- ctx.weight_size = weight_size
- ctx.save_for_backward(input, weight)
- temporal_convolution_tbc.TemporalConvolutionTBC_forward(
- input.type().encode('utf-8'),
- input,
- output,
- weight,
- bias)
-
- return output
-
- @staticmethod
- def backward(ctx, grad_output):
- input, weight = ctx.saved_tensors
-
- grad_output = grad_output.data.contiguous()
- grad_input = grad_output.new(ctx.input_size).zero_()
- grad_weight = grad_output.new(ctx.weight_size).zero_()
- grad_bias = grad_output.new(ctx.weight_size[2])
-
- temporal_convolution_tbc.TemporalConvolutionTBC_backward(
- input.type().encode('utf-8'),
- grad_output,
- grad_input,
- grad_weight,
- grad_bias,
- input,
- weight)
-
- grad_input = Variable(grad_input, volatile=True)
- grad_weight = Variable(grad_weight, volatile=True)
- grad_bias = Variable(grad_bias, volatile=True)
-
- return grad_input, grad_weight, grad_bias, None
-
-
-def conv_tbc(input, weight, bias=None, stride=1, padding=0):
- return ConvTBCFunction.apply(
- input.contiguous(), weight, bias, padding[0])
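
For reference, a hedged sketch (not part of the patch) of what ConvTBC computes, checked against nn.Conv1d: the layouts differ only by a permute, and with stride 1 the output length is T - kernel_size + 1 + 2 * padding.

import torch
import torch.nn as nn

T, B, C_in, C_out, K, pad = 10, 2, 4, 6, 3, 1
x_tbc = torch.randn(T, B, C_in)                  # time x batch x channel, as ConvTBC expects

conv = nn.Conv1d(C_in, C_out, K, padding=pad)
y_bct = conv(x_tbc.permute(1, 2, 0))             # batch x channel x time
y_tbc = y_bct.permute(2, 0, 1)                   # back to time x batch x channel
print(y_tbc.shape)                               # torch.Size([10, 2, 6]); 10 - 3 + 1 + 2*1 = 10
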
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/modules/grad_multiply.py b/parlai/agents/fairseq/fairseq_py/fairseq/modules/grad_multiply.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/modules/grad_multiply.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-
-
-class GradMultiply(torch.autograd.Function):
- @staticmethod
- def forward(ctx, x, scale):
- ctx.scale = scale
- res = x.new(x)
- ctx.mark_shared_storage((x, res))
- return res
-
- @staticmethod
- def backward(ctx, grad):
- return grad * ctx.scale, None
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/modules/linearized_convolution.py b/parlai/agents/fairseq/fairseq_py/fairseq/modules/linearized_convolution.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/modules/linearized_convolution.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-import torch.nn.functional as F
-from .conv_tbc import ConvTBC
-
-
-class LinearizedConvolution(ConvTBC):
- """An optimized version of nn.Conv1d.
-
- At training time, this module uses ConvTBC, which is an optimized version
- of Conv1d. At inference time, it optimizes incremental generation (i.e.,
- one time step at a time) by replacing the convolutions with linear layers.
- """
-
- def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
- super().__init__(in_channels, out_channels, kernel_size, **kwargs)
- self._is_incremental_eval = False
- self._linearized_weight = None
- self.register_backward_hook(self._clear_linearized_weight)
-
- def remove_future_timesteps(self, x):
- """Remove future time steps created by padding."""
- if not self._is_incremental_eval and self.kernel_size[0] > 1 and self.padding[0] > 0:
- x = x[:-self.padding[0], :, :]
- return x
-
- def incremental_eval(self, mode=True):
- self._is_incremental_eval = mode
- if mode:
- self.clear_incremental_state()
-
- def forward(self, input):
- if self._is_incremental_eval:
- return self.incremental_forward(input)
- else:
- return super().forward(input)
-
- def incremental_forward(self, input):
- """Forward convolution one time step at a time.
-
- This function maintains an internal state to buffer signal and accepts
- a single frame as input. If the input order changes between time steps,
- call reorder_incremental_state. To apply to fresh inputs, call
- clear_incremental_state.
- """
- # reshape weight
- weight = self._get_linearized_weight()
- kw = self.kernel_size[0]
-
- bsz = input.size(0) # input: bsz x len x dim
- if kw > 1:
- input = input.data
- if self.input_buffer is None:
- self.input_buffer = input.new(bsz, kw, input.size(2))
- self.input_buffer.zero_()
- else:
- # shift buffer
- self.input_buffer[:, :-1, :] = self.input_buffer[:, 1:, :].clone()
- # append next input
- self.input_buffer[:, -1, :] = input[:, -1, :]
- input = torch.autograd.Variable(self.input_buffer, volatile=True)
- output = F.linear(input.view(bsz, -1), weight, self.bias)
- return output.view(bsz, 1, -1)
-
- def clear_incremental_state(self):
- self.input_buffer = None
-
- def reorder_incremental_state(self, new_order):
- if self.input_buffer is not None:
- self.input_buffer = self.input_buffer.index_select(0, new_order)
-
- def _get_linearized_weight(self):
- if self._linearized_weight is None:
- kw = self.kernel_size[0]
- weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
- assert weight.size() == (self.out_channels, kw, self.in_channels)
- self._linearized_weight = weight.view(self.out_channels, -1)
- return self._linearized_weight
-
- def _clear_linearized_weight(self, *args):
- self._linearized_weight = None
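
A hedged sketch (not part of the patch) of the linearization idea above: at inference time, one step of a width-k convolution over the last k buffered frames is just a matmul against the flattened kernel. The check below uses nn.Conv1d's weight layout rather than ConvTBC's, purely for self-containment.

import torch
import torch.nn as nn
import torch.nn.functional as F

k, c_in, c_out, bsz = 3, 4, 5, 2
conv = nn.Conv1d(c_in, c_out, k)
frames = torch.randn(bsz, c_in, k)                       # the k most recent time steps

step_conv = conv(frames)[:, :, -1]                       # convolution output at the newest step
flat_w = conv.weight.view(c_out, -1)                     # c_out x (c_in * k)
step_lin = F.linear(frames.reshape(bsz, -1), flat_w, conv.bias)
print(torch.allclose(step_conv, step_lin, atol=1e-6))    # True
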
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_event_loop.py b/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_event_loop.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_event_loop.py
+++ /dev/null
@@ -1,167 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import os
-import signal
-import threading
-from torch import multiprocessing
-
-
-class MultiprocessingEventLoop(object):
- """Start a multiprocessing event loop."""
-
- def __init__(self, device_ids=None, multiprocessing_method='spawn'):
- super().__init__()
- self.device_ids = tuple(device_ids)
- self.num_replicas = len(device_ids)
- self.rank = None
-
- self._mp = multiprocessing.get_context(multiprocessing_method)
-
- self._start_error_handler()
- self._start_multiprocessing()
-
- def call_async(self, rank, action, **kwargs):
- """Asynchronously call a function in each child process.
-
- Call a function named `action` on the rank'th process and return
- a Future with the result.
- """
-
- def result_generator():
- yield self.return_pipes[rank].recv()
-
- assert not self.return_pipes[rank].poll(), \
- 'return pipe must be consumed before calling another function'
- self.input_pipes[rank].send((action, kwargs))
-
- return Future(result_generator())
-
- def stop(self, interrupt_children=False):
- """Stop multiprocessing."""
- for rank in range(self.num_replicas):
- self.input_pipes[rank].close()
- self.return_pipes[rank].close()
- if interrupt_children:
- # send KeyboardInterrupt to children
- os.kill(self.procs[rank].pid, signal.SIGINT)
- else:
- self.procs[rank].join()
- self.error_queue.put((None, None)) # poison pill
-
- def _start_error_handler(self):
- """Error handler to catch exceptions in child processes."""
- # create a thread to listen for errors in the child processes
- self.error_queue = self._mp.SimpleQueue()
- error_thread = threading.Thread(target=self._error_listener,
- daemon=True)
- error_thread.start()
-
- # create signal handler that executes in the main process/thread and
- # handles errors from child processes
- signal.signal(signal.SIGUSR1, self._signal_handler)
-
- def _error_listener(self):
- """A thread that listens for errors in the child processes.
-
- Errors are handled in a signal handler in the main thread.
- """
- (rank, original_trace) = self.error_queue.get()
- if rank is None: # poison pill, return
- return
-
- # requeue error and switch to main thread for handling the error
- self.error_queue.put((rank, original_trace))
- os.kill(os.getpid(), signal.SIGUSR1)
-
- def _signal_handler(self, signal, frame):
- """Signal handler that handles errors from child processes.
-
- This signal handler executes in the main/process thread.
- """
- self.stop(interrupt_children=True)
- (rank, original_trace) = self.error_queue.get()
- msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
- msg += original_trace
- raise Exception(msg)
-
- def _start_multiprocessing(self):
- """Create child processes to run async event loop.
-
- Each process reads input from a Pipe, performs some computation,
- and returns its output to another Pipe.
- """
- # create child processes
- input_pipes = []
- return_pipes = []
- procs = []
- for rank, id in enumerate(self.device_ids):
- recv_input_pipe, send_input_pipe = self._mp.Pipe(duplex=False)
- recv_return_pipe, send_return_pipe = self._mp.Pipe(duplex=False)
- proc = self._mp.Process(
- target=self._process_event_loop,
- args=(rank, id, recv_input_pipe, send_return_pipe),
- daemon=True)
- proc.start()
- input_pipes.append(send_input_pipe)
- return_pipes.append(recv_return_pipe)
- procs.append(proc)
- self.input_pipes = input_pipes
- self.return_pipes = return_pipes
- self.procs = procs
-
- def _process_event_loop(self, rank, device_id, input_pipe, return_pipe):
- """Event loop that runs in each child process.
-
- Event loop:
- - take an action from the input pipe
- - call the corresponding function in this process
- - put the return value in the return pipe
-
- Any exceptions are put in the error queue.
- """
- self.rank = rank
- try:
- # event loop
- while True:
- action, kwargs = input_pipe.recv()
- action_fn = getattr(self, action)
- return_pipe.send(action_fn(rank, device_id, **kwargs))
- except EOFError:
- # input pipe was closed, do nothing
- pass
- except KeyboardInterrupt:
- # killed by parent, do nothing
- pass
- except Exception:
- # propagate exception from child to parent process, keeping
- # original traceback
- import traceback
- self.error_queue.put((rank, traceback.format_exc()))
- finally:
- # cleanup pipes
- input_pipe.close()
- return_pipe.close()
-
-
-class Future(object):
- """A wrapper around a Python generator, with syntactic sugar."""
- def __init__(self, generator):
- self.generator = generator
-
- def gen(self):
- return next(self.generator)
-
- @staticmethod
- def gen_list(gens):
- return [g.gen() for g in gens]
-
- @staticmethod
- def gen_tuple_list(gens):
- list = [g.gen() for g in gens]
- return zip(*list)
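
The call_async/Future machinery above boils down to a request-reply loop over multiprocessing pipes. A minimal hedged sketch with invented names (worker_loop, AsyncResult), not part of the patch:

import multiprocessing as mp

def worker_loop(conn):
    # child process: read (action, kwargs) requests, send results back
    while True:
        try:
            action, kwargs = conn.recv()
        except EOFError:
            break
        if action == 'square':
            conn.send(kwargs['x'] ** 2)

class AsyncResult:
    # consumed later, like the Future returned by call_async
    def __init__(self, conn):
        self._conn = conn
    def gen(self):
        return self._conn.recv()

if __name__ == '__main__':
    parent, child = mp.Pipe()
    proc = mp.Process(target=worker_loop, args=(child,), daemon=True)
    proc.start()
    parent.send(('square', {'x': 7}))      # dispatch without blocking
    result = AsyncResult(parent)           # ...could do other work here...
    print(result.gen())                    # 49
    parent.close()
    proc.join(timeout=1)
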
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_pdb.py b/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_pdb.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_pdb.py
+++ /dev/null
@@ -1,40 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import multiprocessing
-import os
-import pdb
-import sys
-
-
-class MultiprocessingPdb(pdb.Pdb):
- """A Pdb wrapper that works in a multiprocessing environment.
-
- Usage: `from fairseq import pdb; pdb.set_trace()`
- """
-
- _stdin_fd = sys.stdin.fileno()
- _stdin = None
- _stdin_lock = multiprocessing.Lock()
-
- def __init__(self):
- pdb.Pdb.__init__(self, nosigint=True)
-
- def _cmdloop(self):
- stdin_bak = sys.stdin
- with self._stdin_lock:
- try:
- if not self._stdin:
- self._stdin = os.fdopen(self._stdin_fd)
- sys.stdin = self._stdin
- self.cmdloop()
- finally:
- sys.stdin = stdin_bak
-
-
-pdb = MultiprocessingPdb()
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_trainer.py b/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_trainer.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/multiprocessing_trainer.py
+++ /dev/null
@@ -1,317 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-"""
-Train a network on multiple GPUs using multiprocessing.
-"""
-
-from itertools import cycle, islice
-import torch
-from torch.optim.lr_scheduler import LambdaLR, ReduceLROnPlateau
-
-from fairseq import nccl, utils
-from fairseq.multiprocessing_event_loop import MultiprocessingEventLoop, Future
-from fairseq.nag import NAG
-
-
-class MultiprocessingTrainer(MultiprocessingEventLoop):
- """Main class for multi-GPU training.
-
- Each GPU has a full copy of the model and is assigned to its own Python
- process. Gradients are accumulated with all-reduce and all model replicas
- are updated synchronously after each batch.
-
- The methods in this class are divided into synchronous functions, which
- prepare and dispatch the input to each process, and asynchronous functions
- (prefixed with `_async_`), which run on each process in parallel.
- """
-
- OPTIMIZERS = ['adagrad', 'adam', 'nag', 'sgd']
-
- def __init__(self, args, model, criterion, device_ids=None,
- multiprocessing_method='spawn'):
- if device_ids is None:
- device_ids = tuple(range(torch.cuda.device_count()))
- super().__init__(device_ids, multiprocessing_method)
-
- if not torch.cuda.is_available():
- raise NotImplementedError('Training on CPU is not supported')
- model = model.share_memory()
- nccl_uid = nccl.get_unique_id()
- self.criterion = criterion
-
- Future.gen_list([
- self.call_async(rank, '_async_init', args=args, model=model,
- criterion=criterion, nccl_uid=nccl_uid)
- for rank in range(self.num_replicas)
- ])
-
- self._grads_initialized = False
-
- def _async_init(self, rank, device_id, args, model, criterion, nccl_uid):
- """Initialize child processes."""
- self.args = args
-
- # set CUDA device
- torch.cuda.set_device(device_id)
-
- # initialize NCCL
- nccl.initialize(self.num_replicas, nccl_uid, device_id)
-
- # copy model and criterion to current device
- self.model = model.cuda()
- self.criterion = criterion.cuda()
-
- # initialize optimizer
- self.optimizer = self._build_optimizer()
- self.loss = None
-
- # initialize LR scheduler
- self.lr_scheduler = self._build_lr_scheduler()
-
- def _build_optimizer(self):
- if self.args.optimizer == 'adagrad':
- return torch.optim.Adagrad(self.model.parameters(), lr=self.args.lr,
- weight_decay=self.args.weight_decay)
- elif self.args.optimizer == 'adam':
- return torch.optim.Adam(self.model.parameters(), lr=self.args.lr,
- betas=eval(self.args.adam_betas),
- weight_decay=self.args.weight_decay)
- elif self.args.optimizer == 'nag':
- return NAG(self.model.parameters(), lr=self.args.lr,
- momentum=self.args.momentum,
- weight_decay=self.args.weight_decay)
- elif self.args.optimizer == 'sgd':
- return torch.optim.SGD(self.model.parameters(), lr=self.args.lr,
- momentum=self.args.momentum,
- weight_decay=self.args.weight_decay)
- else:
- raise ValueError('Unknown optimizer: {}'.format(self.args.optimizer))
-
- def _build_lr_scheduler(self):
- if self.args.force_anneal > 0:
- def anneal(e):
- if e < self.args.force_anneal:
- return 1
- else:
- return self.args.lrshrink ** (e + 1 - self.args.force_anneal)
- lr_scheduler = LambdaLR(self.optimizer, anneal)
- lr_scheduler.best = None
- else:
- # decay the LR by a factor every time the validation loss plateaus
- lr_scheduler = ReduceLROnPlateau(self.optimizer, patience=0,
- factor=self.args.lrshrink)
- return lr_scheduler
-
- def get_model(self):
- """Get one of the model replicas."""
- # just return the first model, since all replicas are the same
- return self.call_async(0, '_async_get_model').gen()
-
- def _async_get_model(self, rank, device_id):
- return self.model
-
- def save_checkpoint(self, filename, extra_state):
- """Save a checkpoint for the current model."""
- self.call_async(0, '_async_save_checkpoint', filename=filename, extra_state=extra_state).gen()
-
- def _async_save_checkpoint(self, rank, device_id, filename, extra_state):
- utils.save_state(filename, self.args, self.model, self.criterion, self.optimizer,
- self.lr_scheduler, self._optim_history, extra_state)
-
- def load_checkpoint(self, filename):
- """Load a checkpoint into the model replicas in each process."""
- results = Future.gen_list([
- self.call_async(rank, '_async_load_checkpoint', filename=filename)
- for rank in range(self.num_replicas)
- ])
- extra_state = results[0]
- return extra_state
-
- def _async_load_checkpoint(self, rank, device_id, filename):
- extra_state, self._optim_history = utils.load_state(
- filename, self.model, self.criterion, self.optimizer,
- self.lr_scheduler, cuda_device=device_id)
- return extra_state
-
- def set_seed(self, seed):
- Future.gen_list([
- self.call_async(rank, '_async_set_seed', seed=seed)
- for rank in range(self.num_replicas)
- ])
-
- def _async_set_seed(self, rank, device_id, seed):
- torch.manual_seed(seed)
-
- def train_step(self, samples):
- """Do forward, backward and gradient step in parallel."""
- # PyTorch initializes gradient buffers lazily, so the first
- # train step needs to send non-empty samples to all replicas
- replace_empty_samples = False
- if not self._grads_initialized:
- replace_empty_samples = True
- self._grads_initialized = True
-
- # scatter sample across GPUs
- self._scatter_samples(samples, replace_empty_samples=replace_empty_samples)
-
- # forward pass
- sample_sizes, logging_outputs = Future.gen_tuple_list([
- self.call_async(rank, '_async_forward')
- for rank in range(self.num_replicas)
- ])
-
- # backward pass, all-reduce gradients and take an optimization step
- grad_denom = self.criterion.__class__.grad_denom(sample_sizes)
- grad_norms = Future.gen_list([
- self.call_async(rank, '_async_backward_and_opt', grad_denom=grad_denom)
- for rank in range(self.num_replicas)
- ])
-
- # aggregate logging output
- logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
- logging_output['gnorm'] = grad_norms[0] # log the gradient norm
-
- return logging_output
-
- def _async_forward(self, rank, device_id, eval=False):
- if eval:
- self.model.eval()
- else:
- self.model.train()
- self.optimizer.zero_grad()
-
- if self._sample is None:
- return 0, {}
-
- # calculate loss and sample size
- self.loss, sample_size, logging_output = self.criterion(self.model, self._sample)
-
- return sample_size, logging_output
-
- def _async_backward_and_opt(self, rank, device_id, grad_denom):
- if self.loss is not None:
- # backward pass
- self.loss.backward()
-
- # get model parameters as a flattened (contiguous) tensor
- flat_grads = self._flat_model_grads()
-
- # all-reduce grads
- nccl.all_reduce(flat_grads)
-
- # normalize grads
- if grad_denom != 0:
- flat_grads.div_(grad_denom)
-
- # clip grads
- grad_norm = self._clip_grads_(flat_grads, self.args.clip_norm)
-
- # copy reduced grads back
- self._set_model_grads_(flat_grads)
-
- # take an optimization step
- self.optimizer.step()
-
- # reset loss
- self.loss = None
-
- return grad_norm
-
- def _model_grads(self):
- return [p.grad for p in self.model.parameters() if p.requires_grad]
-
- def _flat_model_grads(self):
- grads = self._model_grads()
- if not hasattr(self, '_flat_grads'):
- num_params = sum(g.data.numel() for g in grads)
- self._flat_grads = grads[0].data.new(num_params)
- offset = 0
- for grad in grads:
- grad = grad.data.view(-1)
- numel = grad.numel()
- self._flat_grads[offset:offset+numel].copy_(grad)
- offset += numel
- return self._flat_grads
-
- def _set_model_grads_(self, flat_grads):
- grads = self._model_grads()
- offset = 0
- for grad in grads:
- grad = grad.data.view(-1)
- numel = grad.numel()
- grad.copy_(flat_grads[offset:offset+numel])
- offset += numel
- assert offset == flat_grads.numel()
-
- def _clip_grads_(self, flat_grads, clipv):
- """nn.utils.clip_grad_norm for flattened (contiguous) tensors."""
- norm = flat_grads.norm()
- if clipv > 0 and norm > clipv:
- coef = max(norm, 1e-6) / clipv
- flat_grads.div_(coef)
- return norm
-
- def valid_step(self, samples):
- """Do forward pass in parallel."""
- # scatter sample across GPUs
- self._scatter_samples(samples, volatile=True)
-
- # forward pass
- _sample_sizes, logging_outputs = Future.gen_tuple_list([
- self.call_async(rank, '_async_forward', eval=True)
- for rank in range(self.num_replicas)
- ])
-
- # aggregate logging output
- logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs)
-
- return logging_output
-
- def get_lr(self):
- """Get the current learning rate."""
- return self.call_async(0, '_async_get_lr').gen()
-
- def _async_get_lr(self, rank, device_id):
- return self.optimizer.param_groups[0]['lr']
-
- def lr_step(self, val_loss=None, epoch=None):
- """Adjust the learning rate depending on the validation loss."""
- lr = Future.gen_list([
- self.call_async(rank, '_async_lr_step', val_loss=val_loss, epoch=epoch)
- for rank in range(self.num_replicas)
- ])
- return lr[0]
-
- def _async_lr_step(self, rank, device_id, epoch, val_loss):
- # update the learning rate
- if self.args.force_anneal > 0:
- self.lr_scheduler.step(epoch)
- else:
- self.lr_scheduler.step(val_loss, epoch)
- return self.optimizer.param_groups[0]['lr']
-
- def _scatter_samples(self, samples, volatile=False, replace_empty_samples=False):
- """Split and distribute a sample across GPUs."""
- if not replace_empty_samples:
- # pad with None until its size is equal to the number of replicas
- samples = samples + [None]*(self.num_replicas - len(samples))
- else:
- # pad by cycling through the given samples
- samples = list(islice(cycle(samples), self.num_replicas))
-
- Future.gen_list([
- self.call_async(rank, '_async_prepare_sample', sample=samples[rank], volatile=volatile)
- for rank in range(self.num_replicas)
- ])
-
- def _async_prepare_sample(self, rank, device_id, sample, volatile):
- if sample is None:
- self._sample = None
- else:
- self._sample = utils.prepare_sample(sample, volatile=volatile, cuda_device=device_id)
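
The backward step above runs a flatten / all-reduce / clip / copy-back cycle over gradients. A hedged, CPU-only sketch (not part of the patch) of the same bookkeeping, with the NCCL all-reduce omitted:

import torch

grads = [torch.randn(3, 2), torch.randn(4)]               # per-parameter gradients
flat = torch.cat([g.view(-1) for g in grads])             # one contiguous buffer
# ...this is where the gradients would be all-reduced across replicas...

clip_norm = 1.0
norm = flat.norm()
if norm > clip_norm:                                       # clip by global norm
    flat.div_(norm / clip_norm)

offset = 0                                                 # scatter the clipped buffer back
for g in grads:
    n = g.numel()
    g.view(-1).copy_(flat[offset:offset + n])
    offset += n
print(float(torch.cat([g.view(-1) for g in grads]).norm()))   # <= 1.0, up to rounding
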
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/nag.py b/parlai/agents/fairseq/fairseq_py/fairseq/nag.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/nag.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from torch.optim.optimizer import Optimizer, required
-
-
-class NAG(Optimizer):
- def __init__(self, params, lr=required, momentum=0, weight_decay=0):
- defaults = dict(lr=lr, momentum=momentum, weight_decay=weight_decay)
- super(NAG, self).__init__(params, defaults)
-
- def step(self, closure=None):
- """Performs a single optimization step.
-
- Arguments:
- closure (callable, optional): A closure that reevaluates the model
- and returns the loss.
- """
- loss = None
- if closure is not None:
- loss = closure()
-
- for group in self.param_groups:
- weight_decay = group['weight_decay']
- momentum = group['momentum']
- lr = group['lr']
-
- for p in group['params']:
- if p.grad is None:
- continue
-
- d_p = p.grad.data
- if weight_decay != 0:
- d_p.add_(weight_decay, p.data)
-
- param_state = self.state[p]
- if 'momentum_buffer' not in param_state:
- param_state['momentum_buffer'] = d_p.clone().zero_()
-
- buf = param_state['momentum_buffer']
-
- p.data.add_(momentum * momentum, buf)
- p.data.add_(-(1 + momentum) * lr, d_p)
-
- buf.mul_(momentum).add_(-lr, d_p)
-
- return loss
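
Written out for a single scalar parameter, the update performed by NAG.step() above is p <- p + mu^2 * v - (1 + mu) * lr * grad followed by v <- mu * v - lr * grad. A hedged toy run (not part of the patch) on f(p) = 0.5 * p^2, whose gradient is p:

lr, mu = 0.1, 0.9
p, v = 5.0, 0.0
for _ in range(100):
    grad = p                                   # d/dp of 0.5 * p^2
    p += mu * mu * v - (1 + mu) * lr * grad
    v = mu * v - lr * grad
print(round(p, 6))                             # near 0: the iterate spirals into the minimum
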
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/nccl.py b/parlai/agents/fairseq/fairseq_py/fairseq/nccl.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/nccl.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-"""
-A modified version of torch.cuda.nccl.all_reduce for launching kernels on each
-GPU separately.
-"""
-
-import ctypes
-from ctypes.util import find_library
-
-lib = None
-nccl_2_0 = None
-_uid = None
-_rank = None
-_num_devices = None
-_comm = None
-
-__all__ = ['all_reduce', 'initialize', 'get_unique_id']
-
-# ncclDataType_t
-nccl_types = {
- 'torch.cuda.ByteTensor': 0,
- 'torch.cuda.CharTensor': 0,
- 'torch.cuda.IntTensor': 1,
- 'torch.cuda.HalfTensor': 2,
- 'torch.cuda.FloatTensor': 3,
- 'torch.cuda.DoubleTensor': 4,
- 'torch.cuda.LongTensor': 5,
-}
-nccl_types_2_0 = {
- 'torch.cuda.ByteTensor': 0,
- 'torch.cuda.CharTensor': 0,
- 'torch.cuda.IntTensor': 2,
- 'torch.cuda.HalfTensor': 6,
- 'torch.cuda.FloatTensor': 7,
- 'torch.cuda.DoubleTensor': 8,
- 'torch.cuda.LongTensor': 4,
-}
-
-# ncclRedOp_t
-SUM = 0
-PROD = 1
-MAX = 2
-MIN = 3
-
-status_codes_2_0 = {
- 0: "Success",
- 1: "Unhandled Cuda Error",
- 2: "System Error",
- 3: "Internal Error",
- 4: "Invalid Argument Error",
- 5: "Invalid Usage Error",
-}
-
-status_codes = {
- 0: "Success",
- 1: "Unhandled Cuda Error",
- 2: "System Error",
- 3: "Internal Error",
- 4: "Invalid Device Pointer",
- 5: "Invalid Rank",
- 6: "Unsupported Device Count",
- 7: "Device Not Found",
- 8: "Invalid Device Index",
- 9: "Lib Wrapper Not Set",
- 10: "Cuda Malloc Failed",
- 11: "Rank Mismatch",
- 12: "Invalid Argument",
- 13: "Invalid Type",
- 14: "Invalid Operation",
-}
-
-
-def _libnccl():
- global nccl_2_0
- global lib
- global status_codes
- global nccl_types
- if lib is None:
- lib = ctypes.pydll.LoadLibrary(find_library('nccl'))
- if hasattr(lib, 'ncclCommDestroy'):
- lib.ncclCommDestroy.restype = None
- else:
- lib = None
- if hasattr(lib, 'ncclGroupStart'):
- nccl_2_0 = True
- status_codes = status_codes_2_0
- nccl_types = nccl_types_2_0
- return lib
-
-
-class NcclError(RuntimeError):
-
- def __init__(self, status):
- self.status = status
- msg = '{0} ({1})'.format(status_codes.get(status), status)
- super(NcclError, self).__init__(msg)
-
-
-class NcclComm(ctypes.c_void_p):
- def __del__(self):
- lib.ncclCommDestroy(self)
-
-
-class NcclUniqueId(ctypes.Structure):
- _fields_ = [
- ('internal', ctypes.c_uint8 * 128)
- ]
-
-
-def check_error(status):
- if status != 0:
- raise NcclError(status)
-
-
-_uids = []
-
-
-def get_unique_id():
- if _libnccl() is None:
- raise RuntimeError('Unable to load NCCL library')
-
- uid = NcclUniqueId()
- check_error(lib.ncclGetUniqueId(ctypes.byref(uid)))
- _uids.append(uid) # Don't allow UIDs to be collected
- return uid
-
-
-def initialize(num_devices, uid, rank):
- global _num_devices, _uid, _rank
-
- if _libnccl() is None:
- raise RuntimeError('Unable to load NCCL library')
-
- _num_devices = num_devices
- if rank != 0:
- _uid = NcclUniqueId.from_buffer_copy(uid)
- else:
- _uid = uid
- _rank = rank
-
-
-def communicator():
- global _comm
- if _libnccl() is None:
- raise RuntimeError('Unable to load NCCL library')
- if _uid is None:
- raise RuntimeError('NCCL not initialized')
- if _comm is None:
- comm = NcclComm()
- check_error(lib.ncclCommInitRank(
- ctypes.byref(comm),
- ctypes.c_int(_num_devices),
- _uid,
- ctypes.c_int(_rank)))
- _comm = comm
- return _comm
-
-
-def all_reduce(input, output=None, op=SUM, stream=None):
- comm = communicator()
- if output is None:
- output = input
- if stream is not None:
- stream = stream.cuda_stream
- data_type = nccl_types[input.type()]
- check_error(lib.ncclAllReduce(
- ctypes.c_void_p(input.data_ptr()),
- ctypes.c_void_p(output.data_ptr()),
- ctypes.c_size_t(input.numel()),
- data_type,
- op,
- comm,
- ctypes.c_void_p(stream)))
- return output
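
nccl.py above follows the standard ctypes recipe: locate a shared library by name, load it, declare return and argument types, then wrap the status codes. A hedged sketch (not part of the patch) of the same recipe against libm, chosen only because it is commonly available on Unix systems:

import ctypes
from ctypes.util import find_library

libm = ctypes.CDLL(find_library('m'))      # assumes libm can be resolved on this platform
libm.cos.restype = ctypes.c_double         # declare the C signature before calling
libm.cos.argtypes = [ctypes.c_double]
print(libm.cos(0.0))                       # 1.0
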
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/options.py b/parlai/agents/fairseq/fairseq_py/fairseq/options.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/options.py
+++ /dev/null
@@ -1,176 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import argparse
-
-from fairseq import models
-from fairseq.multiprocessing_trainer import MultiprocessingTrainer
-
-
-def get_parser(desc):
- parser = argparse.ArgumentParser(
- description='Facebook AI Research Sequence-to-Sequence Toolkit -- ' + desc)
- parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
- parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
- help='log progress every N updates (when progress bar is disabled)')
- parser.add_argument('--log-format', default=None, help='log format to use',
- choices=['json', 'none', 'simple', 'tqdm'])
- parser.add_argument('--seed', default=1, type=int, metavar='N',
- help='pseudo random number generator seed')
- return parser
-
-
-def add_dataset_args(parser):
- group = parser.add_argument_group('Dataset and data loading')
- group.add_argument('data', metavar='DIR',
- help='path to data directory')
- group.add_argument('-s', '--source-lang', default=None, metavar='SRC',
- help='source language')
- group.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
- help='target language')
- group.add_argument('-j', '--workers', default=1, type=int, metavar='N',
- help='number of data loading workers (default: 1)')
- group.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
- help='max number of tokens in the source sequence')
- group.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
- help='max number of tokens in the target sequence')
- group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
- help='Ignore too long or too short lines in valid and test set')
- return group
-
-
-def add_optimization_args(parser):
- group = parser.add_argument_group('Optimization')
- group.add_argument('--optimizer', default='nag', metavar='OPT',
- choices=MultiprocessingTrainer.OPTIMIZERS,
- help='optimizer ({})'.format(', '.join(MultiprocessingTrainer.OPTIMIZERS)))
- group.add_argument('--lr', '--learning-rate', default=0.25, type=float, metavar='LR',
- help='initial learning rate')
- group.add_argument('--min-lr', metavar='LR', default=1e-5, type=float,
- help='minimum learning rate')
- group.add_argument('--force-anneal', '--fa', default=0, type=int, metavar='N',
- help='force annealing at specified epoch')
- group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
- help='force stop training at specified epoch')
- group.add_argument('--lrshrink', default=0.1, type=float, metavar='LS',
- help='learning rate shrink factor for annealing, lr_new = (lr * lrshrink)')
- group.add_argument('--momentum', default=0.99, type=float, metavar='M',
- help='momentum factor')
- group.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
- help='betas for Adam optimizer')
- group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
- help='clip threshold of gradients')
- group.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
- help='weight decay')
- group.add_argument('--sample-without-replacement', default=0, type=int, metavar='N',
- help='If bigger than 0, use that number of mini-batches for each epoch,'
- ' where each sample is drawn randomly without replacement from the'
- ' dataset')
- group.add_argument('--curriculum', default=0, type=int, metavar='N',
- help='sort batches by source length for first N epochs')
- group.add_argument('--sentence-avg', action='store_true',
- help='normalize gradients by the number of sentences in a batch'
- ' (default is to normalize by number of tokens)')
- return group
-
-
-def add_checkpoint_args(parser):
- group = parser.add_argument_group('Checkpointing')
- group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
- help='path to save checkpoints')
- group.add_argument('--restore-file', default='checkpoint_last.pt',
- help='filename in save-dir from which to load checkpoint')
- group.add_argument('--save-interval', type=int, default=-1,
- help='checkpoint every this many batches')
- group.add_argument('--no-save', action='store_true',
- help='don\'t save models and checkpoints')
- group.add_argument('--no-epoch-checkpoints', action='store_true',
- help='only store last and best checkpoints')
- return group
-
-
-def add_generation_args(parser):
- group = parser.add_argument_group('Generation')
- group.add_argument('--beam', default=5, type=int, metavar='N',
- help='beam size')
- group.add_argument('--nbest', default=1, type=int, metavar='N',
- help='number of hypotheses to output')
- group.add_argument('--max-len-a', default=0, type=float, metavar='N',
- help=('generate sequences of maximum length ax + b, '
- 'where x is the source length'))
- group.add_argument('--max-len-b', default=200, type=int, metavar='N',
- help=('generate sequences of maximum length ax + b, '
- 'where x is the source length'))
- group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
- help='remove BPE tokens before scoring')
- group.add_argument('--no-early-stop', action='store_true',
- help=('continue searching even after finalizing k=beam '
- 'hypotheses; this is more correct, but increases '
- 'generation time by 50%%'))
- group.add_argument('--unnormalized', action='store_true',
- help='compare unnormalized hypothesis scores')
- group.add_argument('--cpu', action='store_true', help='generate on CPU')
- group.add_argument('--no-beamable-mm', action='store_true',
- help='don\'t use BeamableMM in attention layers')
- group.add_argument('--lenpen', default=1, type=float,
- help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
- group.add_argument('--unkpen', default=0, type=float,
- help='unknown word penalty: <0 produces more unks, >0 produces fewer')
- group.add_argument('--replace-unk', nargs='?', const=True, default=None,
- help='perform unknown replacement (optionally with alignment dictionary)')
- group.add_argument('--quiet', action='store_true',
- help='Only print final scores')
-
- return group
-
-
-def add_model_args(parser):
- group = parser.add_argument_group(
- 'Model configuration',
- # Only include attributes which are explicitly given as command-line
- # arguments or which have model-independent default values.
- argument_default=argparse.SUPPRESS,
- )
-
- # The model architecture can be specified in several ways.
- # In increasing order of priority:
- # 1) model defaults (lowest priority)
- # 2) --arch argument
- # 3) --encoder/decoder-* arguments (highest priority)
- # Note: --arch cannot be combined with --encoder/decoder-* arguments.
- group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', choices=models.arch_model_map.keys(),
- help='model architecture ({})'.format(', '.join(models.arch_model_map.keys())))
- group.add_argument('--encoder-embed-dim', type=int, metavar='N',
- help='encoder embedding dimension')
- group.add_argument('--encoder-layers', type=str, metavar='EXPR',
- help='encoder layers [(dim, kernel_size), ...]')
- group.add_argument('--decoder-embed-dim', type=int, metavar='N',
- help='decoder embedding dimension')
- group.add_argument('--decoder-layers', type=str, metavar='EXPR',
- help='decoder layers [(dim, kernel_size), ...]')
- group.add_argument('--decoder-out-embed-dim', type=int, metavar='N',
- help='decoder output embedding dimension')
- group.add_argument('--decoder-attention', type=str, metavar='EXPR',
- help='decoder attention [True, ...]')
-
- # Granular dropout settings for models that support them (e.g., LSTM):
- group.add_argument('--encoder-dropout-in', type=float, metavar='D',
- help='dropout probability for encoder input embedding')
- group.add_argument('--encoder-dropout-out', type=float, metavar='D',
- help='dropout probability for encoder output')
- group.add_argument('--decoder-dropout-in', type=float, metavar='D',
- help='dropout probability for decoder input embedding')
- group.add_argument('--decoder-dropout-out', type=float, metavar='D',
- help='dropout probability for decoder output')
-
- # These arguments have default values independent of the model:
- group.add_argument('--dropout', default=0.1, type=float, metavar='D',
- help='dropout probability')
- group.add_argument('--label-smoothing', default=0, type=float, metavar='D',
- help='epsilon for label smoothing, 0 means no label smoothing')
- return group
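
The reason add_model_args above sets argument_default=argparse.SUPPRESS: flags the user omits never appear on the namespace at all, so later getattr(args, name, default) calls (as in the deleted parse_arch) can tell "not given" apart from "given". A short hedged check, not part of the patch:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('model', argument_default=argparse.SUPPRESS)
group.add_argument('--encoder-embed-dim', type=int)

args = parser.parse_args([])                          # flag omitted entirely
print(hasattr(args, 'encoder_embed_dim'))             # False, not even set to None
print(getattr(args, 'encoder_embed_dim', 512))        # 512, the model-specific fallback

args = parser.parse_args(['--encoder-embed-dim', '256'])
print(getattr(args, 'encoder_embed_dim', 512))        # 256
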
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/progress_bar.py b/parlai/agents/fairseq/fairseq_py/fairseq/progress_bar.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/progress_bar.py
+++ /dev/null
@@ -1,184 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-"""
-Wrapper around various loggers and progress bars (e.g., tqdm).
-"""
-
-from collections import OrderedDict
-import json
-from numbers import Number
-
-from tqdm import tqdm
-
-from fairseq.meters import AverageMeter
-
-
-class progress_bar(object):
- """Abstract class for progress bars."""
- def __init__(self, iterable, epoch=None, prefix=None):
- self.iterable = iterable
- self.epoch = epoch
- self.prefix = ''
- if epoch is not None:
- self.prefix += '| epoch {:03d}'.format(epoch)
- if prefix is not None:
- self.prefix += ' | {}'.format(prefix)
-
- def __enter__(self):
- return self
-
- def __exit__(self, *exc):
- return False
-
- def __iter__(self):
- raise NotImplementedError
-
- def log(self, stats):
- """Log intermediate stats according to log_interval."""
- raise NotImplementedError
-
- def print(self, stats):
- """Print end-of-epoch stats."""
- raise NotImplementedError
-
- def _str_commas(self, stats):
- return ', '.join(key + '=' + stats[key].strip()
- for key in stats.keys())
-
- def _str_pipes(self, stats):
- return ' | '.join(key + ' ' + stats[key].strip()
- for key in stats.keys())
-
- def _format_stats(self, stats):
- postfix = OrderedDict(stats)
- # Preprocess stats according to datatype
- for key in postfix.keys():
- # Number: limit the length of the string
- if isinstance(postfix[key], Number):
- postfix[key] = '{:g}'.format(postfix[key])
- # Meter: display both current and average value
- elif isinstance(postfix[key], AverageMeter):
- postfix[key] = '{:.2f} ({:.2f})'.format(
- postfix[key].val, postfix[key].avg)
- # Else for any other type, try to get the string conversion
- elif not isinstance(postfix[key], str):
- postfix[key] = str(postfix[key])
- # Else if it's a string, don't need to preprocess anything
- return postfix
-
-
-class json_progress_bar(progress_bar):
- """Log output in JSON format."""
-
- def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
- super().__init__(iterable, epoch, prefix)
- self.log_interval = log_interval
- self.stats = None
-
- def __iter__(self):
- size = float(len(self.iterable))
- for i, obj in enumerate(self.iterable):
- yield obj
- if self.stats is not None and i > 0 and \
- self.log_interval is not None and i % self.log_interval == 0:
- update = self.epoch + float(i / size) if self.epoch is not None else None
- stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
- print('sweep_log: ' + json.dumps(stats), flush=True)
-
- def log(self, stats):
- """Log intermediate stats according to log_interval."""
- self.stats = stats
-
- def print(self, stats):
- """Print end-of-epoch stats."""
- stats = self._format_stats(self.stats, epoch=self.epoch)
- print("sweep_log: " + json.dumps(stats), flush=True)
-
- def _format_stats(self, stats, epoch=None, update=None):
- postfix = OrderedDict()
- if epoch is not None:
- postfix['epoch'] = epoch
- if update is not None:
- postfix['update'] = update
- # Preprocess stats according to datatype
- for key in stats.keys():
- # Meter: display both current and average value
- if isinstance(stats[key], AverageMeter):
- postfix[key] = stats[key].val
- postfix[key + '_avg'] = stats[key].avg
- else:
- postfix[key] = stats[key]
- return postfix
-
-
-class noop_progress_bar(progress_bar):
- """No logging."""
-
- def __init__(self, iterable, epoch=None, prefix=None):
- super().__init__(iterable, epoch, prefix)
-
- def __iter__(self):
- for obj in self.iterable:
- yield obj
-
- def log(self, stats):
- """Log intermediate stats according to log_interval."""
- pass
-
- def print(self, stats):
- """Print end-of-epoch stats."""
- pass
-
-
-class simple_progress_bar(progress_bar):
- """A minimal logger for non-TTY environments."""
-
- def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
- super().__init__(iterable, epoch, prefix)
- self.log_interval = log_interval
- self.stats = None
-
- def __iter__(self):
- size = len(self.iterable)
- for i, obj in enumerate(self.iterable):
- yield obj
- if self.stats is not None and i > 0 and \
- self.log_interval is not None and i % self.log_interval == 0:
- postfix = self._str_commas(self.stats)
- print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix),
- flush=True)
-
- def log(self, stats):
- """Log intermediate stats according to log_interval."""
- self.stats = self._format_stats(stats)
-
- def print(self, stats):
- """Print end-of-epoch stats."""
- postfix = self._str_pipes(self._format_stats(stats))
- print('{} | {}'.format(self.prefix, postfix), flush=True)
-
-
-class tqdm_progress_bar(progress_bar):
- """Log to tqdm."""
-
- def __init__(self, iterable, epoch=None, prefix=None):
- super().__init__(iterable, epoch, prefix)
- self.tqdm = tqdm(iterable, self.prefix, leave=False)
-
- def __iter__(self):
- return iter(self.tqdm)
-
- def log(self, stats):
- """Log intermediate stats according to log_interval."""
- self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
-
- def print(self, stats):
- """Print end-of-epoch stats."""
- postfix = self._str_pipes(self._format_stats(stats))
- self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/sequence_generator.py b/parlai/agents/fairseq/fairseq_py/fairseq/sequence_generator.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/sequence_generator.py
+++ /dev/null
@@ -1,341 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from contextlib import ExitStack
-import math
-import torch
-import torch.nn.functional as F
-from torch.autograd import Variable
-
-from fairseq import utils
-from fairseq.models import FairseqIncrementalDecoder
-
-
-class SequenceGenerator(object):
- def __init__(self, models, beam_size=1, minlen=1, maxlen=200,
- stop_early=True, normalize_scores=True, len_penalty=1,
- unk_penalty=0):
- """Generates translations of a given source sentence.
-
- Args:
- min/maxlen: The length of the generated output will be bounded by
- minlen and maxlen (not including the end-of-sentence marker).
- stop_early: Stop generation immediately after we finalize beam_size
- hypotheses, even though longer hypotheses might have better
- normalized scores.
- normalize_scores: Normalize scores by the length of the output.
- """
- self.models = models
- self.pad = models[0].dst_dict.pad()
- self.unk = models[0].dst_dict.unk()
- self.eos = models[0].dst_dict.eos()
- assert all(m.dst_dict.pad() == self.pad for m in self.models[1:])
- assert all(m.dst_dict.unk() == self.unk for m in self.models[1:])
- assert all(m.dst_dict.eos() == self.eos for m in self.models[1:])
- self.vocab_size = len(models[0].dst_dict)
- self.beam_size = beam_size
- self.minlen = minlen
- self.maxlen = min(maxlen, *[m.max_decoder_positions() for m in self.models])
- self.stop_early = stop_early
- self.normalize_scores = normalize_scores
- self.len_penalty = len_penalty
- self.unk_penalty = unk_penalty
-
- def cuda(self):
- for model in self.models:
- model.cuda()
- return self
-
- def generate_batched_itr(self, data_itr, beam_size=None, maxlen_a=0.0, maxlen_b=None,
- cuda_device=None, timer=None):
- """Iterate over a batched dataset and yield individual translations.
-
- Args:
- maxlen_a/b: generate sequences of maximum length ax + b,
- where x is the source sentence length.
- cuda_device: GPU on which to do generation.
- timer: StopwatchMeter for timing generations.
- """
- if maxlen_b is None:
- maxlen_b = self.maxlen
-
- for sample in data_itr:
- s = utils.prepare_sample(sample, volatile=True, cuda_device=cuda_device)
- input = s['net_input']
- srclen = input['src_tokens'].size(1)
- if timer is not None:
- timer.start()
- hypos = self.generate(input['src_tokens'], beam_size=beam_size,
- maxlen=int(maxlen_a*srclen + maxlen_b))
- if timer is not None:
- timer.stop(s['ntokens'])
- for i, id in enumerate(s['id']):
- src = input['src_tokens'].data[i, :]
- # remove padding from ref
- ref = utils.rstrip_pad(s['target'].data[i, :], self.pad)
- yield id, src, ref, hypos[i]
-
- def generate(self, src_tokens, beam_size=None, maxlen=None):
- """Generate a batch of translations."""
- with ExitStack() as stack:
- for model in self.models:
- if isinstance(model.decoder, FairseqIncrementalDecoder):
- stack.enter_context(model.decoder.incremental_inference())
- return self._generate(src_tokens, beam_size, maxlen)
-
- def _generate(self, src_tokens, beam_size=None, maxlen=None):
- bsz, srclen = src_tokens.size()
- maxlen = min(maxlen, self.maxlen) if maxlen is not None else self.maxlen
-
- # the max beam size is the dictionary size - 1, since we never select pad
- beam_size = beam_size if beam_size is not None else self.beam_size
- beam_size = min(beam_size, self.vocab_size - 1)
-
- encoder_outs = []
- for model in self.models:
- model.eval()
- if isinstance(model.decoder, FairseqIncrementalDecoder):
- model.decoder.set_beam_size(beam_size)
-
- # compute the encoder output for each beam
- encoder_out = model.encoder(src_tokens.repeat(1, beam_size).view(-1, srclen))
- encoder_outs.append(encoder_out)
-
- # initialize buffers
- scores = encoder_outs[0][0].data.new(bsz * beam_size).fill_(0)
- tokens = src_tokens.data.new(bsz * beam_size, maxlen + 2).fill_(self.pad)
- tokens_buf = tokens.clone()
- tokens[:, 0] = self.eos
- attn = scores.new(bsz * beam_size, src_tokens.size(1), maxlen + 2)
- attn_buf = attn.clone()
-
- # list of completed sentences
- finalized = [[] for i in range(bsz)]
- finished = [False for i in range(bsz)]
- worst_finalized = [{'idx': None, 'score': float('Inf')} for i in range(bsz)]
- num_remaining_sent = bsz
-
- # number of candidate hypos per step
- cand_size = 2 * beam_size # 2 x beam size in case half are EOS
-
- # offset arrays for converting between different indexing schemes
- bbsz_offsets = (torch.arange(0, bsz)*beam_size).unsqueeze(1).type_as(tokens)
- cand_offsets = torch.arange(0, cand_size).type_as(tokens)
-
- # helper function for allocating buffers on the fly
- buffers = {}
- def buffer(name, type_of=tokens): # noqa
- if name not in buffers:
- buffers[name] = type_of.new()
- return buffers[name]
-
- def is_finished(sent):
- """
- Check whether we've finished generation for a given sentence, by
- comparing the worst score among finalized hypotheses to the best
- possible score among unfinalized hypotheses.
- """
- assert len(finalized[sent]) <= beam_size
- if len(finalized[sent]) == beam_size:
- if self.stop_early:
- return True
- # stop if the best unfinalized score is worse than the worst
- # finalized one
- bbsz = sent*beam_size
- best_unfinalized_score = scores[bbsz:bbsz+beam_size].max()
- if self.normalize_scores:
- best_unfinalized_score /= maxlen
- if worst_finalized[sent]['score'] >= best_unfinalized_score:
- return True
- return False
-
- def finalize_hypos(step, bbsz_idx, scores):
- """
- Finalize the given hypotheses at this step, while keeping the total
- number of finalized hypotheses per sentence <= beam_size.
-
- Note: the input must be in the desired finalization order, so that
- hypotheses that appear earlier in the input are preferred to those
- that appear later.
-
- Args:
- step: current time step
- bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
- indicating which hypotheses to finalize
- scores: A vector of the same size as bbsz_idx containing scores
- for each hypothesis
- """
- assert bbsz_idx.numel() == scores.numel()
- norm_scores = scores/math.pow(step+1, self.len_penalty) if self.normalize_scores else scores
- sents_seen = set()
- for idx, score in zip(bbsz_idx.cpu(), norm_scores.cpu()):
- sent = idx // beam_size
- sents_seen.add(sent)
-
- def get_hypo():
- hypo = tokens[idx, 1:step+2].clone() # skip the first index, which is EOS
- hypo[step] = self.eos
- attention = attn[idx, :, 1:step+2].clone()
- _, alignment = attention.max(dim=0)
- return {
- 'tokens': hypo,
- 'score': score,
- 'attention': attention,
- 'alignment': alignment,
- }
-
- if len(finalized[sent]) < beam_size:
- finalized[sent].append(get_hypo())
- elif score > worst_finalized[sent]['score']:
- # replace worst hypo for this sentence with new/better one
- worst_idx = worst_finalized[sent]['idx']
- finalized[sent][worst_idx] = get_hypo()
-
- # find new worst finalized hypo for this sentence
- idx, s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
- worst_finalized[sent] = {
- 'score': s['score'],
- 'idx': idx,
- }
-
- # return number of hypotheses finished this step
- num_finished = 0
- for sent in sents_seen:
- # check termination conditions for this sentence
- if not finished[sent] and is_finished(sent):
- finished[sent] = True
- num_finished += 1
- return num_finished
-
- reorder_state = None
- for step in range(maxlen + 1): # one extra step for EOS marker
- # reorder decoder internal states based on the prev choice of beams
- if reorder_state is not None:
- for model in self.models:
- if isinstance(model.decoder, FairseqIncrementalDecoder):
- model.decoder.reorder_incremental_state(reorder_state)
-
- probs, avg_attn_scores = self._decode(tokens[:, :step+1], encoder_outs)
- if step == 0:
- # at the first step all hypotheses are equally likely, so use
- # only the first beam
- probs = probs.unfold(0, 1, beam_size).squeeze(2).contiguous()
- else:
- # make probs contain cumulative scores for each hypothesis
- probs.add_(scores.view(-1, 1))
- probs[:, self.pad] = -math.inf # never select pad
- probs[:, self.unk] -= self.unk_penalty # apply unk penalty
-
- # Record attention scores
- attn[:, :, step+1].copy_(avg_attn_scores)
-
- # take the best 2 x beam_size predictions. We'll choose the first
- # beam_size of these which don't predict eos to continue with.
- cand_scores = buffer('cand_scores', type_of=scores)
- cand_indices = buffer('cand_indices')
- cand_beams = buffer('cand_beams')
- probs.view(bsz, -1).topk(
- min(cand_size, probs.view(bsz, -1).size(1) - 1), # -1 so we never select pad
- out=(cand_scores, cand_indices))
- torch.div(cand_indices, self.vocab_size, out=cand_beams)
- cand_indices.fmod_(self.vocab_size)
-
- # cand_bbsz_idx contains beam indices for the top candidate
- # hypotheses, with a range of values: [0, bsz*beam_size),
- # and dimensions: [bsz, cand_size]
- cand_bbsz_idx = cand_beams.add_(bbsz_offsets)
-
- # finalize hypotheses that end in eos
- eos_mask = cand_indices.eq(self.eos)
- if step >= self.minlen:
- eos_bbsz_idx = buffer('eos_bbsz_idx')
- # only consider eos when it's among the top beam_size indices
- cand_bbsz_idx[:, :beam_size].masked_select(eos_mask[:, :beam_size], out=eos_bbsz_idx)
- if eos_bbsz_idx.numel() > 0:
- eos_scores = buffer('eos_scores', type_of=scores)
- cand_scores[:, :beam_size].masked_select(eos_mask[:, :beam_size], out=eos_scores)
- num_remaining_sent -= finalize_hypos(step, eos_bbsz_idx, eos_scores)
-
- assert num_remaining_sent >= 0
- if num_remaining_sent == 0:
- break
-
- # set active_mask so that values > cand_size indicate eos hypos
- # and values < cand_size indicate candidate active hypos.
- # After, the min values per row are the top candidate active hypos
- active_mask = buffer('active_mask')
- torch.add((eos_mask*cand_size).type_as(cand_offsets), cand_offsets[:eos_mask.size(1)],
- out=active_mask)
-
- # get the top beam_size active hypotheses, which are just the hypos
- # with the smallest values in active_mask
- active_hypos, _ignore = buffer('active_hypos'), buffer('_ignore')
- active_mask.topk(beam_size, 1, largest=False, out=(_ignore, active_hypos))
- active_bbsz_idx = buffer('active_bbsz_idx')
- cand_bbsz_idx.gather(1, active_hypos, out=active_bbsz_idx)
- active_scores = cand_scores.gather(1, active_hypos,
- out=scores.view(bsz, beam_size))
-
- active_bbsz_idx = active_bbsz_idx.view(-1)
- active_scores = active_scores.view(-1)
-
- # finalize all active hypotheses once we hit maxlen
- # finalize_hypos will take care of adding the EOS markers
- if step == maxlen:
- num_remaining_sent -= finalize_hypos(step, active_bbsz_idx, active_scores)
- assert num_remaining_sent == 0
- break
-
- # copy tokens for active hypotheses
- torch.index_select(tokens[:, :step+1], dim=0, index=active_bbsz_idx,
- out=tokens_buf[:, :step+1])
- cand_indices.gather(1, active_hypos,
- out=tokens_buf.view(bsz, beam_size, -1)[:, :, step+1])
-
- # copy attention for active hypotheses
- torch.index_select(attn[:, :, :step+2], dim=0, index=active_bbsz_idx,
- out=attn_buf[:, :, :step+2])
-
- # swap buffers
- old_tokens = tokens
- tokens = tokens_buf
- tokens_buf = old_tokens
- old_attn = attn
- attn = attn_buf
- attn_buf = old_attn
-
- # reorder incremental state in decoder
- reorder_state = active_bbsz_idx
-
- # sort by score descending
- for sent in range(bsz):
- finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
-
- return finalized
-
- def _decode(self, tokens, encoder_outs):
- # wrap in Variable
- tokens = Variable(tokens, volatile=True)
-
- avg_probs = None
- avg_attn = None
- for model, encoder_out in zip(self.models, encoder_outs):
- decoder_out, attn = model.decoder(tokens, encoder_out)
- probs = F.softmax(decoder_out[:, -1, :]).data
- attn = attn[:, -1, :].data
- if avg_probs is None or avg_attn is None:
- avg_probs = probs
- avg_attn = attn
- else:
- avg_probs.add_(probs)
- avg_attn.add_(attn)
- avg_probs.div_(len(self.models))
- avg_probs.log_()
- avg_attn.div_(len(self.models))
-
- return avg_probs, avg_attn
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/tokenizer.py b/parlai/agents/fairseq/fairseq_py/fairseq/tokenizer.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/tokenizer.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from collections import Counter
-import re
-
-import torch
-
-from fairseq import dictionary
-
-
-def tokenize_line(line):
- line = re.sub(r"\t", "", line)
- line = re.sub(r"^\s+", "", line)
- line = re.sub(r"\s+$", "", line)
- line = re.sub(r"\s+", " ", line)
- return line.split()
-
-
-class Tokenizer:
-
- @staticmethod
- def build_dictionary(filename, tokenize=tokenize_line):
- dict = dictionary.Dictionary()
- Tokenizer.add_file_to_dictionary(filename, dict, tokenize)
- dict.finalize()
- return dict
-
- @staticmethod
- def add_file_to_dictionary(filename, dict, tokenize):
- with open(filename, 'r') as f:
- for line in f:
- for word in tokenize(line):
- dict.add_symbol(word)
- dict.add_symbol(dict.eos_word)
-
- @staticmethod
- def binarize(filename, dict, consumer, tokenize=tokenize_line):
- nseq, ntok = 0, 0
- replaced = Counter()
-
- def replaced_consumer(word, idx):
- if idx == dict.unk_index and word != dict.unk_word:
- replaced.update([word])
-
- with open(filename, 'r') as f:
- for line in f:
- ids = Tokenizer.tokenize(line, dict, tokenize, add_if_not_exist=False, consumer=replaced_consumer)
- nseq += 1
-
- consumer(ids)
- ntok += len(ids)
- return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': len(replaced)}
-
- @staticmethod
- def tokenize(line, dict, tokenize=tokenize_line, add_if_not_exist=True, consumer=None):
- words = tokenize(line)
- nwords = len(words)
- ids = torch.IntTensor(nwords + 1)
- for i, word in enumerate(words):
- if add_if_not_exist:
- idx = dict.add_symbol(word)
- else:
- idx = dict.index(word)
- if consumer is not None:
- consumer(word, idx)
- ids[i] = idx
- ids[nwords] = dict.eos_index
- return ids
diff --git a/parlai/agents/fairseq/fairseq_py/fairseq/utils.py b/parlai/agents/fairseq/fairseq_py/fairseq/utils.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/fairseq/utils.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import logging
-import os
-import torch
-import traceback
-import sys
-
-from torch.autograd import Variable
-from torch.serialization import default_restore_location
-
-from fairseq import criterions, data, models, progress_bar, tokenizer
-
-
-def parse_args_and_arch(parser):
- args = parser.parse_args()
- args.model = models.arch_model_map[args.arch]
- args = getattr(models, args.model).parse_arch(args)
- return args
-
-
-def build_model(args, src_dict, dst_dict):
- assert hasattr(models, args.model), 'Missing model type'
- return getattr(models, args.model).build_model(args, src_dict, dst_dict)
-
-
-def build_criterion(args, src_dict, dst_dict):
- if args.label_smoothing > 0:
- return criterions.LabelSmoothedCrossEntropyCriterion(args, dst_dict)
- else:
- return criterions.CrossEntropyCriterion(args, dst_dict)
-
-
-def build_progress_bar(args, iterator, epoch=None, prefix=None):
- if args.log_format is None:
- args.log_format = 'tqdm' if sys.stderr.isatty() else 'simple'
-
- if args.log_format == 'json':
- bar = progress_bar.json_progress_bar(iterator, epoch, prefix, args.log_interval)
- elif args.log_format == 'none':
- bar = progress_bar.noop_progress_bar(iterator, epoch, prefix)
- elif args.log_format == 'simple':
- bar = progress_bar.simple_progress_bar(iterator, epoch, prefix, args.log_interval)
- elif args.log_format == 'tqdm':
- bar = progress_bar.tqdm_progress_bar(iterator, epoch, prefix)
- else:
- raise ValueError('Unknown log format: {}'.format(args.log_format))
- return bar
-
-
-def torch_persistent_save(*args, **kwargs):
- for i in range(3):
- try:
- return torch.save(*args, **kwargs)
- except Exception:
- if i == 2:
- logging.error(traceback.format_exc())
-
-
-def save_state(filename, args, model, criterion, optimizer, lr_scheduler, optim_history=None, extra_state=None):
- if optim_history is None:
- optim_history = []
- if extra_state is None:
- extra_state = {}
- state_dict = {
- 'args': args,
- 'model': model.state_dict(),
- 'optimizer_history': optim_history + [
- {
- 'criterion_name': criterion.__class__.__name__,
- 'best_loss': lr_scheduler.best,
- }
- ],
- 'last_optimizer_state': optimizer.state_dict(),
- 'extra_state': extra_state,
- }
- torch_persistent_save(state_dict, filename)
-
-
-def load_state(filename, model, criterion, optimizer, lr_scheduler, cuda_device=None):
- if not os.path.exists(filename):
- return None, []
- if cuda_device is None:
- state = torch.load(filename)
- else:
- state = torch.load(
- filename,
- map_location=lambda s, l: default_restore_location(s, 'cuda:{}'.format(cuda_device))
- )
- state = _upgrade_state_dict(state)
-
- # load model parameters
- model.load_state_dict(state['model'])
-
- # only load optimizer and lr_scheduler if they match with the checkpoint
- optim_history = state['optimizer_history']
- last_optim = optim_history[-1]
- if last_optim['criterion_name'] == criterion.__class__.__name__:
- optimizer.load_state_dict(state['last_optimizer_state'])
- lr_scheduler.best = last_optim['best_loss']
-
- return state['extra_state'], optim_history
-
-
-def _upgrade_state_dict(state):
- """Helper for upgrading old model checkpoints."""
- # add optimizer_history
- if 'optimizer_history' not in state:
- state['optimizer_history'] = [
- {
- 'criterion_name': criterions.CrossEntropyCriterion.__name__,
- 'best_loss': state['best_loss'],
- },
- ]
- state['last_optimizer_state'] = state['optimizer']
- del state['optimizer']
- del state['best_loss']
- # move extra_state into sub-dictionary
- if 'epoch' in state and 'extra_state' not in state:
- state['extra_state'] = {
- 'epoch': state['epoch'],
- 'batch_offset': state['batch_offset'],
- 'val_loss': state['val_loss'],
- }
- del state['epoch']
- del state['batch_offset']
- del state['val_loss']
- # reduce optimizer history's memory usage (only keep the last state)
- if 'optimizer' in state['optimizer_history'][-1]:
- state['last_optimizer_state'] = state['optimizer_history'][-1]['optimizer']
- for optim_hist in state['optimizer_history']:
- del optim_hist['optimizer']
- return state
-
-
-def load_ensemble_for_inference(filenames, src_dict=None, dst_dict=None, data_dir=None):
- """Load an ensemble of models for inference.
-
- The source and target dictionaries can be given explicitly, or loaded from
- the `data_dir` directory.
- """
- # load model architectures and weights
- states = []
- for filename in filenames:
- if not os.path.exists(filename):
- raise IOError('Model file not found: {}'.format(filename))
- states.append(
- torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
- )
- args = states[0]['args']
- args = _upgrade_args(args)
-
- if src_dict is None or dst_dict is None:
- assert data_dir is not None
- src_dict, dst_dict = data.load_dictionaries(data_dir, args.source_lang, args.target_lang)
-
- # build ensemble
- ensemble = []
- for state in states:
- model = build_model(args, src_dict, dst_dict)
- model.load_state_dict(state['model'])
- ensemble.append(model)
- return ensemble, args
-
-
-def _upgrade_args(args):
- if not hasattr(args, 'max_source_positions'):
- args.max_source_positions = args.max_positions
- args.max_target_positions = args.max_positions
- return args
-
-
-def prepare_sample(sample, volatile=False, cuda_device=None):
- """Wrap input tensors in Variable class."""
-
- def make_variable(tensor):
- if cuda_device is not None and torch.cuda.is_available():
- tensor = tensor.cuda(async=True, device=cuda_device)
- return Variable(tensor, volatile=volatile)
-
- return {
- 'id': sample['id'],
- 'ntokens': sample['ntokens'],
- 'target': make_variable(sample['target']),
- 'net_input': {
- key: make_variable(sample[key])
- for key in ['src_tokens', 'input_tokens']
- },
- }
-
-
-def load_align_dict(replace_unk):
- if replace_unk is None:
- align_dict = None
- elif isinstance(replace_unk, str):
- # Load alignment dictionary for unknown word replacement if it was passed as an argument.
- align_dict = {}
- with open(replace_unk, 'r') as f:
- for line in f:
- l = line.split()
- align_dict[l[0]] = l[1]
- else:
- # No alignment dictionary provided but we still want to perform unknown word replacement by copying the
- # original source word.
- align_dict = {}
- return align_dict
-
-
-def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
- # Tokens are strings here
- hypo_tokens = tokenizer.tokenize_line(hypo_str)
- # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
- src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>']
- for i, ht in enumerate(hypo_tokens):
- if ht == unk:
- src_token = src_tokens[alignment[i]]
- # Either take the corresponding value in the aligned dictionary or just copy the original value.
- hypo_tokens[i] = align_dict.get(src_token, src_token)
- return ' '.join(hypo_tokens)
-
-
-def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, dst_dict, remove_bpe):
- hypo_str = dst_dict.string(hypo_tokens, remove_bpe)
- if align_dict is not None:
- hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, dst_dict.unk_string())
- if align_dict is not None or remove_bpe is not None:
- # Convert back to tokens for evaluating with unk replacement or without BPE
- # Note that the dictionary can be modified inside the method.
- hypo_tokens = tokenizer.Tokenizer.tokenize(hypo_str, dst_dict, add_if_not_exist=True)
- return hypo_tokens, hypo_str, alignment
-
-
-def lstrip_pad(tensor, pad):
- return tensor[tensor.eq(pad).sum():]
-
-
-def rstrip_pad(tensor, pad):
- strip = tensor.eq(pad).sum()
- if strip > 0:
- return tensor[:-strip]
- return tensor
diff --git a/parlai/agents/fairseq/fairseq_py/generate.py b/parlai/agents/fairseq/fairseq_py/generate.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/generate.py
+++ /dev/null
@@ -1,130 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-
-from fairseq import bleu, data, options, tokenizer, utils
-from fairseq.meters import StopwatchMeter, TimeMeter
-from fairseq.sequence_generator import SequenceGenerator
-
-
-def main():
- parser = options.get_parser('Generation')
- parser.add_argument('--path', metavar='FILE', required=True, action='append',
- help='path(s) to model file(s)')
- dataset_args = options.add_dataset_args(parser)
- dataset_args.add_argument('--batch-size', default=32, type=int, metavar='N',
- help='batch size')
- dataset_args.add_argument('--gen-subset', default='test', metavar='SPLIT',
- help='data subset to generate (train, valid, test)')
- options.add_generation_args(parser)
-
- args = parser.parse_args()
- if args.no_progress_bar and args.log_format is None:
- args.log_format = 'none'
- print(args)
-
- use_cuda = torch.cuda.is_available() and not args.cpu
-
- # Load dataset
- if args.replace_unk is None:
- dataset = data.load_dataset(args.data, [args.gen_subset], args.source_lang, args.target_lang)
- else:
- dataset = data.load_raw_text_dataset(args.data, [args.gen_subset], args.source_lang, args.target_lang)
- if args.source_lang is None or args.target_lang is None:
- # record inferred languages in args
- args.source_lang, args.target_lang = dataset.src, dataset.dst
-
- # Load ensemble
- print('| loading model(s) from {}'.format(', '.join(args.path)))
- models, _ = utils.load_ensemble_for_inference(args.path, dataset.src_dict, dataset.dst_dict)
-
- print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
- print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
- print('| {} {} {} examples'.format(args.data, args.gen_subset, len(dataset.splits[args.gen_subset])))
-
- # Optimize ensemble for generation
- for model in models:
- model.make_generation_fast_(
- beamable_mm_beam_size=None if args.no_beamable_mm else args.beam)
-
- # Initialize generator
- translator = SequenceGenerator(
- models, beam_size=args.beam, stop_early=(not args.no_early_stop),
- normalize_scores=(not args.unnormalized), len_penalty=args.lenpen,
- unk_penalty=args.unkpen)
- if use_cuda:
- translator.cuda()
-
- # Load alignment dictionary for unknown word replacement
- # (None if no unknown word replacement, empty if no path to align dictionary)
- align_dict = utils.load_align_dict(args.replace_unk)
-
- # Generate and compute BLEU score
- scorer = bleu.Scorer(dataset.dst_dict.pad(), dataset.dst_dict.eos(), dataset.dst_dict.unk())
- max_positions = min(model.max_encoder_positions() for model in models)
- itr = dataset.eval_dataloader(
- args.gen_subset, max_sentences=args.batch_size, max_positions=max_positions,
- skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test)
- num_sentences = 0
- with utils.build_progress_bar(args, itr) as t:
- wps_meter = TimeMeter()
- gen_timer = StopwatchMeter()
- translations = translator.generate_batched_itr(
- t, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b,
- cuda_device=0 if use_cuda else None, timer=gen_timer)
- for sample_id, src_tokens, target_tokens, hypos in translations:
- # Process input and ground truth
- target_tokens = target_tokens.int().cpu()
- # Either retrieve the original sentences or regenerate them from tokens.
- if align_dict is not None:
- src_str = dataset.splits[args.gen_subset].src.get_original_text(sample_id)
- target_str = dataset.splits[args.gen_subset].dst.get_original_text(sample_id)
- else:
- src_str = dataset.src_dict.string(src_tokens, args.remove_bpe)
- target_str = dataset.dst_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
-
- if not args.quiet:
- print('S-{}\t{}'.format(sample_id, src_str))
- print('T-{}\t{}'.format(sample_id, target_str))
-
- # Process top predictions
- for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
- hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
- hypo_tokens=hypo['tokens'].int().cpu(),
- src_str=src_str,
- alignment=hypo['alignment'].int().cpu(),
- align_dict=align_dict,
- dst_dict=dataset.dst_dict,
- remove_bpe=args.remove_bpe)
-
- if not args.quiet:
- print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
- print('A-{}\t{}'.format(sample_id, ' '.join(map(str, alignment))))
-
- # Score only the top hypothesis
- if i == 0:
- if align_dict is not None or args.remove_bpe is not None:
- # Convert back to tokens for evaluation with unk replacement and/or without BPE
- target_tokens = tokenizer.Tokenizer.tokenize(target_str,
- dataset.dst_dict,
- add_if_not_exist=True)
- scorer.add(target_tokens, hypo_tokens)
-
- wps_meter.update(src_tokens.size(0))
- t.log({'wps': round(wps_meter.avg)})
- num_sentences += 1
-
- print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} tokens/s)'.format(
- num_sentences, gen_timer.n, gen_timer.sum, 1. / gen_timer.avg))
- print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))
-
-
-if __name__ == '__main__':
- main()
diff --git a/parlai/agents/fairseq/fairseq_py/interactive.py b/parlai/agents/fairseq/fairseq_py/interactive.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/interactive.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-import sys
-import torch
-from torch.autograd import Variable
-
-from fairseq import options, tokenizer, utils
-from fairseq.sequence_generator import SequenceGenerator
-
-
-def main():
- parser = options.get_parser('Generation')
- parser.add_argument('--path', metavar='FILE', required=True, action='append',
- help='path(s) to model file(s)')
- options.add_dataset_args(parser)
- options.add_generation_args(parser)
-
- args = parser.parse_args()
- print(args)
-
- use_cuda = torch.cuda.is_available() and not args.cpu
-
- # Load ensemble
- print('| loading model(s) from {}'.format(', '.join(args.path)))
- models, model_args = utils.load_ensemble_for_inference(args.path, data_dir=args.data)
- src_dict, dst_dict = models[0].src_dict, models[0].dst_dict
-
- print('| [{}] dictionary: {} types'.format(model_args.source_lang, len(src_dict)))
- print('| [{}] dictionary: {} types'.format(model_args.target_lang, len(dst_dict)))
-
- # Optimize ensemble for generation
- for model in models:
- model.make_generation_fast_(
- beamable_mm_beam_size=None if args.no_beamable_mm else args.beam)
-
- # Initialize generator
- translator = SequenceGenerator(
- models, beam_size=args.beam, stop_early=(not args.no_early_stop),
- normalize_scores=(not args.unnormalized), len_penalty=args.lenpen,
- unk_penalty=args.unkpen)
- if use_cuda:
- translator.cuda()
-
- # Load alignment dictionary for unknown word replacement
- # (None if no unknown word replacement, empty if no path to align dictionary)
- align_dict = utils.load_align_dict(args.replace_unk)
-
- print('| Type the input sentence and press return:')
- for src_str in sys.stdin:
- src_str = src_str.strip()
- src_tokens = tokenizer.Tokenizer.tokenize(src_str, src_dict, add_if_not_exist=False).long()
- if use_cuda:
- src_tokens = src_tokens.cuda()
- translations = translator.generate(Variable(src_tokens.view(1, -1)))
- hypos = translations[0]
- print('O\t{}'.format(src_str))
-
- # Process top predictions
- for hypo in hypos[:min(len(hypos), args.nbest)]:
- hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
- hypo_tokens=hypo['tokens'].int().cpu(),
- src_str=src_str,
- alignment=hypo['alignment'].int().cpu(),
- align_dict=align_dict,
- dst_dict=dst_dict,
- remove_bpe=args.remove_bpe)
- print('H\t{}\t{}'.format(hypo['score'], hypo_str))
- print('A\t{}'.format(' '.join(map(str, alignment))))
-
-
-if __name__ == '__main__':
- main()
diff --git a/parlai/agents/fairseq/fairseq_py/preprocess.py b/parlai/agents/fairseq/fairseq_py/preprocess.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/preprocess.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import argparse
-from itertools import zip_longest
-import os
-import shutil
-
-from fairseq import dictionary, indexed_dataset
-from fairseq.tokenizer import Tokenizer
-
-
-def main():
- parser = argparse.ArgumentParser(
- description='Data pre-processing: Create dictionary and store data in binary format')
- parser.add_argument('-s', '--source-lang', default=None, metavar='SRC', help='source language')
- parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET', help='target language')
- parser.add_argument('--trainpref', metavar='FP', default='train', help='target language')
- parser.add_argument('--validpref', metavar='FP', default='valid', help='comma separated, valid language prefixes')
- parser.add_argument('--testpref', metavar='FP', default='test', help='comma separated, test language prefixes')
- parser.add_argument('--destdir', metavar='DIR', default='data-bin', help='destination dir')
- parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
- help='map words appearing less than threshold times to unknown')
- parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
- help='map words appearing less than threshold times to unknown')
- parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
- parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
- parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int, help='number of target words to retain')
- parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int, help='number of source words to retain')
- parser.add_argument('--alignfile', metavar='ALIGN', default=None, help='an alignment file (optional)')
- parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],
- help='output format (optional)')
-
- args = parser.parse_args()
- print(args)
- os.makedirs(args.destdir, exist_ok=True)
-
- if args.srcdict:
- src_dict = dictionary.Dictionary.load(args.srcdict)
- else:
- src_dict = Tokenizer.build_dictionary(filename='{}.{}'.format(args.trainpref, args.source_lang))
- src_dict.save(os.path.join(args.destdir, 'dict.{}.txt'.format(args.source_lang)),
- threshold=args.thresholdsrc, nwords=args.nwordssrc)
-
- if args.tgtdict:
- tgt_dict = dictionary.Dictionary.load(args.tgtdict)
- else:
- tgt_dict = Tokenizer.build_dictionary(filename='{}.{}'.format(args.trainpref, args.target_lang))
- tgt_dict.save(os.path.join(args.destdir, 'dict.{}.txt'.format(args.target_lang)),
- threshold=args.thresholdtgt, nwords=args.nwordstgt)
-
- def make_binary_dataset(input_prefix, output_prefix, lang):
- dict = dictionary.Dictionary.load(os.path.join(args.destdir, 'dict.{}.txt'.format(lang)))
- print('| [{}] Dictionary: {} types'.format(lang, len(dict) - 1))
-
- ds = indexed_dataset.IndexedDatasetBuilder(
- '{}/{}.{}-{}.{}.bin'.format(args.destdir, output_prefix, args.source_lang,
- args.target_lang, lang)
- )
-
- def consumer(tensor):
- ds.add_item(tensor)
-
- input_file = '{}.{}'.format(input_prefix, lang)
- res = Tokenizer.binarize(input_file, dict, consumer)
- print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
- lang, input_file, res['nseq'], res['ntok'],
- 100 * res['nunk'] / res['ntok'], dict.unk_word))
- ds.finalize('{}/{}.{}-{}.{}.idx'.format(
- args.destdir, output_prefix,
- args.source_lang, args.target_lang, lang))
-
- def make_dataset(input_prefix, output_prefix, lang, output_format='binary'):
- if output_format == 'binary':
- make_binary_dataset(input_prefix, output_prefix, lang)
- elif output_format == 'raw':
- # Copy original text file to destination folder
- output_text_file = os.path.join(args.destdir, '{}.{}'.format(output_prefix, lang))
- shutil.copyfile('{}.{}'.format(input_prefix, lang), output_text_file)
-
- make_dataset(args.trainpref, 'train', args.source_lang, args.output_format)
- make_dataset(args.trainpref, 'train', args.target_lang, args.output_format)
- for k, validpref in enumerate(args.validpref.split(',')):
- outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
- make_dataset(validpref, outprefix, args.source_lang, args.output_format)
- make_dataset(validpref, outprefix, args.target_lang, args.output_format)
- for k, testpref in enumerate(args.testpref.split(',')):
- outprefix = 'test{}'.format(k) if k > 0 else 'test'
- make_dataset(testpref, outprefix, args.source_lang, args.output_format)
- make_dataset(testpref, outprefix, args.target_lang, args.output_format)
- print('| Wrote preprocessed data to {}'.format(args.destdir))
-
- if args.alignfile:
- src_file_name = '{}.{}'.format(args.trainpref, args.source_lang)
- tgt_file_name = '{}.{}'.format(args.trainpref, args.target_lang)
- src_dict = dictionary.Dictionary.load(os.path.join(args.destdir, 'dict.{}.txt'.format(args.source_lang)))
- tgt_dict = dictionary.Dictionary.load(os.path.join(args.destdir, 'dict.{}.txt'.format(args.target_lang)))
- freq_map = {}
- with open(args.alignfile, 'r') as align_file:
- with open(src_file_name, 'r') as src_file:
- with open(tgt_file_name, 'r') as tgt_file:
- for a, s, t in zip_longest(align_file, src_file, tgt_file):
- si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
- ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
- ai = list(map(lambda x: tuple(x.split('-')), a.split()))
- for sai, tai in ai:
- srcidx = si[int(sai)]
- tgtidx = ti[int(tai)]
- if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
- assert srcidx != src_dict.pad()
- assert srcidx != src_dict.eos()
- assert tgtidx != tgt_dict.pad()
- assert tgtidx != tgt_dict.eos()
-
- if srcidx not in freq_map:
- freq_map[srcidx] = {}
- if tgtidx not in freq_map[srcidx]:
- freq_map[srcidx][tgtidx] = 1
- else:
- freq_map[srcidx][tgtidx] += 1
-
- align_dict = {}
- for srcidx in freq_map.keys():
- align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
-
- with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(
- args.source_lang, args.target_lang)), 'w') as f:
- for k, v in align_dict.items():
- print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
-
-
-if __name__ == '__main__':
- main()
diff --git a/parlai/agents/fairseq/fairseq_py/score.py b/parlai/agents/fairseq/fairseq_py/score.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/score.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import argparse
-import os
-import sys
-
-from fairseq import bleu, dictionary, tokenizer
-
-
-def main():
- parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
- parser.add_argument('-s', '--sys', default='-', help='system output')
- parser.add_argument('-r', '--ref', required=True, help='references')
- parser.add_argument('-o', '--order', default=4, metavar='N',
- type=int, help='consider ngrams up to this order')
- parser.add_argument('--ignore-case', action='store_true',
- help='case-insensitive scoring')
-
- args = parser.parse_args()
- print(args)
-
- assert args.sys == '-' or os.path.exists(args.sys), \
- "System output file {} does not exist".format(args.sys)
- assert os.path.exists(args.ref), \
- "Reference file {} does not exist".format(args.ref)
-
- dict = dictionary.Dictionary()
-
- def readlines(fd):
- for line in fd.readlines():
- if args.ignore_case:
- yield line.lower()
- yield line
-
- def score(fdsys):
- with open(args.ref) as fdref:
- scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
- for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
- sys_tok = tokenizer.Tokenizer.tokenize(sys_tok, dict)
- ref_tok = tokenizer.Tokenizer.tokenize(ref_tok, dict)
- scorer.add(ref_tok, sys_tok)
- print(scorer.result_string(args.order))
-
- if args.sys == '-':
- score(sys.stdin)
- else:
- with open(args.sys, 'r') as f:
- score(f)
-
-
-if __name__ == '__main__':
- main()
diff --git a/parlai/agents/fairseq/fairseq_py/scripts/__init__.py b/parlai/agents/fairseq/fairseq_py/scripts/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/scripts/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
\ No newline at end of file
diff --git a/parlai/agents/fairseq/fairseq_py/scripts/build_sym_alignment.py b/parlai/agents/fairseq/fairseq_py/scripts/build_sym_alignment.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/scripts/build_sym_alignment.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-"""
-Use this script in order to build symmetric alignments for your translation
-dataset.
-This script depends on fast_align and mosesdecoder tools. You will need to
-build those before running the script.
-fast_align:
- github: http://github.com/clab/fast_align
- instructions: follow the instructions in README.md
-mosesdecoder:
- github: http://github.com/moses-smt/mosesdecoder
- instructions: http://www.statmt.org/moses/?n=Development.GetStarted
-The script produces the following files under --output_dir:
- text.joined - concatenation of lines from the source_file and the
- target_file.
- align.forward - forward pass of fast_align.
- align.backward - backward pass of fast_align.
- aligned.sym_heuristic - symmetrized alignment.
-"""
-
-import argparse
-import os
-from itertools import zip_longest
-
-
-def main():
- parser = argparse.ArgumentParser(description='symmetric alignment builer')
- parser.add_argument('--fast_align_dir',
- help='path to fast_align build directory')
- parser.add_argument('--mosesdecoder_dir',
- help='path to mosesdecoder root directory')
- parser.add_argument('--sym_heuristic',
- help='heuristic to use for symmetrization',
- default='grow-diag-final-and')
- parser.add_argument('--source_file',
- help='path to a file with sentences '
- 'in the source language')
- parser.add_argument('--target_file',
- help='path to a file with sentences '
- 'in the target language')
- parser.add_argument('--output_dir',
- help='output directory')
- args = parser.parse_args()
-
- fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
- symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
- sym_fast_align_bin = os.path.join(
- args.mosesdecoder_dir, 'scripts', 'ems',
- 'support', 'symmetrize-fast-align.perl')
-
- # create joined file
- joined_file = os.path.join(args.output_dir, 'text.joined')
- with open(args.source_file, 'r') as src, open(args.target_file, 'r') as tgt:
- with open(joined_file, 'w') as joined:
- for s, t in zip_longest(src, tgt):
- print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)
-
- bwd_align_file = os.path.join(args.output_dir, 'align.backward')
-
- # run forward alignment
- fwd_align_file = os.path.join(args.output_dir, 'align.forward')
- fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(
- FASTALIGN=fast_align_bin,
- JOINED=joined_file,
- FWD=fwd_align_file)
- assert os.system(fwd_fast_align_cmd) == 0
-
- # run backward alignment
- bwd_align_file = os.path.join(args.output_dir, 'align.backward')
- bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(
- FASTALIGN=fast_align_bin,
- JOINED=joined_file,
- BWD=bwd_align_file)
- assert os.system(bwd_fast_align_cmd) == 0
-
- # run symmetrization
- sym_out_file = os.path.join(args.output_dir, 'aligned')
- sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(
- SYMFASTALIGN=sym_fast_align_bin,
- FWD=fwd_align_file,
- BWD=bwd_align_file,
- SRC=args.source_file,
- TGT=args.target_file,
- OUT=sym_out_file,
- HEURISTIC=args.sym_heuristic,
- SYMAL=symal_bin
- )
- assert os.system(sym_cmd) == 0
-
-
-if __name__ == '__main__':
- main()
diff --git a/parlai/agents/fairseq/fairseq_py/setup.py b/parlai/agents/fairseq/fairseq_py/setup.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/setup.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-from setuptools import setup, find_packages, Extension
-from setuptools.command.build_py import build_py
-import sys
-from torch.utils.ffi import create_extension
-
-
-if sys.version_info < (3,):
- sys.exit('Sorry, Python3 is required for fairseq.')
-
-with open('README.md') as f:
- readme = f.read()
-
-with open('LICENSE') as f:
- license = f.read()
-
-with open('requirements.txt') as f:
- reqs = f.read()
-
-bleu = Extension(
- 'fairseq.libbleu',
- sources=[
- 'fairseq/clib/libbleu/libbleu.cpp',
- 'fairseq/clib/libbleu/module.cpp',
- ],
- extra_compile_args=['-std=c++11'],
-)
-
-conv_tbc = create_extension(
- 'fairseq.temporal_convolution_tbc',
- relative_to='fairseq',
- headers=['fairseq/clib/temporal_convolution_tbc/temporal_convolution_tbc.h'],
- sources=['fairseq/clib/temporal_convolution_tbc/temporal_convolution_tbc.cpp'],
- define_macros=[('WITH_CUDA', None)],
- with_cuda=True,
- extra_compile_args=['-std=c++11'],
- source_extension='.cpp',
-)
-
-
-class build_py_hook(build_py):
- def run(self):
- conv_tbc.build()
- build_py.run(self)
-
-
-setup(
- name='fairseq',
- version='0.2.0',
- description='Facebook AI Research Sequence-to-Sequence Toolkit',
- long_description=readme,
- license=license,
- install_requires=reqs.strip().split('\n'),
- packages=find_packages(),
- ext_modules=[bleu],
-
- # build and install PyTorch extensions
- package_data={
- 'fairseq': ['temporal_convolution_tbc/*.so'],
- },
- include_package_data=True,
- cmdclass={
- 'build_py': build_py_hook,
- },
-)
diff --git a/parlai/agents/fairseq/fairseq_py/train.py b/parlai/agents/fairseq/fairseq_py/train.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/train.py
+++ /dev/null
@@ -1,269 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import collections
-import os
-import torch
-import math
-
-from fairseq import data, options, utils
-from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
-from fairseq.multiprocessing_trainer import MultiprocessingTrainer
-
-
-def main():
- parser = options.get_parser('Trainer')
- dataset_args = options.add_dataset_args(parser)
- dataset_args.add_argument('--max-tokens', default=6000, type=int, metavar='N',
- help='maximum number of tokens in a batch')
- dataset_args.add_argument('--max-sentences', type=int, metavar='N',
- help='maximum number of sentences in a batch')
- dataset_args.add_argument('--train-subset', default='train', metavar='SPLIT',
- choices=['train', 'valid', 'test'],
- help='data subset to use for training (train, valid, test)')
- dataset_args.add_argument('--valid-subset', default='valid', metavar='SPLIT',
- help='comma separated list of data subsets '
- ' to use for validation (train, valid, valid1,test, test1)')
- options.add_optimization_args(parser)
- options.add_checkpoint_args(parser)
- options.add_model_args(parser)
-
- args = utils.parse_args_and_arch(parser)
-
- if args.no_progress_bar and args.log_format is None:
- args.log_format = 'simple'
-
- if not os.path.exists(args.save_dir):
- os.makedirs(args.save_dir)
- torch.manual_seed(args.seed)
-
- # Load dataset
- splits = ['train', 'valid']
- if data.has_binary_files(args.data, splits):
- dataset = data.load_dataset(args.data, splits, args.source_lang, args.target_lang)
- else:
- dataset = data.load_raw_text_dataset(args.data, splits, args.source_lang, args.target_lang)
- if args.source_lang is None or args.target_lang is None:
- # record inferred languages in args, so that it's saved in checkpoints
- args.source_lang, args.target_lang = dataset.src, dataset.dst
-
- print(args)
- print('| [{}] dictionary: {} types'.format(dataset.src, len(dataset.src_dict)))
- print('| [{}] dictionary: {} types'.format(dataset.dst, len(dataset.dst_dict)))
- for split in splits:
- print('| {} {} {} examples'.format(args.data, split, len(dataset.splits[split])))
-
- if not torch.cuda.is_available():
- raise NotImplementedError('Training on CPU is not supported')
- num_gpus = torch.cuda.device_count()
-
- print('| using {} GPUs (with max tokens per GPU = {} and max sentences per GPU = {})'.format(
- num_gpus, args.max_tokens, args.max_sentences))
-
- # Build model and criterion
- model = utils.build_model(args, dataset.src_dict, dataset.dst_dict)
- criterion = utils.build_criterion(args, dataset.src_dict, dataset.dst_dict)
- print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
-
- # The max number of positions can be different for train and valid
- # e.g., RNNs may support more positions at test time than seen in training
- max_positions_train = (args.max_source_positions, args.max_target_positions)
- max_positions_valid = (
- min(args.max_source_positions, model.max_encoder_positions()),
- min(args.max_target_positions, model.max_decoder_positions())
- )
-
- # Start multiprocessing
- trainer = MultiprocessingTrainer(args, model, criterion)
-
- # Load the latest checkpoint if one is available
- checkpoint_path = os.path.join(args.save_dir, args.restore_file)
- extra_state = trainer.load_checkpoint(checkpoint_path)
- if extra_state is not None:
- epoch = extra_state['epoch']
- batch_offset = extra_state['batch_offset']
- print('| loaded checkpoint {} (epoch {})'.format(checkpoint_path, epoch))
- if batch_offset == 0:
- epoch += 1
- else:
- epoch, batch_offset = 1, 0
-
- # Train until the learning rate gets too small
- val_loss = None
- max_epoch = args.max_epoch or math.inf
- lr = trainer.get_lr()
- train_meter = StopwatchMeter()
- train_meter.start()
- while lr > args.min_lr and epoch <= max_epoch:
- # train for one epoch
- train(args, epoch, batch_offset, trainer, dataset, max_positions_train, num_gpus)
-
- # evaluate on validate set
- for k, subset in enumerate(args.valid_subset.split(',')):
- val_loss = validate(args, epoch, trainer, dataset, max_positions_valid, subset, num_gpus)
- if k == 0:
- if not args.no_save:
- # save checkpoint
- save_checkpoint(trainer, args, epoch, 0, val_loss)
- # only use first validation loss to update the learning schedule
- lr = trainer.lr_step(val_loss, epoch)
-
- epoch += 1
- batch_offset = 0
- train_meter.stop()
- print('| done training in {:.1f} seconds'.format(train_meter.sum))
-
- # Stop multiprocessing
- trainer.stop()
-
-
-def get_perplexity(loss):
- try:
- return round(math.pow(2, loss), 2)
- except OverflowError:
- return float('inf')
-
-
-def train(args, epoch, batch_offset, trainer, dataset, max_positions, num_gpus):
- """Train the model for one epoch."""
-
- seed = args.seed + epoch
- torch.manual_seed(seed)
- trainer.set_seed(seed)
-
- itr = dataset.train_dataloader(
- args.train_subset, num_workers=args.workers,
- max_tokens=args.max_tokens, max_sentences=args.max_sentences,
- max_positions=max_positions, seed=seed, epoch=epoch,
- sample_without_replacement=args.sample_without_replacement,
- sort_by_source_size=(epoch <= args.curriculum))
- loss_meter = AverageMeter()
- bsz_meter = AverageMeter() # sentences per batch
- wpb_meter = AverageMeter() # words per batch
- wps_meter = TimeMeter() # words per second
- clip_meter = AverageMeter() # % of updates clipped
- extra_meters = collections.defaultdict(lambda: AverageMeter())
-
- lr = trainer.get_lr()
- with utils.build_progress_bar(args, itr, epoch) as t:
- for i, sample in data.skip_group_enumerator(t, num_gpus, batch_offset):
- loss_dict = trainer.train_step(sample)
- loss = loss_dict['loss']
- del loss_dict['loss'] # don't include in extra_meters or extra_postfix
-
- ntokens = sum(s['ntokens'] for s in sample)
- nsentences = sum(s['src_tokens'].size(0) for s in sample)
- loss_meter.update(loss, nsentences if args.sentence_avg else ntokens)
- bsz_meter.update(nsentences)
- wpb_meter.update(ntokens)
- wps_meter.update(ntokens)
- clip_meter.update(1 if loss_dict['gnorm'] > args.clip_norm else 0)
-
- extra_postfix = []
- for k, v in loss_dict.items():
- extra_meters[k].update(v)
- extra_postfix.append((k, extra_meters[k].avg))
-
- t.log(collections.OrderedDict([
- ('loss', loss_meter),
- ('wps', round(wps_meter.avg)),
- ('wpb', round(wpb_meter.avg)),
- ('bsz', round(bsz_meter.avg)),
- ('lr', lr),
- ('clip', '{:.0%}'.format(clip_meter.avg)),
- ] + extra_postfix))
-
- if i == 0:
- # ignore the first mini-batch in words-per-second calculation
- wps_meter.reset()
- if args.save_interval > 0 and (i + 1) % args.save_interval == 0:
- save_checkpoint(trainer, args, epoch, i + 1)
-
- t.print(collections.OrderedDict([
- ('train loss', round(loss_meter.avg, 2)),
- ('train ppl', get_perplexity(loss_meter.avg)),
- ('s/checkpoint', round(wps_meter.elapsed_time)),
- ('words/s', round(wps_meter.avg)),
- ('words/batch', round(wpb_meter.avg)),
- ('bsz', round(bsz_meter.avg)),
- ('lr', lr),
- ('clip', '{:3.0f}%'.format(clip_meter.avg * 100)),
- ] + [
- (k, meter.avg)
- for k, meter in extra_meters.items()
- ]))
-
-
-def save_checkpoint(trainer, args, epoch, batch_offset, val_loss):
- extra_state = {
- 'epoch': epoch,
- 'batch_offset': batch_offset,
- 'val_loss': val_loss,
- }
-
- if batch_offset == 0:
- if not args.no_epoch_checkpoints:
- epoch_filename = os.path.join(args.save_dir, 'checkpoint{}.pt'.format(epoch))
- trainer.save_checkpoint(epoch_filename, extra_state)
-
- assert val_loss is not None
- if not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best:
- save_checkpoint.best = val_loss
- best_filename = os.path.join(args.save_dir, 'checkpoint_best.pt')
- trainer.save_checkpoint(best_filename, extra_state)
-
- last_filename = os.path.join(args.save_dir, 'checkpoint_last.pt')
- trainer.save_checkpoint(last_filename, extra_state)
-
-
-def validate(args, epoch, trainer, dataset, max_positions, subset, ngpus):
- """Evaluate the model on the validation set and return the average loss."""
-
- itr = dataset.eval_dataloader(
- subset, max_tokens=args.max_tokens, max_sentences=args.max_sentences,
- max_positions=max_positions,
- skip_invalid_size_inputs_valid_test=args.skip_invalid_size_inputs_valid_test,
- descending=True, # largest batch first to warm the caching allocator
- )
- loss_meter = AverageMeter()
- extra_meters = collections.defaultdict(lambda: AverageMeter())
-
- prefix = 'valid on \'{}\' subset'.format(subset)
- with utils.build_progress_bar(args, itr, epoch, prefix) as t:
- for _, sample in data.skip_group_enumerator(t, ngpus):
- loss_dict = trainer.valid_step(sample)
- loss = loss_dict['loss']
- del loss_dict['loss'] # don't include in extra_meters or extra_postfix
-
- ntokens = sum(s['ntokens'] for s in sample)
- loss_meter.update(loss, ntokens)
-
- extra_postfix = []
- for k, v in loss_dict.items():
- extra_meters[k].update(v)
- extra_postfix.append((k, extra_meters[k].avg))
-
- t.log(collections.OrderedDict([
- ('valid loss', round(loss_meter.avg, 2)),
- ] + extra_postfix))
-
- t.print(collections.OrderedDict([
- ('valid loss', round(loss_meter.avg, 2)),
- ('valid ppl', get_perplexity(loss_meter.avg)),
- ] + [
- (k, meter.avg)
- for k, meter in extra_meters.items()
- ]))
-
- # update and return the learning rate
- return loss_meter.avg
-
-
-if __name__ == '__main__':
- main()
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -309,6 +309,8 @@ def save(self, path):
states['optimizer'] = self.optimizer.state_dict()
if states: # anything found to save?
+ # also store the options with the file for good measure
+ states['opt'] = self.opt
with open(path, 'wb') as write:
torch.save(states, write)
| diff --git a/parlai/agents/fairseq/fairseq_py/tests/__init__.py b/parlai/agents/fairseq/fairseq_py/tests/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/tests/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree. An additional grant
-# of patent rights can be found in the PATENTS file in the same directory.
\ No newline at end of file
diff --git a/parlai/agents/fairseq/fairseq_py/tests/test_convtbc.py b/parlai/agents/fairseq/fairseq_py/tests/test_convtbc.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/tests/test_convtbc.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-import unittest
-from fairseq.modules import ConvTBC
-import torch.nn as nn
-from torch.autograd import Variable
-
-
-class TestConvTBC(unittest.TestCase):
-
- def test_convtbc(self):
- # ksz, in_channels, out_channels
- conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
- # out_channels, in_channels, ksz
- conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
-
- conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
- conv_tbc.bias.data.copy_(conv1d.bias.data)
-
- input_tbc = Variable(torch.randn(7, 2, 4), requires_grad=True)
- input1d = Variable(input_tbc.data.transpose(0, 1).transpose(1, 2), requires_grad=True)
-
- output_tbc = conv_tbc(input_tbc)
- output1d = conv1d(input1d)
-
- self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
-
- grad_tbc = torch.randn(output_tbc.size())
- grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
-
- output_tbc.backward(grad_tbc)
- output1d.backward(grad1d)
-
- self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
- self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
- self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
-
- def assertAlmostEqual(self, t1, t2):
- self.assertEqual(t1.size(), t2.size(), "size mismatch")
- self.assertLess((t1 - t2).abs().max(), 1e-4)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/parlai/agents/fairseq/fairseq_py/tests/test_label_smoothing.py b/parlai/agents/fairseq/fairseq_py/tests/test_label_smoothing.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq_py/tests/test_label_smoothing.py
+++ /dev/null
@@ -1,35 +0,0 @@
-# Copyright (c) 2017-present, Facebook, Inc.
-# All rights reserved.
-#
-# This source code is licensed under the license found in the LICENSE file in
-# the root directory of this source tree. An additional grant of patent rights
-# can be found in the PATENTS file in the same directory.
-#
-
-import torch
-import unittest
-from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedNLLLoss
-from torch.autograd import Variable, gradcheck
-
-
-torch.set_default_tensor_type('torch.DoubleTensor')
-
-
-class TestLabelSmoothing(unittest.TestCase):
-
- def test_label_smoothing(self):
- input = Variable(torch.randn(3, 5), requires_grad=True)
- idx = torch.rand(3) * 4
- target = Variable(idx.long())
- criterion = LabelSmoothedNLLLoss()
- self.assertTrue(gradcheck(
- lambda x, y: criterion.apply(x, y, 0.1, 2, None), (input, target)
- ))
- weights = torch.ones(5)
- weights[2] = 0
- self.assertTrue(gradcheck(lambda x, y: criterion.apply(x, y, 0.1, None, weights), (input, target)))
- self.assertTrue(gradcheck(lambda x, y: criterion.apply(x, y, 0.1, None, None), (input, target)))
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/tests/test_init.py b/tests/test_init.py
--- a/tests/test_init.py
+++ b/tests/test_init.py
@@ -19,9 +19,6 @@ def test_init_everywhere(self):
if os.path.basename(root) == 'html':
# skip mturk core's html folder--not a python module
continue
- if 'fairseq_py' in root:
- # skip fairseq_py folder
- continue
assert '__init__.py' in files, 'Dir {} is missing __init__.py'.format(root)
| Fairseq model out of date
| 2018-06-18T14:33:19Z | [] | [] |
|
facebookresearch/ParlAI | 1052 | facebookresearch__ParlAI-1052 | [
"924"
] | d7d53e286e65c8d67541da0ae775fd1769b33d0e | diff --git a/parlai/agents/fairseq/fairseq.py b/parlai/agents/fairseq/fairseq.py
--- a/parlai/agents/fairseq/fairseq.py
+++ b/parlai/agents/fairseq/fairseq.py
@@ -5,6 +5,7 @@
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.dict import DictionaryAgent
+from parlai.core.utils import argsort, padded_tensor
try:
from fairseq import models, optim, criterions
@@ -15,6 +16,7 @@
)
from fairseq import trainer, fp16_trainer
from fairseq.sequence_generator import SequenceGenerator
+from fairseq.sequence_scorer import SequenceScorer
from fairseq import options
from fairseq.tasks.fairseq_task import FairseqTask
from fairseq.utils import convert_padding_direction
@@ -319,7 +321,7 @@ def __init__(self, opt, shared=None):
# actually construct the model and generator
self.model = self.build_model()
- # Construct the generator
+ # Construct the generator and scorer
self.generator = SequenceGenerator(
[self.model],
tgt_dict=self.dict,
@@ -332,6 +334,8 @@ def __init__(self, opt, shared=None):
sampling_topk=self.args.sampling_topk,
sampling_temperature=self.args.sampling_temperature,
)
+ self.scorer = SequenceScorer([self.model], self.dict)
+
# set up the grader and the trainer
self.criterion = criterions.build_criterion(self.args, self.task)
@@ -464,12 +468,53 @@ def eval_step(self, batch):
if batch.label_vec is not None:
# Interactive mode won't have a gold label
self.trainer.valid_step(samples)
- # Grade each of the candidate sequences
- # TODO: grade everything in observations[i]['label_candidates']
+ # Output placeholders
+ reranked_cands = None
+ generated_output = None
+
+ # Grade each of the candidate sequences
+ if batch.candidate_vecs is not None:
+ bsz = len(batch.text_vec)
+ reranked_cands = []
+ # score the candidates for each item in the batch separately, so that
+ # we can support variable number of candidates
+ for i in range(bsz):
+ cands = batch.candidate_vecs[i]
+ if not cands:
+ reranked_cands.append(None)
+ continue
+ ncand = len(cands)
+ # repeat the input many times
+ xs = batch.text_vec[i].unsqueeze(0).expand(ncand, -1)
+ # some models crash if there's leading padding on every example
+ xs = xs[:, :batch.text_lengths[i]]
+ # and appropriately pack the outputs
+ ys, _ = padded_tensor(cands, self.NULL_IDX, self.use_cuda)
+ s = self._make_sample(xs, ys)
+ # perform the actual grading, extract the scores
+ scored = list(self.scorer.score_batched_itr([s], cuda=self.use_cuda))
+ scores = [s[3][0]['score'].item() for s in scored]
+ # intentional hanging comma here; argsort returns a list
+ ranked, = argsort(scores, batch.candidates[i], descending=True)
+ reranked_cands.append(ranked)
+
+ # Next generate freely to create our response
if not self.args.skip_generation:
- # Next generate freely to create our response
- return Output(self._generate(samples), None)
+ generated_output = self._generate(samples)
+ elif reranked_cands:
+ # we're skiping generation, but we're also grading candidates
+            # we're skipping generation, but we're also grading candidates
+ # In the case of zero candidates, we don't have something to rank,
+ # so we may need to pass on that None
+ generated_output = [
+ ranked and ranked[0] or None for ranked in reranked_cands
+ ]
+ else:
+ # no output at all
+ pass
+
+ return Output(generated_output, reranked_cands)
def _generate(self, samples):
src_tokens = samples["net_input"]["src_tokens"]
@@ -554,6 +599,7 @@ def _make_sample(self, xs, ys):
# TODO: should the right/left padding thing be in torch agent?
repadded = convert_padding_direction(xs, self.dict.pad(), right_to_left=True)
sample = {}
+ sample["id"] = torch.arange(len(xs) - 1)
sample["net_input"] = {
"src_tokens": repadded,
"src_lengths": self._seq_length(xs),
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -6,7 +6,7 @@
from parlai.core.agents import Agent
from parlai.core.dict import DictionaryAgent
-from parlai.core.utils import set_namedtuple_defaults
+from parlai.core.utils import set_namedtuple_defaults, argsort, padded_tensor
try:
import torch
@@ -352,21 +352,13 @@ def batchify(self, obs_batch, sort=False,
# TEXT
xs, x_lens = None, None
if any('text_vec' in ex for ex in exs):
- x_text = [ex.get('text_vec', self.EMPTY) for ex in exs]
- x_lens = [x.shape[0] for x in x_text]
-
+ _xs = [ex.get('text_vec', self.EMPTY) for ex in exs]
+ xs, x_lens = padded_tensor(_xs)
if sort:
sort = False # now we won't sort on labels
- ind_sorted = sorted(range(len(x_lens)),
- key=lambda k: -x_lens[k])
- exs = [exs[k] for k in ind_sorted]
- valid_inds = [valid_inds[k] for k in ind_sorted]
- x_text = [x_text[k] for k in ind_sorted]
- x_lens = [x_lens[k] for k in ind_sorted]
-
- xs = torch.LongTensor(len(exs), max(x_lens)).fill_(self.NULL_IDX)
- for i, ex in enumerate(x_text):
- xs[i, :ex.shape[0]] = ex
+ xs, x_lens, valid_inds, exs = argsort(
+ x_lens, xs, x_lens, valid_inds, exs, descending=True
+ )
if self.use_cuda:
xs = xs.cuda()
@@ -384,14 +376,11 @@ def batchify(self, obs_batch, sort=False,
y_lens = [y.shape[0] for y in label_vecs]
if sort and xs is None:
- # always sort on xs if we have them, not ys
- ind_sorted = sorted(range(len(y_lens)),
- key=lambda k: -y_lens[k])
- exs = [exs[k] for k in ind_sorted]
- valid_inds = [valid_inds[k] for k in ind_sorted]
- label_vecs = [label_vecs[k] for k in ind_sorted]
- labels = [labels[k] for k in ind_sorted]
- y_lens = [y_lens[k] for k in ind_sorted]
+ ys, y_lens = padded_tensor(label_vecs)
+ exs, valid_inds, label_vecs, labels, y_lens = argsort(
+ y_lens, exs, valid_inds, label_vecs, labels, y_lens,
+ descending=True
+ )
ys = torch.LongTensor(len(exs), max(y_lens)).fill_(self.NULL_IDX)
for i, y in enumerate(label_vecs):
@@ -461,10 +450,10 @@ def match_batch(self, batch_reply, valid_inds, output=None):
def _add_person_tokens(self, text, token, add_after_newln=False):
if add_after_newln:
split = text.split('\n')
- split[-1] = token + split[-1]
+ split[-1] = token + ' ' + split[-1]
return '\n'.join(split)
else:
- return token + text
+            return token + ' ' + text
def get_dialog_history(self, observation, reply=None,
add_person_tokens=False, add_p1_after_newln=False):
@@ -490,15 +479,14 @@ def get_dialog_history(self, observation, reply=None,
if reply is not None:
if add_person_tokens:
# add person2 token to reply
- reply = self._add_person_tokens(reply, self.P2_TOKEN + ' ')
+ reply = self._add_person_tokens(reply, self.P2_TOKEN)
# add reply to history
self.history.append(reply)
if 'text' in obs:
if add_person_tokens:
# add person1 token to text
- obs['text'] = self._add_person_tokens(obs['text'],
- self.P1_TOKEN + ' ',
+ obs['text'] = self._add_person_tokens(obs['text'], self.P1_TOKEN,
add_p1_after_newln)
# add text to history
self.history.append(obs['text'])
diff --git a/parlai/core/utils.py b/parlai/core/utils.py
--- a/parlai/core/utils.py
+++ b/parlai/core/utils.py
@@ -10,6 +10,14 @@
import random
import time
+# some of the utility methods are helpful for Torch
+try:
+ import torch
+ __TORCH_AVAILABLE = True
+except ImportError:
+ __TORCH_AVAILABLE = False
+
+
DISPLAY_MESSAGE_DEFAULT_FIELDS = {
'episode_done',
'id',
@@ -872,3 +880,70 @@ def set_namedtuple_defaults(namedtuple, default=None):
"""
namedtuple.__new__.__defaults__ = (default,) * len(namedtuple._fields)
return namedtuple
+
+
+def padded_tensor(items, pad_idx=0, use_cuda=False, left_padded=False):
+ """Create a right-padded matrix from an uneven list of lists.
+
+ Returns (padded, lengths), where padded is the padded matrix, and lengths
+ is a list containing the lengths of each row.
+
+ Matrix is right-padded (filled to the right) by default, but can be
+ left padded if the flag is set to True.
+
+ Matrix can also be placed on cuda automatically.
+
+ :param list[iter[int]] items: List of items
+    :param int pad_idx: the value to use for padding
+    :param bool use_cuda: if true, places `padded` on GPU
+    :param bool left_padded: if true, pads to the left instead of the right,
+        so that the content of each row is right-aligned
+
+ :return: (padded, lengths) tuple
+ :rtype: (Tensor[int64], list[int])
+ """
+ # hard fail if we don't have torch
+ if not __TORCH_AVAILABLE:
+ raise ImportError(
+ "Cannot use padded_tensor without torch; go to http://pytorch.org"
+ )
+
+ # number of items
+ n = len(items)
+ # length of each item
+ lens = [len(item) for item in items]
+ # max in time dimension
+ t = max(lens)
+ output = torch.LongTensor(n, t).fill_(pad_idx)
+ for i in range(len(items)):
+ if left_padded:
+ # place at end
+ output[i, t - lens[i]:] = torch.LongTensor(items[i])
+ else:
+ # place at beginning
+ output[i, :lens[i]] = torch.LongTensor(items[i])
+ if use_cuda:
+ output = output.cuda()
+ return output, lens
+
+
+def argsort(keys, *lists, descending=False):
+ """Reorder each list in lists by the (descending) sorted order of keys.
+
+ :param iter keys: Keys to order by
+ :param list[list] lists: Lists to reordered by keys's order.
+ Correctly handles lists and 1-D tensors.
+ :param bool descending: Use descending order if true
+ :return: The reordered items
+ """
+ ind_sorted = sorted(range(len(keys)), key=lambda k: keys[k])
+ if descending:
+ ind_sorted = list(reversed(ind_sorted))
+ output = []
+ for lst in lists:
+ # watch out in case we don't have torch installed
+ if __TORCH_AVAILABLE and isinstance(lst, torch.Tensor):
+ output.append(lst[ind_sorted])
+ else:
+ output.append([lst[i] for i in ind_sorted])
+ return output
| diff --git a/tests/run_tests_short.sh b/tests/run_tests_short.sh
--- a/tests/run_tests_short.sh
+++ b/tests/run_tests_short.sh
@@ -9,6 +9,7 @@
set -e # stop if any tests fail
for test in $(ls); do
if [ ${test: -3} == ".py" ] && [ $test != "test_downloads.py" ] && [ $test != "test_mlb_vqa.py" ]; then
+ echo "Running $test"
python3 $test;
fi
done
diff --git a/tests/test_torch_agent.py b/tests/test_torch_agent.py
--- a/tests/test_torch_agent.py
+++ b/tests/test_torch_agent.py
@@ -520,12 +520,12 @@ def test__add_person_tokens(self):
"Attack ships on fire off the shoulder of Orion.\n"
"I watched C-beams glitter in the dark near the Tannhauser gate.\n"
"All those moments will be lost in time, like tears in rain.")
- prefix = 'PRE '
+ prefix = 'PRE'
out = agent._add_person_tokens(text, prefix, add_after_newln=False)
- self.assertEqual(out, prefix + text)
+ self.assertEqual(out, prefix + ' ' + text)
out = agent._add_person_tokens(text, prefix, add_after_newln=True)
idx = text.rfind('\n') + 1
- self.assertEqual(out, text[:idx] + prefix + text[idx:])
+ self.assertEqual(out, text[:idx] + prefix + ' ' + text[idx:])
def test_get_dialog_history(self):
"""Test different dialog history settings."""
diff --git a/tests/test_utils.py b/tests/test_utils.py
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -4,13 +4,18 @@
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
-from parlai.core.utils import Timer, round_sigfigs, set_namedtuple_defaults
+from parlai.core.utils import Timer
+from parlai.core.utils import round_sigfigs
+from parlai.core.utils import set_namedtuple_defaults
+from parlai.core.utils import padded_tensor
+from parlai.core.utils import argsort
import time
import unittest
+import torch
+import numpy as np
class TestUtils(unittest.TestCase):
-
def test_round_sigfigs(self):
x = 0
y = 0
@@ -89,3 +94,30 @@ def test_setnamedtupledefaults(self):
assert nt.a is 1
assert nt.b is 1
assert nt.c is 1
+
+ def test_padded_tensor(self):
+ # list of lists
+ lol = [[1, 2], [3, 4, 5]]
+ output, lens = padded_tensor(lol)
+ assert np.all(output.numpy() == np.array([[1, 2, 0], [3, 4, 5]]))
+ assert lens == [2, 3]
+ output, _ = padded_tensor(lol, left_padded=True)
+ assert np.all(output.numpy() == np.array([[0, 1, 2], [3, 4, 5]]))
+ output, _ = padded_tensor(lol, pad_idx=99)
+ assert np.all(output.numpy() == np.array([[1, 2, 99], [3, 4, 5]]))
+
+ def test_argsort(self):
+ keys = [5, 4, 3, 2, 1]
+ items = ["five", "four", "three", "two", "one"]
+ items2 = ["e", "d", "c", "b", "a"]
+ torch_keys = torch.LongTensor(keys)
+ assert argsort(keys, items, items2) == [
+ list(reversed(items)), list(reversed(items2))
+ ]
+ assert argsort(keys, items, items2, descending=True) == [items, items2]
+
+ assert np.all(argsort(torch_keys, torch_keys)[0].numpy() == np.arange(1, 6))
+
+
+if __name__ == '__main__':
+ unittest.main()
| FairSeq grade candidates
Fairseq can currently generate and evaluate a single label, but it ignores `label_candidates` when there are multiple candidates to grade.
The change belongs here:
https://github.com/facebookresearch/ParlAI/blob/5994d437089892ec626fcd510b6fefd2869d8a80/parlai/agents/fairseq/fairseq.py#L348-L349
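
For illustration, a minimal sketch of the behaviour being requested (the `rerank_candidates` helper and `score_fn` callback are hypothetical names, not ParlAI or fairseq APIs): score each entry of `label_candidates` against the dialogue context and return the candidates in descending score order.

```python
from typing import Callable, List, Tuple


def rerank_candidates(
    context: str,
    candidates: List[str],
    score_fn: Callable[[str, str], float],
) -> List[Tuple[str, float]]:
    """Score every candidate against the context and sort best-first."""
    scored = [(cand, score_fn(context, cand)) for cand in candidates]
    return sorted(scored, key=lambda pair: pair[1], reverse=True)


if __name__ == "__main__":
    # Toy scorer: word overlap with the context stands in for a model score.
    def toy_score(ctx: str, cand: str) -> float:
        return float(len(set(ctx.split()) & set(cand.split())))

    ranked = rerank_candidates(
        "how are you today",
        ["the weather is nice", "fine thanks , how are you ?"],
        toy_score,
    )
    print(ranked)  # the highest-overlap candidate comes first
```

In practice `score_fn` would be the model's per-candidate log-likelihood; the toy overlap scorer above is only there to keep the sketch runnable.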
| 2018-08-07T20:52:03Z | [] | [] |
|
facebookresearch/ParlAI | 1,580 | facebookresearch__ParlAI-1580 | [
"1564"
] | f893d84f6d2be3fcafd49632d7c28c5c5ef8227c | diff --git a/projects/wizard_of_wikipedia/generator/agents.py b/projects/wizard_of_wikipedia/generator/agents.py
--- a/projects/wizard_of_wikipedia/generator/agents.py
+++ b/projects/wizard_of_wikipedia/generator/agents.py
@@ -58,48 +58,38 @@ def batchify(self, obs_batch):
checked_sentences.append(checked_sentence)
batch['checked_sentence'] = checked_sentences
+
return batch
class TwoStageAgent(_GenericWizardAgent):
- def __init__(self, opt, shared):
+ def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if shared is not None:
# make sure the dialogue token appears
self.dict[TOKEN_DIALOG] = 9999999
- def observe(self, obs):
+ def _set_text_vec(self, obs, history, truncate):
if 'text' not in obs:
return obs
- # TODO: resolve this with #1421
- # get the dialog stuff
- reply = self.last_reply()
- self.observation = self.get_dialog_history(obs, reply=reply)
- # we need to store the old text so that we can restore it
- oldtext = obs['text']
-
- # now we want to force prepend the knowledge stuff
- fields = []
- if 'chosen_topic' in obs:
- fields += [obs['title']]
- if 'checked_sentence' in obs:
- fields += [TOKEN_KNOWLEDGE, obs['checked_sentence']]
- if obs['text'] != '':
- fields += [TOKEN_DIALOG, obs['text']]
- obs['text'] = ' '.join(fields)
-
- # now vectorize with the extra knowledge. It'll all get stored in the
- # text_vec operation, etc
- self.vectorize(
- obs,
- text_truncate=self.text_truncate,
- label_truncate=self.label_truncate
- )
-
- # finally we need to return the old text to the way it was
- obs['text'] = oldtext
- assert obs is self.observation
+ if 'text_vec' not in obs:
+ fields = []
+ dialogue_history = history.get_history_str()
+ if 'chosen_topic' in obs:
+ fields += [obs['title']]
+ if 'checked_sentence' in obs:
+ fields += [TOKEN_KNOWLEDGE, obs['checked_sentence']]
+ if dialogue_history:
+ fields += [TOKEN_DIALOG, dialogue_history]
+ obs['text'] = ' '.join(fields)
+ obs['text_vec'] = self.dict.txt2vec(obs['text'])
+
+ # check truncation
+ if 'text_vec' in obs:
+ obs['text_vec'] = th.LongTensor(
+ self._check_truncate(obs['text_vec'], truncate, True)
+ )
return obs
| diff --git a/tests/nightly/gpu/test_wizard.py b/tests/nightly/gpu/test_wizard.py
new file mode 100644
--- /dev/null
+++ b/tests/nightly/gpu/test_wizard.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+
+import unittest
+import parlai.scripts.display_data as display_data
+import parlai.core.testing_utils as testing_utils
+
+END2END_OPTIONS = {
+ 'task': 'wizard_of_wikipedia:generator:random_split',
+ 'model_file': 'models:wizard_of_wikipedia/end2end_generator/model',
+ 'batchsize': 32,
+ 'log_every_n_secs': 30,
+ 'embedding_type': 'random',
+}
+
+
+@testing_utils.skipUnlessGPU
+class TestWizardModel(unittest.TestCase):
+    """Checks that Wizard model can be downloaded and achieves appropriate results"""
+ @classmethod
+ def setUpClass(cls):
+ # go ahead and download things here
+ with testing_utils.capture_output():
+ parser = display_data.setup_args()
+ parser.set_defaults(**END2END_OPTIONS)
+ opt = parser.parse_args(print_args=False)
+ opt['num_examples'] = 1
+ display_data.display_data(opt)
+
+ def test_end2end(self):
+ stdout, valid, _ = testing_utils.eval_model(END2END_OPTIONS)
+ self.assertEqual(
+ valid['ppl'], 61.21,
+ 'valid ppl = {}\nLOG:\n{}'.format(valid['ppl'], stdout)
+ )
+ self.assertEqual(
+ valid['f1'], 0.1717,
+ 'valid f1 = {}\nLOG:\n{}'.format(valid['f1'], stdout)
+ )
+ self.assertGreaterEqual(
+ valid['know_acc'], 0.2201,
+ 'valid know_acc = {}\nLOG:\n{}'.format(valid['know_acc'], stdout)
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
| [Wizard of Wikipedia] Where is get_dialog_history() defined?
https://github.com/facebookresearch/ParlAI/blob/a43f2880719c5a048fdf3d0aa5d5b25eeb9a1a41/projects/wizard_of_wikipedia/generator/agents.py#L78
@stephenroller I'm guessing this got missed in TorchAgent. Could you please fix it?
| Hi g-karthik, sorry I must've broken it this past week. I'm unfortunately on vacation right now, but I'll fix it when I get back | 2019-03-25T17:24:30Z | [] | [] |
facebookresearch/ParlAI | 1,595 | facebookresearch__ParlAI-1595 | [
"1592"
] | 6a34b6429a5f757a459bbf78d825c3c8b2b0287d | diff --git a/parlai/agents/seq2seq/seq2seq.py b/parlai/agents/seq2seq/seq2seq.py
--- a/parlai/agents/seq2seq/seq2seq.py
+++ b/parlai/agents/seq2seq/seq2seq.py
@@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
from parlai.core.torch_generator_agent import TorchGeneratorAgent
+from parlai.core.utils import warn_once
from .modules import Seq2seq, opt_to_kwargs
import torch
@@ -201,3 +202,22 @@ def load(self, path):
if 'longest_label' in states:
self.model.longest_label = states['longest_label']
return states
+
+ def is_valid(self, obs):
+ normally_valid = super().is_valid(obs)
+ if not normally_valid:
+ # shortcut boolean evaluation
+ return normally_valid
+ contains_empties = obs['text_vec'].shape[0] == 0
+ if self.is_training and contains_empties:
+ warn_once(
+ 'seq2seq got an empty input sequence (text_vec) during training. '
+ 'Skipping this example, but you should check your dataset and '
+ 'preprocessing.'
+ )
+ elif not self.is_training and contains_empties:
+ warn_once(
+ 'seq2seq got an empty input sequence (text_vec) in an '
+ 'evaluation example! This may affect your metrics!'
+ )
+ return not contains_empties
| diff --git a/parlai/tasks/integration_tests/agents.py b/parlai/tasks/integration_tests/agents.py
--- a/parlai/tasks/integration_tests/agents.py
+++ b/parlai/tasks/integration_tests/agents.py
@@ -13,6 +13,7 @@
"""
from parlai.core.teachers import DialogTeacher
+import copy
import random
import itertools
@@ -171,5 +172,69 @@ def setup_data(self, fold):
yield (t, a), e
+class BadExampleTeacher(CandidateTeacher):
+ """
+ Teacher which produces a variety of examples that upset verify_data.py.
+
+ Useful for checking how models respond when the following assumptions are
+ violated:
+
+ 0. text is empty string
+ 1. missing text
+ 2. label is empty string
+ 3. missing label
+ 4. label candidates is empty
+ 5. label candidates contains an empty string
+ 6. label isn't in the candidates
+ 7. missing label candidates
+
+ Note: this test may come to outlive its purpose in the future. When failing
+ this test, one should consider who is really at fault: the test, or the code.
+ """
+ NUM_CASES = 8
+
+ def __init__(self, opt, shared=None):
+ super().__init__(opt, shared)
+ # gross hack: override data.get to force things the way we want; otherwise
+ # we can't actually force some of these scenarios.
+ self.data.get = self._wrapperfn(self.data.get)
+
+ def _wrapperfn(self, oldget):
+ def newget(*args):
+ item, eod = oldget(*args)
+ item = copy.deepcopy(item)
+ newget.case = (newget.case + 1) % self.NUM_CASES
+ case = newget.case
+ if case == 0:
+ # empty string input
+ item['text'] = ''
+ elif case == 1:
+ # not text input
+ del item['text']
+ elif case == 2:
+ # empty string label
+ item['labels'] = ['']
+ elif case == 3:
+ # no label
+ del item['labels']
+ elif case == 4:
+ # no label candidates
+ item['label_candidates'] = []
+ elif case == 5:
+ # extra empty string in labels
+ item['label_candidates'] = list(item['label_candidates']) + ['']
+ elif case == 6:
+ # label candidates doesn't have the label
+ item['label_candidates'] = list(item['label_candidates'])
+ item['label_candidates'].remove(item['labels'][0])
+ elif case == 7:
+ # no label candidates field
+ del item['label_candidates']
+ return item, eod
+
+ newget.case = random.randint(0, self.NUM_CASES)
+ return newget
+
+
class DefaultTeacher(CandidateTeacher):
pass
diff --git a/tests/test_seq2seq.py b/tests/test_seq2seq.py
--- a/tests/test_seq2seq.py
+++ b/tests/test_seq2seq.py
@@ -106,6 +106,23 @@ def test_beamsearch(self):
"test ppl = {}\nLOG:\n{}".format(test['ppl'], stdout)
)
+ def test_badinput(self):
+ """Ensures model doesn't crash on malformed inputs."""
+ stdout, _, _ = testing_utils.train_model(dict(
+ task='integration_tests:bad_example',
+ model='seq2seq',
+ lr=LR,
+ batchsize=10,
+ datatype='train:ordered:stream',
+ num_epochs=1,
+ numthreads=1,
+ no_cuda=True,
+ embeddingsize=16,
+ hiddensize=16,
+ ))
+ self.assertIn('valid:{', stdout)
+ self.assertIn('test:{', stdout)
+
class TestHogwildSeq2seq(unittest.TestCase):
@testing_utils.skipIfGPU
diff --git a/tests/test_transformers.py b/tests/test_transformers.py
--- a/tests/test_transformers.py
+++ b/tests/test_transformers.py
@@ -245,6 +245,22 @@ def test_generator_backcomp(self):
'test f1 = {}\nLOG:\n{}'.format(test['f1'], stdout)
)
+ def test_badinput(self):
+ """Ensures model doesn't crash on malformed inputs."""
+ stdout, _, _ = testing_utils.train_model(dict(
+ task='integration_tests:bad_example',
+ model='transformer/generator',
+ batchsize=10,
+ datatype='train:ordered:stream',
+ num_epochs=1,
+ numthreads=1,
+ no_cuda=True,
+ embeddingsize=16,
+ hiddensize=16,
+ ))
+ self.assertIn('valid:{', stdout)
+ self.assertIn('test:{', stdout)
+
class TestLearningRateScheduler(unittest.TestCase):
def test_resuming(self):
| Empty training examples throwing runtime error
I'm getting an error with the OpenSubtitles (2009 and 2018) and the Cornell Movie datasets because length-0 examples are being loaded. My guess is that there are empty messages in the datasets, which could easily be removed with some preprocessing, or skipped with a flag for avoiding such examples (a sketch of the kind of filter I mean is included after the reproduction command below). However, I'm having trouble finding such a flag and would appreciate any help you can provide. This issue does not come up on the other datasets I have explored so far.
I'm a bit puzzled that others don't seem to have run into this problem, and I'm hoping you can point me in the right direction, especially if I'm missing an easy fix. Thanks.
**Error:**
```
Traceback (most recent call last):
File "examples/train_model.py", line 16, in <module>
TrainLoop(opt).train()
File "/data/odemasi/packages/ParlAI/parlai/scripts/train_model.py", line 523, in train
world.parley()
File "/data/odemasi/packages/ParlAI/parlai/core/worlds.py", line 654, in parley
batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])
File "/data/odemasi/packages/ParlAI/parlai/core/worlds.py", line 627, in batch_act
batch_actions = a.batch_act(batch_observation)
File "/data/odemasi/packages/ParlAI/parlai/core/torch_agent.py", line 1206, in batch_act
output = self.train_step(batch)
File "/data/odemasi/packages/ParlAI/parlai/core/torch_generator_agent.py", line 478, in train_step
raise e
File "/data/odemasi/packages/ParlAI/parlai/core/torch_generator_agent.py", line 454, in train_step
scores, preds, _ = self.model(batch.text_vec, batch.label_vec)
File "/data/odemasi/anaconda3/envs/parlai_venv/lib/python3.7/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/data/odemasi/packages/ParlAI/parlai/core/torch_generator_agent.py", line 214, in forward
encoder_states = prev_enc if prev_enc is not None else self.encoder(xs)
File "/data/odemasi/anaconda3/envs/parlai_venv/lib/python3.7/site-packages/torch/nn/modules/module.py", line 489, in __call__
result = self.forward(*input, **kwargs)
File "/data/odemasi/packages/ParlAI/parlai/agents/seq2seq/modules.py", line 226, in forward
xes = pack_padded_sequence(xes, x_lens, batch_first=True)
File "/data/odemasi/anaconda3/envs/parlai_venv/lib/python3.7/site-packages/torch/nn/utils/rnn.py", line 148, in pack_padded_sequence
return PackedSequence(torch._C._VariableFunctions._pack_padded_sequence(input, lengths, batch_first))
RuntimeError: Length of all samples has to be greater than 0, but found an element in 'lengths' that is <= 0
```
**Resulted from:**
python examples/train_model.py \
-t cornell_movie \
-bs 32 \
--hiddensize 1024 \
--dict-minfreq 2 \
--dict-lower True \
--dict-file cornell_dict \
-m seq2seq \
-mf cornell_seq2seq
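
To make the preprocessing idea concrete, here is a minimal sketch (the `keep_nonempty` helper is made up for illustration and is not an existing ParlAI flag or function) that drops any example whose text is empty before it can reach `pack_padded_sequence`:

```python
def keep_nonempty(examples):
    """Drop examples whose 'text' field is missing or whitespace-only."""
    kept = []
    for ex in examples:
        text = ex.get('text', '')
        if isinstance(text, str) and text.strip():
            kept.append(ex)
        # otherwise skip the example; real preprocessing might log a warning here
    return kept


if __name__ == '__main__':
    batch = [{'text': 'hello there'}, {'text': ''}, {'labels': ['hi']}]
    print(keep_nonempty(batch))  # -> [{'text': 'hello there'}]
```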
| Thanks for reporting. How long does it usually take before you hit this bug?
It comes up very quickly. It's obviously within the first epoch and for the Cornell dataset it is within the first few minutes of training.
| 2019-04-01T13:50:24Z | [] | [] |
facebookresearch/ParlAI | 1,680 | facebookresearch__ParlAI-1680 | [
"1557"
] | 6f13b999ed221b19604283175360af9dc5063eed | diff --git a/parlai/zoo/controllable_dialogue/__init__.py b/parlai/zoo/controllable_dialogue/__init__.py
new file mode 100644
--- /dev/null
+++ b/parlai/zoo/controllable_dialogue/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/parlai/zoo/controllable_dialogue/build.py b/parlai/zoo/controllable_dialogue/build.py
new file mode 100644
--- /dev/null
+++ b/parlai/zoo/controllable_dialogue/build.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+"""
+Pretrained models from the "What makes a good conversation?" paper.
+
+See https://parl.ai/projects/controllable_dialogue/.
+"""
+
+from parlai.core.build_data import download_models
+
+
+def download(datapath):
+ opt = {
+ 'datapath': datapath,
+ }
+ fnames = ['models_v1.tar.gz']
+ download_models(
+ opt, fnames, 'controllable_dialogue', version='v1.0', use_model_type=False,
+ )
diff --git a/parlai/zoo/model_list.py b/parlai/zoo/model_list.py
--- a/parlai/zoo/model_list.py
+++ b/parlai/zoo/model_list.py
@@ -3,7 +3,9 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-"""This file contains a list of all the models in the model zoo, the path to
+
+"""
+This file contains a list of all the models in the model zoo, the path to
load them, agents & tasks associated (e.g. they were trained using) and a
description. Using the path you should be able to download and use the model
automatically, e.g.:
@@ -188,9 +190,9 @@
"python examples/eval_model.py -t light_dialog "
"-mf models:light/biranker_dialogue/model"
),
- "result": "{'exs': 6623, 'accuracy': 0.7586, 'f1': 0.7802, 'hits@1': 0.759, 'hits@5': 0.965,"
- "'hits@10': 0.994, 'hits@100': 1.0, 'bleu': 0.7255, 'lr': 5e-05, 'num_updates': 15050,"
- "'examples': 6623, 'loss': 5307.0, 'mean_loss': 0.8013, 'mean_rank': 1.599, 'train_accuracy': 0}",
+ "result": "{'exs': 6623, 'accuracy': 0.7586, 'f1': 0.7802, 'hits@1': 0.759, 'hits@5': 0.965," # noqa: E501
+ "'hits@10': 0.994, 'hits@100': 1.0, 'bleu': 0.7255, 'lr': 5e-05, 'num_updates': 15050," # noqa: E501
+ "'examples': 6623, 'loss': 5307.0, 'mean_loss': 0.8013, 'mean_rank': 1.599, 'train_accuracy': 0}", # noqa: E501
},
{
"title": "Twitter conversational model",
@@ -201,4 +203,12 @@
"description": ("Generic conversational model trained on the twitter task"),
"result": "{'exs': 10405, 'accuracy': 0.001538, 'f1': 0.07537, 'bleu': 0.002304, 'loss': 3.93, 'ppl': 50.9}", # noqa: E501
},
+ {
+ "title": "Controllable Dialogue pretrained models",
+ "id": "controllable_dialogue",
+ "path": "models:controllable_dialogue/convai2_finetuned_baseline",
+ "agent": "projects.controllable_dialogue.controllable_seq2seq.controllable_seq2seq:ControllableSeq2seqAgent", # noqa: E501
+ "task": "projects.controllable_dialogue.tasks.agents",
+        "description": ("Pretrained models for controllable dialogue"),
+ },
]
diff --git a/projects/controllable_dialogue/controllable_seq2seq/__init__.py b/projects/controllable_dialogue/controllable_seq2seq/__init__.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/__init__.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
diff --git a/projects/controllable_dialogue/controllable_seq2seq/arora.py b/projects/controllable_dialogue/controllable_seq2seq/arora.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/arora.py
@@ -0,0 +1,388 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file contains code for computing Arora-style sentence embeddings, for
+response-relatedness control.
+"""
+
+from parlai.core.params import ParlaiParser
+from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
+from parlai.core.worlds import create_task
+from parlai.core.build_data import modelzoo_path
+import torchtext.vocab as vocab
+from parlai.core.utils import TimeLogger
+from collections import Counter, deque
+import numpy as np
+import os
+import pickle
+import torch
+
+CONTROLLABLE_DIR = 'controllable_dialogue'
+
+
+class SentenceEmbedder(object):
+ """
+ A class to produce Arora-style sentence embeddings
+ sent_emb(s)
+ where s is a sentence. Also gives relatedness scores
+ cos_sim(word_emb(w), sent_emb(s))
+ for words w with GloVe embeddings word_emb(w).
+
+ See: "A Simple But Tough-To-Beat Baseline For Sentence Embeddings",
+ Arora et al, 2017, https://openreview.net/pdf?id=SyK00v5xx
+ """
+
+ def __init__(self, word2prob, arora_a, glove_name, glove_dim, first_sv,
+ glove_cache):
+ """
+ Inputs:
+ word2prob: dict mapping words to their unigram probs
+ arora_a: a float. Is the constant (called "a" in the paper)
+ used to compute Arora sentence embeddings.
+ glove_name: the version of GloVe to use, e.g. '840B'
+ glove_dim: the dimension of the GloVe embeddings to use, e.g. 300
+ first_sv: np array shape (glove_dim). The first singular value,
+ used to compute Arora sentence embeddings. Can be None.
+ glove_cache: The path to where the glove vectors are stored.
+ """
+ self.word2prob = word2prob
+ self.arora_a = arora_a
+ self.glove_name = glove_name
+ self.glove_dim = glove_dim
+ self.glove_cache = glove_cache
+ self.first_sv = first_sv
+ if self.first_sv is not None:
+ self.first_sv = torch.tensor(self.first_sv) # convert to torch tensor
+
+ self.min_word_prob = min(word2prob.values()) # prob of rarest word
+ self.tt_embs = None # will be torchtext.vocab.GloVe object
+ self.emb_matrix = None # will be np array shape (vocab_size, glove_dim)
+
+ # Initialize a cache, which holds up to 64 sentences, along with their
+ # corresponding word similarity scores (i.e. cosine sim for every word in the
+ # vocab). This enables us to repeatedly retrieve sims for sentences we have
+ # already processed (useful for batched beam search).
+ self.cache_limit = 64
+ self.cache_sent2sims = {} # maps sent to sims. holds up to cache_limit.
+ self.cache_sentqueue = deque() # list of sents. add to right, remove from left
+
+ def get_glove_embs(self):
+ """
+ Loads torchtext GloVe embs from file and stores in self.tt_embs.
+ """
+ print('Loading torchtext GloVe embs (for Arora sentence embs)...')
+ self.tt_embs = vocab.GloVe(name=self.glove_name, dim=self.glove_dim,
+ cache=self.glove_cache)
+ print('Finished loading torchtext GloVe embs')
+
+ def get_emb_matrix(self, dictionary):
+ """
+ Construct an embedding matrix containing pretrained GloVe vectors for all words
+ in dictionary, and store in self.emb_matrix. This is needed for
+ response-relatedness weighted decoding.
+
+ Inputs:
+ dictionary: ParlAI dictionary
+ """
+ print('Constructing GloVe emb matrix for response-relatedness weighted '
+ 'decoding...')
+ self.emb_matrix = []
+ oov_indices = [] # list of dictionary indices for all OOV words
+ for idx in range(len(dictionary)):
+ word = dictionary[idx]
+ if word in self.tt_embs.stoi:
+ word_emb = self.tt_embs.vectors[self.tt_embs.stoi[word]]
+ else:
+ # If word is OOV, enter a zero vector instead.
+ # This means that the cosine similarity will always be zero.
+ word_emb = torch.zeros(self.glove_dim)
+ oov_indices.append(idx)
+ self.emb_matrix.append(word_emb)
+ self.emb_matrix = np.stack(self.emb_matrix) # (vocab_size, glove_dim)
+ print('Done constructing GloVe emb matrix; found %i OOVs of %i words'
+ % (len(oov_indices), len(dictionary)))
+
+ # Get the norm of each of the word vectors. This is needed for cosine sims.
+ # self.emb_matrix_norm is a np array shape (vocab_size)
+ self.emb_matrix_norm = np.linalg.norm(self.emb_matrix, axis=1)
+
+ # For the OOV words which have zero vectors,
+ # set the norm to 1.0 so we don't have divide-by-zero errors
+ for idx in oov_indices:
+ self.emb_matrix_norm[idx] = 1.0
+
+ def get_word_sims(self, sent, sent_emb, dictionary):
+ """
+ Given a sentence and its Arora-style sentence embedding, compute the cosine
+ similarities to it, for all words in the dictionary.
+
+ Inputs:
+ sent: string. Used only for caching lookup purposes.
+ sent_emb: torch Tensor shape (glove_dim).
+ dictionary: ParlAI dictionary
+
+ Returns:
+ sims: torch Tensor shape (vocab_size), containing the cosine sims.
+ """
+ # If we haven't initialized the GloVe emb matrix yet, do so
+ if self.emb_matrix is None:
+ self.get_emb_matrix(dictionary)
+
+ # If we have already computed sims for this sentence, return it
+ if sent in self.cache_sent2sims:
+ sims = self.cache_sent2sims[sent]
+ return sims
+
+ # Compute the cosine similarities. Implementation from here:
+ # https://codereview.stackexchange.com/questions/55717/efficient-numpy-cosine-distance-calculation
+ dotted = self.emb_matrix.dot(sent_emb) # shape (vocab_size)
+ sent_emb_norm = np.linalg.norm(sent_emb) # norm of the sent emb. scalar
+ norms = np.multiply(self.emb_matrix_norm, sent_emb_norm) # shape (vocab_size)
+ sims = np.divide(dotted, norms) # divide dot prods by norms. shape (vocab_size)
+ sims = torch.tensor(sims) # convert to torch Tensor, shape (vocab_size)
+
+ # Cache sims in self.cache_sent2sims
+ self.cache_sentqueue.append(sent) # append sent to right
+ self.cache_sent2sims[sent] = sims # add (sent, sims) pair to cache
+ if len(self.cache_sentqueue) > self.cache_limit:
+ to_remove = self.cache_sentqueue.popleft() # remove from left
+ del self.cache_sent2sims[to_remove] # remove from cache
+ assert len(self.cache_sent2sims) == len(self.cache_sentqueue)
+ assert len(self.cache_sent2sims) <= self.cache_limit
+
+ return sims
+
+ def embed_sent(self, sent, rem_first_sv=True):
+ """
+ Produce a Arora-style sentence embedding for a given sentence.
+
+ Inputs:
+ sent: tokenized sentence; a list of strings
+ rem_first_sv: If True, remove the first singular value when you compute the
+ sentence embddings. Otherwise, don't remove it.
+ Returns:
+ sent_emb: tensor length glove_dim, or None.
+ If sent_emb is None, that's because all of the words were OOV for GloVe.
+ """
+ # If we haven't loaded the torchtext GloVe embeddings, do so
+ if self.tt_embs is None:
+ self.get_glove_embs()
+
+ # Lookup glove embeddings for words
+ tokens = [t for t in sent if t in self.tt_embs.stoi] # in-vocab tokens
+ # glove_oov_tokens = [t for t in sent if t not in self.tt_embs.stoi]
+ # if len(glove_oov_tokens)>0:
+ # print("WARNING: tokens OOV for glove: ", glove_oov_tokens)
+ if len(tokens) == 0:
+ print('WARNING: tried to embed utterance %s but all tokens are OOV for '
+ 'GloVe. Returning embedding=None' % sent)
+ return None
+ word_embs = [self.tt_embs.vectors[self.tt_embs.stoi[t]]
+ for t in tokens] # list of torch Tensors shape (glove_dim)
+
+ # Get unigram probabilities for the words. If we don't have a word in word2prob,
+ # assume it's as rare as the rarest word in word2prob.
+ unigram_probs = [self.word2prob[t] if t in self.word2prob
+ else self.min_word_prob for t in tokens] # list of floats
+ # word2prob_oov_tokens = [t for t in tokens if t not in self.word2prob]
+ # if len(word2prob_oov_tokens)>0:
+ # print('WARNING: tokens OOV for word2prob, so assuming they are '
+ # 'maximally rare: ', word2prob_oov_tokens)
+
+ # Calculate the weighted average of the word embeddings
+ smooth_inverse_freqs = [self.arora_a / (self.arora_a + p)
+ for p in unigram_probs] # list of floats
+ sent_emb = sum([word_emb*wt for (word_emb, wt) in
+ zip(word_embs, smooth_inverse_freqs)
+ ])/len(word_embs) # torch Tensor shape (glove_dim)
+
+ # Remove the first singular value from sent_emb
+ if rem_first_sv:
+ sent_emb = remove_first_sv(sent_emb, self.first_sv)
+
+ return sent_emb
+
+
+def remove_first_sv(emb, first_sv):
+ """
+ Projects out the first singular value (first_sv) from the embedding (emb).
+
+ Inputs:
+ emb: torch Tensor shape (glove_dim)
+ first_sv: torch Tensor shape (glove_dim)
+
+ Returns:
+ new emb: torch Tensor shape (glove_dim)
+ """
+ # Calculate dot prod of emb and first_sv using torch.mm:
+ # (1, glove_dim) x (glove_dim, 1) -> (1,1) -> float
+ dot_prod = torch.mm(torch.unsqueeze(emb, 0), torch.unsqueeze(first_sv, 1)).item()
+ return emb - first_sv * dot_prod
+
+
+def get_word_counts(opt, count_inputs):
+ """Goes through the dataset specified in opt, returns word counts and all utterances
+
+ Inputs:
+ count_inputs: If True, include both input and reply when counting words and
+ utterances. Otherwise, only include reply text.
+
+ Returns:
+ word_counter: a Counter mapping each word to the total number of times it appears
+ total_count: int. total word count, i.e. the sum of the counts for each word
+ all_utts: list of strings. all the utterances that were used for counting words
+ """
+ # Create repeat label agent and assign it to the specified task
+ agent = RepeatLabelAgent(opt)
+ world = create_task(opt, agent)
+
+ # Count word frequency for all words in dataset
+ word_counter = Counter()
+ total_count = 0
+ all_utts = []
+ log_timer = TimeLogger()
+ while True:
+ world.parley()
+
+ # Count words in reply
+ reply = world.acts[0].get('labels', world.acts[0].get('eval_labels'))[0]
+ words = reply.split()
+ word_counter.update(words)
+ total_count += len(words)
+ all_utts.append(reply)
+
+ # Optionally count words in input text
+ if count_inputs:
+ input = world.acts[0]['text']
+ input = input.split('\n')[-1] # e.g. in ConvAI2, this removes persona
+ words = input.split()
+ word_counter.update(words)
+ total_count += len(words)
+ all_utts.append(input)
+
+ if log_timer.time() > opt['log_every_n_secs']:
+ text, _log = log_timer.log(world.total_parleys, world.num_examples())
+ print(text)
+
+ if world.epoch_done():
+ print('EPOCH DONE')
+ break
+
+ assert total_count == sum(word_counter.values())
+
+ return word_counter, total_count, all_utts
+
+
+def learn_arora(opt):
+ """
+ Go through ConvAI2 data and collect word counts, thus compute the unigram
+ probability distribution. Use those probs to compute weighted sentence embeddings
+ for all utterances, thus compute first principal component.
+
+ Save all info to arora.pkl file.
+ """
+ arora_file = os.path.join(opt['datapath'], 'controllable_dialogue', 'arora.pkl')
+
+ opt['task'] = 'fromfile:parlaiformat'
+ opt['log_every_n_secs'] = 2
+
+ print('Getting word counts from ConvAI2 train set...')
+ opt['datatype'] = 'train:ordered'
+ opt['fromfile_datapath'] = os.path.join(
+ opt['datapath'], 'controllable_dialogue', 'ConvAI2_parlaiformat', 'train.txt'
+ )
+    # Don't include inputs because ConvAI2 train set reverses every convo:
+ word_counter_train, total_count_train, all_utts_train = get_word_counts(
+ opt, count_inputs=False)
+
+ print('Getting word counts from ConvAI2 val set...')
+ opt['datatype'] = 'valid'
+ opt['fromfile_datapath'] = os.path.join(
+ opt['datapath'], 'controllable_dialogue', 'ConvAI2_parlaiformat', 'valid.txt'
+ )
+    # Do include inputs because ConvAI2 val set doesn't reverse convos:
+ word_counter_valid, total_count_valid, all_utts_valid = get_word_counts(
+ opt, count_inputs=True)
+
+ # Merge word counts
+ word_counter = word_counter_train
+ for word, count in word_counter_valid.items():
+ word_counter[word] += count
+ total_count = total_count_train + total_count_valid
+
+ # Merge all_utts
+ all_utts = all_utts_train + all_utts_valid
+
+ # Compute unigram prob for every word
+ print("Computing unigram probs for all words...")
+ word2prob = {w: c/total_count for w, c in word_counter.items()}
+
+ # Settings for sentence embedder
+ arora_a = 0.0001
+ glove_name = '840B'
+ glove_dim = 300
+ glove_cache = modelzoo_path(opt['datapath'], 'models:glove_vectors')
+
+ # Embed every sentence, without removing first singular value
+ print('Embedding all sentences...')
+ sent_embedder = SentenceEmbedder(word2prob, arora_a, glove_name, glove_dim,
+ first_sv=None, glove_cache=glove_cache)
+ utt_embs = []
+ log_timer = TimeLogger()
+ for n, utt in enumerate(all_utts):
+ utt_emb = sent_embedder.embed_sent(utt.split(), rem_first_sv=False)
+ utt_embs.append(utt_emb)
+ if log_timer.time() > opt['log_every_n_secs']:
+ text, _log = log_timer.log(n, len(all_utts))
+ print(text)
+
+ # Use SVD to calculate singular vector
+ # https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.linalg.svd.html
+ print('Calculating SVD...')
+ utt_embs = np.stack(utt_embs, axis=0) # shape (num_embs, glove_dim)
+ U, s, V = np.linalg.svd(utt_embs, full_matrices=False)
+ first_sv = V[0, :] # first row of V. shape (glove_dim)
+
+ # Remove singular vector from all embs to get complete Arora-style sent embs
+ print('Removing singular vec from all sentence embeddings...')
+ utt_embs_adj = [remove_first_sv(torch.Tensor(emb), torch.Tensor(first_sv)).numpy()
+ for emb in utt_embs] # list of np arrays shape (glove_dim)
+
+ # Make dict mapping ConvAI2 dataset utterances to Arora sent emb
+ # We save this to file for convenience (e.g. if you want to inspect)
+ utt2emb = {utt: emb for (utt, emb) in zip(all_utts, utt_embs_adj)}
+
+ # Save unigram distribution, first singular value, hyperparameter value for a,
+ # info about GloVe vectors used, and full dict of utt->emb to file
+ print("Saving Arora embedding info to %s..." % arora_file)
+ with open(arora_file, "wb") as f:
+ pickle.dump({
+ 'word2prob': word2prob, # dict: string to float between 0 and 1
+ 'first_sv': first_sv, # np array shape (glove_dim)
+ 'arora_a': arora_a, # float, 0.0001
+ 'glove_name': glove_name, # string, '840B'
+ 'glove_dim': glove_dim, # int, 300
+ 'utt2emb': utt2emb, # dict: string to np array shape (glove_dim)
+ }, f)
+
+
+def load_arora(opt):
+ """
+ Load the data in the arora.pkl file in data/controllable_dialogue.
+ """
+ arora_fp = os.path.join(opt['datapath'], CONTROLLABLE_DIR, 'arora.pkl')
+ print("Loading Arora embedding info from %s..." % arora_fp)
+ with open(arora_fp, "rb") as f:
+ data = pickle.load(f)
+ print("Done loading arora info.")
+ return data
+
+
+if __name__ == '__main__':
+ parser = ParlaiParser()
+ opt = parser.parse_args()
+ learn_arora(opt)
diff --git a/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py b/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
@@ -0,0 +1,1045 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file is derived from parlai/core/seq2seq/seq2seq.py.
+
+In particular, it's derived from an older version that inherits from TorchAgent rather
+than TorchGeneratorAgent.
+
+It should be possible to refactor this file to be comparable to the current
+parlai/core/seq2seq/seq2seq.py, i.e. inherit from TorchGeneratorAgent - this would
+probably reduce the amount of boilerplate in this file.
+
+However, for simplicity and to keep things as similar as possible to the version used
+for the paper, we have kept this file mostly the same.
+"""
+
+from parlai.core.torch_agent import TorchAgent, Output, Batch
+from parlai.core.torch_generator_agent import Beam
+from parlai.core.utils import padded_tensor, round_sigfigs, argsort
+from parlai.core.thread_utils import SharedTable
+from .modules import Seq2seq, opt_to_kwargs
+from .util import ConvAI2History, show_beam_cands, reorder_extrep2gram_qn
+from .controls import (
+ CONTROL2DEFAULTNUMBUCKETS,
+ CONTROL2DEFAULTEMBSIZE,
+ WDFEATURE2UPDATEFN,
+ get_ctrl_vec,
+ get_wd_features,
+ initialize_control_information,
+)
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from collections import defaultdict, namedtuple
+
+import os
+import math
+import json
+import tempfile
+import copy
+
+
+class ControllableSeq2seqAgent(TorchAgent):
+ """
+ This is a version of the Seq2seqAgent, that allows for attribute control via
+ Conditional Training (CT) and/or Weighted Decoding (WD).
+
+ See the paper:
+ "What makes a good conversation? How controllable attributes affect human judgments"
+ https://arxiv.org/pdf/1902.08654.pdf
+ """
+
+ @classmethod
+ def add_cmdline_args(cls, argparser):
+ """Add command-line arguments specifically for this agent."""
+ agent = argparser.add_argument_group('ControllableSeq2seqAgent Arguments')
+ agent.add_argument('--init-model', type=str, default=None,
+ help='load dict/model/opts from this path')
+ agent.add_argument('-hs', '--hiddensize', type=int, default=128,
+ help='size of the hidden layers')
+ agent.add_argument('-esz', '--embeddingsize', type=int, default=128,
+ help='size of the token embeddings')
+ agent.add_argument('-nl', '--numlayers', type=int, default=2,
+ help='number of hidden layers')
+ agent.add_argument('-dr', '--dropout', type=float, default=0.1,
+ help='dropout rate')
+ agent.add_argument('-bi', '--bidirectional', type='bool',
+ default=False,
+ help='whether to encode the context with a '
+ 'bidirectional rnn')
+ agent.add_argument('-att', '--attention', default='none',
+ choices=['none', 'concat', 'general', 'dot',
+ 'local'],
+ help='Choices: none, concat, general, local. '
+ 'If set local, also set attention-length. '
+ '(see arxiv.org/abs/1508.04025)')
+ agent.add_argument('-attl', '--attention-length', default=48, type=int,
+ help='Length of local attention.')
+ agent.add_argument('--attention-time', default='post',
+ choices=['pre', 'post'],
+ help='Whether to apply attention before or after '
+ 'decoding.')
+ agent.add_argument('-rnn', '--rnn-class', default='lstm',
+ choices=Seq2seq.RNN_OPTS.keys(),
+ help='Choose between different types of RNNs.')
+ agent.add_argument('-dec', '--decoder', default='same',
+ choices=['same', 'shared'],
+ help='Choose between different decoder modules. '
+ 'Default "same" uses same class as encoder, '
+ 'while "shared" also uses the same weights. '
+ 'Note that shared disabled some encoder '
+ 'options--in particular, bidirectionality.')
+ agent.add_argument('-lt', '--lookuptable', default='unique',
+ choices=['unique', 'enc_dec', 'dec_out', 'all'],
+ help='The encoder, decoder, and output modules can '
+ 'share weights, or not. '
+ 'Unique has independent embeddings for each. '
+ 'Enc_dec shares the embedding for the encoder '
+ 'and decoder. '
+ 'Dec_out shares decoder embedding and output '
+ 'weights. '
+ 'All shares all three weights.')
+ agent.add_argument('-soft', '--numsoftmax', default=1, type=int,
+ help='default 1, if greater then uses mixture of '
+ 'softmax (see arxiv.org/abs/1711.03953).')
+ agent.add_argument('--beam-size', type=int, default=1,
+ help='Beam size, if 1 then greedy search')
+ agent.add_argument('--beam-dot-log', type='bool', default=False,
+ help='Dump beam trees as png dot images into /tmp folder')
+ agent.add_argument('--beam-min-n-best', type=int, default=3,
+ help='Minimum number of nbest candidates to achieve '
+ 'during the beam search')
+ agent.add_argument('--beam-min-length', type=int, default=3,
+ help='Minimum length of prediction to be generated by '
+ 'the beam search')
+ agent.add_argument('-idr', '--input-dropout', type=float, default=0.0,
+ help='Each token from the input will be masked with'
+ ' __unk__ token with this probability.')
+ agent.add_argument('--beam-block-ngram', type=int, default=0,
+ help='Block all repeating ngrams up to history length n-1')
+ agent.add_argument('-cv', '--control-vars', type=str, default='',
+ help='Comma-separated list of control variables to use')
+ agent.add_argument('-cnb', '--control-num-buckets', type=str, default='',
+ help='Number of buckets for each of the control variables')
+ agent.add_argument('-cesz', '--control-embeddingsize',
+ type=str, default='',
+ help='Sizes for the control variable embeddings')
+ agent.add_argument('--add-control', type='bool', default=False,
+ help='If True, takes an existing saved model, adds necessary'
+ 'parameters for new CT controls, and saves in a new model '
+ 'file')
+ agent.add_argument('--set-controls', type=str, default='',
+ help='Specify fixed settings for CT control variables. '
+ 'For example, avg_niwf:6')
+ agent.add_argument('--beam-reorder', default='none',
+ choices=['none', 'best_extrep2gram_qn'],
+ help='Choices: none, best_extrep2gram_qn.'
+ 'Apply the specified function for reordering the '
+ 'n-best beam search candidates. '
+ 'If best_extrep2gram_qn, then pick candidate which '
+ 'contains question mark and has lowest extrep_2gram')
+ agent.add_argument('-wd', '--weighted-decoding', type=str, default='',
+ help='List of WD features and their corresponding weights '
+ 'For example, intrep_word:-1,extrep_2gram:-1,nidf:3')
+ agent.add_argument('--verbose', type='bool', default=False,
+ help='If true, print out beam search info')
+ TorchAgent.add_cmdline_args(argparser)
+ ControllableSeq2seqAgent.dictionary_class().add_cmdline_args(argparser)
+ return agent
+
+ @staticmethod
+ def model_version():
+ """Return current version of this model, counting up from 0.
+
+ Models may not be backwards-compatible with older versions.
+ Version 1 split from version 0 on Aug 29, 2018.
+ To use version 0, use --model legacy:seq2seq:0
+ (legacy agent code is located in parlai/agents/legacy_agents).
+ """
+ return 1
+
+ def __init__(self, opt, shared=None):
+ """Set up model."""
+ init_model = None
+ if not shared: # only do this on first setup
+ initialize_control_information(opt)
+ # first check load path in case we need to override paths
+ if opt.get('init_model') and os.path.isfile(opt['init_model']):
+ # check first for 'init_model' for loading model from file
+ init_model = opt['init_model']
+ if opt.get('model_file') and os.path.isfile(opt['model_file']):
+ # next check for 'model_file', this would override init_model
+ init_model = opt['model_file']
+
+ if init_model is not None:
+ # if we are loading a model, should load its dict too
+ if (os.path.isfile(init_model + '.dict') or
+ opt['dict_file'] is None):
+ opt['dict_file'] = init_model + '.dict'
+ super().__init__(opt, shared)
+ opt = self.opt
+ assert opt['person_tokens'] # want this for ConvAI2
+ assert opt['add_p1_after_newln'] # want this for ConvAI2
+
+ # all instances may need some params
+ self.id = 'Seq2Seq'
+ self.multigpu = (opt.get('multigpu') and self.use_cuda and
+ (opt.get('batchsize') > 1))
+ states = {}
+
+ self.beam_dot_log = opt.get('beam_dot_log', False)
+ self.beam_size = opt.get('beam_size', 1)
+ self.beam_min_n_best = opt.get('beam_min_n_best', 3)
+ self.beam_min_length = opt.get('beam_min_length', 3)
+ self.beam_block_ngram = opt.get('beam_block_ngram', 0)
+
+ self._init_controls()
+
+ if shared:
+ # set up shared properties
+ self.model = shared['model']
+ self.metrics = shared['metrics']
+ states = shared.get('states', {})
+ else:
+ self.metrics = {'loss': 0.0, 'num_tokens': 0, 'correct_tokens': 0,
+ 'total_skipped_batches': 0}
+ # this is not a shared instance of this class, so do full init
+ if self.beam_dot_log:
+ self.beam_dot_dir = tempfile.mkdtemp(
+ prefix='{}-beamdot-beamsize-{}-'.format(
+ os.path.basename(
+ opt.get('model_file')),
+ self.beam_size))
+ print(
+ '[ Saving dot beam logs in {} ]'.format(
+ self.beam_dot_dir))
+ if init_model is not None:
+ # load model parameters if available
+ print('[ Loading existing model params from {} ]'
+ ''.format(init_model))
+ states = self.load(init_model)
+
+ self._init_model(states=states)
+
+ # set up criteria
+ if opt.get('numsoftmax', 1) > 1:
+ self.criterion = nn.NLLLoss(
+ ignore_index=self.NULL_IDX, reduction='sum')
+ else:
+ self.criterion = nn.CrossEntropyLoss(
+ ignore_index=self.NULL_IDX, reduction='sum')
+
+ if self.use_cuda:
+ self.criterion.cuda()
+
+ if 'train' in opt.get('datatype', ''):
+ self.init_optim(
+ [p for p in self.model.parameters() if p.requires_grad],
+ optim_states=states.get('optimizer'),
+ saved_optim_type=states.get('optimizer_type'))
+
+ self.reset()
+
+ # If we are adding parameters for new CT controls, save and exit
+ if self.opt['add_control']:
+ self.save()
+ print('Finished adding CT control parameters. Saved model. Quitting.')
+ exit()
+
+ def _add_control(self, states):
+ print("Adding new parameters for CT model")
+
+ # Take the new CT embeddings which have been initialized in the model,
+ # and copy them over to states (the params loaded from file)
+ model_ctrl_embs = self.model.decoder.control_encoder.control_embeddings
+ for control_var, emb in model_ctrl_embs.items():
+ # init_control_embs is tensor shape (num buckets, control emb size):
+ init_control_embs = torch.Tensor(copy.deepcopy(emb.weight))
+ key = 'decoder.control_encoder.control_embeddings.%s.weight' % control_var
+ states['model'][key] = init_control_embs
+
+ # Take the extra RNN weights which have been initialized in the model,
+ # and copy them over to states (the params loaded from file).
+ model_dec_input_wts = self.model.decoder.rnn.weight_ih_l0
+ # init_decoder_ih_l0 is tensor shape:
+ # ([hiddensize*4, emb_size + sum of ctrl emb sizes])
+ init_decoder_ih_l0 = torch.Tensor(copy.deepcopy(model_dec_input_wts))
+ # Copy over the trained weights from file, for the non-CT part
+ key = 'decoder.rnn.weight_ih_l0'
+ init_decoder_ih_l0[:, :self.opt['embeddingsize']] = states['model'][key]
+ # Copy the full version (trained non-CT weights plus initialized CT
+ # weights) to states
+ states['model'][key] = init_decoder_ih_l0
+
+ def _init_model(self, states=None):
+ """Initialize model, override to change model setup."""
+ opt = self.opt
+
+ kwargs = opt_to_kwargs(opt)
+ self.model = Seq2seq(
+ len(self.dict), opt['embeddingsize'], opt['hiddensize'],
+ padding_idx=self.NULL_IDX, start_idx=self.START_IDX,
+ unknown_idx=self.dict[self.dict.unk_token],
+ longest_label=states.get('longest_label', 1),
+ control_settings=self.control_settings,
+ **kwargs)
+
+ if (opt.get('dict_tokenizer') == 'bpe' and
+ opt['embedding_type'] != 'random'):
+ print('skipping preinitialization of embeddings for bpe')
+ elif not states and opt['embedding_type'] != 'random':
+ # `not states`: only set up embeddings if not loading model
+ self._copy_embeddings(self.model.decoder.lt.weight,
+ opt['embedding_type'])
+ if opt['lookuptable'] in ['unique', 'dec_out']:
+ # also set encoder lt, since it's not shared
+ self._copy_embeddings(self.model.encoder.lt.weight,
+ opt['embedding_type'], log=False)
+
+ if states:
+ if self.opt['add_control']: # Add parameters for new CT controls
+ self._add_control(states)
+
+ # set loaded states if applicable
+ self.model.load_state_dict(states['model'])
+
+ if opt['embedding_type'].endswith('fixed'):
+ print('Seq2seq: fixing embedding weights.')
+ self.model.decoder.lt.weight.requires_grad = False
+ self.model.encoder.lt.weight.requires_grad = False
+ if opt['lookuptable'] in ['dec_out', 'all']:
+ self.model.decoder.e2s.weight.requires_grad = False
+
+ if self.use_cuda:
+ self.model.cuda()
+ if self.multigpu:
+ self.model = torch.nn.DataParallel(self.model)
+ self.model.encoder = self.model.module.encoder
+ self.model.decoder = self.model.module.decoder
+ self.model.longest_label = self.model.module.longest_label
+ self.model.output = self.model.module.output
+
+ return self.model
+
+ def _init_controls(self):
+ """
+ Sets the following:
+
+ self.control_vars: list of strings. The CT controls sorted alphabetically.
+
+ self.control_settings: a dictionary containing info about the CT controls.
+ Each control name maps to a dictionary that contains:
+ 'embsize': embedding size for this control
+ 'num_buckets': num buckets for this control
+ 'set_value': a set value for this control, or None
+ 'idx': the index of this control in this list self.control_vars
+
+ self.wd_features: list of strings, the WD features to use.
+
+ self.wd_wts: list of floats, the WD weights to use.
+ """
+
+ # Set self.control_vars, a list of the CT control vars in alphabetical order
+ self.control_vars = (sorted(self.opt['control_vars'].split(','))
+ if self.opt['control_vars'] != '' else [])
+
+ # Process the control_num_buckets flag (for CT)
+ ctrl_numbucket_override = {}
+ if self.opt['control_num_buckets'] != "":
+ ctrl_numbucket_override = {
+ s.split(':')[0]: int(s.split(':')[1])
+ for s in self.opt['control_num_buckets'].split(',')
+ } # string to int
+
+ # Process the control_embeddingsize flag (for CT)
+ ctrl_esz_override = {}
+ if self.opt['control_embeddingsize'] != "":
+ ctrl_esz_override = {
+ s.split(':')[0]: int(s.split(':')[1])
+ for s in self.opt['control_embeddingsize'].split(',')
+ } # string to int
+
+ # Process the set_controls flag, which gives user-supplied settings for CT
+ set_controls = {}
+ if self.opt['set_controls'] != "":
+ set_controls = {} # string to (int or string)
+ for s in self.opt['set_controls'].split(','):
+ control, set_val = s.split(':')[0], s.split(':')[1]
+ if control not in self.control_vars:
+ raise ValueError("Received --set-controls for control '%s', but "
+ "that is not one of the existing CT controls for "
+ "this model, which are: %s"
+ % (control, ', '.join(self.control_vars)))
+ try:
+ set_val = int(set_val) # set_val should be a string of an int
+ except ValueError:
+ raise ValueError("Received --set-controls '%s' for CT "
+ "control '%s'. The set value must be an integer."
+ % (set_val, control))
+ set_controls[control] = int(set_val)
+
+ # Set self.control_settings for the CT controls
+ self.control_settings = {}
+ for idx, c in enumerate(self.control_vars):
+ d = {}
+ d['embsize'] = (ctrl_esz_override[c] if c in ctrl_esz_override
+ else CONTROL2DEFAULTEMBSIZE[c])
+ d['num_buckets'] = (ctrl_numbucket_override[c]
+ if c in ctrl_numbucket_override
+ else CONTROL2DEFAULTNUMBUCKETS[c])
+ if c in set_controls:
+ set_val = set_controls[c]
+ if set_val not in range(d['num_buckets']):
+ raise ValueError("Received --set-controls '%s' for CT control "
+ "'%s', which has num_buckets=%i. The set value "
+ "must be between 0 and %i." %
+ (set_val, c, d['num_buckets'], d['num_buckets']-1))
+ d['set_value'] = set_controls[c] if c in set_controls else None
+ d['idx'] = idx
+ self.control_settings[c] = d
+
+ # Get list of WD features and weights, self.wd_features and self.wd_weights
+ if self.opt['weighted_decoding'] != "":
+ if self.beam_size == 1:
+ raise ValueError("WD control is not currently implemented for greedy "
+ "search. Either increase --beam-size to be greater "
+ "than 1, or do not enter --weighted-decoding (-wd).")
+
+ # Get a list of (feature, weight) i.e. (string, float) pairs
+ wd_feats_wts = [(s.split(':')[0], float(s.split(':')[1]))
+ for s in self.opt['weighted_decoding'].split(',')]
+ self.wd_features = [f for (f, w) in wd_feats_wts] # list of strings
+ for wd_feat in self.wd_features:
+ if wd_feat not in WDFEATURE2UPDATEFN:
+ raise ValueError("'%s' is not an existing WD feature. Available WD "
+ "features: %s"
+ % (wd_feat, ', '.join(WDFEATURE2UPDATEFN.keys())))
+ self.wd_wts = [w for (f, w) in wd_feats_wts] # list of floats
+ else:
+ self.wd_features, self.wd_wts = [], []
+
+ def _v2t(self, vec):
+ """Convert token indices to string of tokens."""
+ new_vec = []
+ if hasattr(vec, 'cpu'):
+ vec = vec.cpu()
+ for i in vec:
+ if i == self.END_IDX:
+ break
+ elif i != self.START_IDX:
+ new_vec.append(i)
+ return self.dict.vec2txt(new_vec)
+
+ def zero_grad(self):
+ """Zero out optimizer."""
+ self.optimizer.zero_grad()
+
+ def update_params(self):
+ """Do one optimization step."""
+ if self.opt['gradient_clip'] > 0:
+ torch.nn.utils.clip_grad_norm_(self.model.parameters(),
+ self.opt['gradient_clip'])
+ self.optimizer.step()
+
+ def reset_metrics(self):
+ """Reset metrics for reporting loss and perplexity."""
+ super().reset_metrics()
+ self.metrics['loss'] = 0.0
+ self.metrics['num_tokens'] = 0
+ self.metrics['correct_tokens'] = 0
+
+ def report(self):
+ """Report loss and perplexity from model's perspective.
+
+ Note that this includes predicting __END__ and __UNK__ tokens and may
+ differ from a truly independent measurement.
+ """
+ m = {}
+ num_tok = self.metrics['num_tokens']
+ if num_tok > 0:
+ if self.metrics['correct_tokens'] > 0:
+ m['token_acc'] = self.metrics['correct_tokens'] / num_tok
+ m['loss'] = self.metrics['loss'] / num_tok
+ try:
+ m['ppl'] = math.exp(m['loss'])
+ except OverflowError:
+ m['ppl'] = float('inf')
+ if self.metrics['total_skipped_batches'] > 0:
+ m['total_skipped_batches'] = self.metrics['total_skipped_batches']
+ for k, v in m.items():
+ # clean up: rounds to sigfigs and converts tensors to floats
+ m[k] = round_sigfigs(v, 4)
+ return m
+
+ def share(self):
+ """Share internal states between parent and child instances."""
+ shared = super().share()
+ shared['model'] = self.model
+ if self.opt.get('numthreads', 1) > 1:
+ # we're doing hogwild so share the model too
+ if isinstance(self.metrics, dict):
+ # move metrics and model to shared memory
+ self.metrics = SharedTable(self.metrics)
+ self.model.share_memory()
+ shared['states'] = { # don't share optimizer states
+ 'optimizer_type': self.opt['optimizer'],
+ }
+ shared['metrics'] = self.metrics # do after numthreads check
+ if self.beam_dot_log is True:
+ shared['beam_dot_dir'] = self.beam_dot_dir
+ return shared
+
+ def vectorize(self, *args, **kwargs):
+ """Override vectorize for seq2seq."""
+ kwargs['add_start'] = False # model does this in module code
+ kwargs['add_end'] = True # we do want this
+ return super().vectorize(*args, **kwargs)
+
+ def batchify(self, *args, **kwargs):
+ """Override batchify options for seq2seq."""
+ kwargs['sort'] = True # need sorted for pack_padded
+ batch = super().batchify(*args, **kwargs)
+
+ # Get some args needed for batchify
+ obs_batch = args[0]
+ sort = kwargs['sort']
+ is_valid = (lambda obs: 'text_vec' in obs
+ or 'image' in obs) # from TorchAgent.batchify
+
+ # Run this part of TorchAgent's batchify to get exs in correct order
+
+ # ==================== START COPIED FROM TORCHAGENT ===================
+ if len(obs_batch) == 0:
+ return Batch()
+
+ valid_obs = [(i, ex) for i, ex in enumerate(obs_batch) if is_valid(ex)]
+
+ if len(valid_obs) == 0:
+ return Batch()
+
+ valid_inds, exs = zip(*valid_obs)
+
+ # TEXT
+ xs, x_lens = None, None
+ if any('text_vec' in ex for ex in exs):
+ _xs = [ex.get('text_vec', self.EMPTY) for ex in exs]
+ xs, x_lens = padded_tensor(_xs, self.NULL_IDX, self.use_cuda)
+ if sort:
+ sort = False # now we won't sort on labels
+ xs, x_lens, valid_inds, exs = argsort(
+ x_lens, xs, x_lens, valid_inds, exs, descending=True
+ )
+
+ # ======== END COPIED FROM TORCHAGENT ========
+
+ # Add history to the batch
+ history = [ConvAI2History(ex['text'], dictionary=self.dict) for ex in exs]
+
+ # Add CT control vars to batch
+ ctrl_vec = get_ctrl_vec(exs, history, self.control_settings) # tensor or None
+ if self.use_cuda and ctrl_vec is not None:
+ ctrl_vec = ctrl_vec.cuda()
+
+ # Replace the old namedtuple with a new one that includes ctrl_vec and history
+ ControlBatch = namedtuple('Batch', tuple(batch.keys())+('ctrl_vec', 'history'))
+ batch = ControlBatch(ctrl_vec=ctrl_vec, history=history, **dict(batch))
+
+ return batch
+
+ def _init_cuda_buffer(self, model, criterion, batchsize, maxlen):
+ """Pre-initialize CUDA buffer by doing fake forward pass."""
+ if self.use_cuda and not hasattr(self, 'buffer_initialized'):
+ try:
+ print('preinitializing pytorch cuda buffer')
+ dummy = torch.ones(batchsize, maxlen).long().cuda()
+ if len(self.control_settings) > 0:
+ ctrl_dummy = torch.ones(batchsize,
+ len(self.control_settings)).long().cuda()
+ else:
+ ctrl_dummy = None
+ out = model(dummy, ctrl_dummy, dummy)
+ sc = out[0] # scores
+ loss = criterion(sc.view(-1, sc.size(-1)), dummy.view(-1))
+ loss.backward()
+ self.buffer_initialized = True
+ except RuntimeError as e:
+ if 'out of memory' in str(e):
+ m = ('CUDA OOM: Lower batch size (-bs) from {} or lower '
+ ' max sequence length (-tr) from {}'
+ ''.format(batchsize, maxlen))
+ raise RuntimeError(m)
+ else:
+ raise e
+
+ def train_step(self, batch):
+ """Train on a single batch of examples."""
+ batchsize = batch.text_vec.size(0)
+ if self.multigpu and batchsize % 2 != 0:
+ # throw out one training example
+ batch = self.truncate_input(batch)
+ # helps with memory usage
+ self._init_cuda_buffer(self.model, self.criterion, batchsize,
+ self.truncate or 180)
+ self.model.train()
+ self.zero_grad()
+
+ try:
+ seq_len = None if not self.multigpu else batch.text_vec.size(1)
+ out = self.model(batch.text_vec, batch.ctrl_vec, batch.label_vec,
+ seq_len=seq_len)
+
+ # generated response
+ scores = out[0]
+ _, preds = scores.max(2)
+
+ score_view = scores.view(-1, scores.size(-1))
+ loss = self.criterion(score_view, batch.label_vec.view(-1))
+ # save loss to metrics
+ notnull = batch.label_vec.ne(self.NULL_IDX)
+ target_tokens = notnull.long().sum().item()
+ correct = ((batch.label_vec == preds) * notnull).sum().item()
+ self.metrics['correct_tokens'] += correct
+ self.metrics['loss'] += loss.item()
+ self.metrics['num_tokens'] += target_tokens
+ loss /= target_tokens # average loss per token
+ loss.backward()
+ self.update_params()
+ except RuntimeError as e:
+ # catch out of memory exceptions during fwd/bck (skip batch)
+ if 'out of memory' in str(e):
+ print('| WARNING: ran out of memory, skipping batch. '
+ 'if this happens frequently, decrease batchsize or '
+ 'truncate the inputs to the model.')
+ self.metrics['total_skipped_batches'] += 1
+ else:
+ raise e
+
+ def _build_cands(self, batch):
+ if not batch.candidates:
+ return None, None
+ cand_inds = [i for i in range(len(batch.candidates))
+ if batch.candidates[i]]
+ cands = [batch.candidate_vecs[i] for i in cand_inds]
+ max_cands_len = max(
+ [max([cand.size(0) for cand in cands_i]) for cands_i in cands]
+ )
+ for i, c in enumerate(cands):
+ cands[i] = padded_tensor(c,
+ use_cuda=self.use_cuda,
+ max_len=max_cands_len)[0].unsqueeze(0)
+ cands = torch.cat(cands, 0)
+ return cands, cand_inds
+
+ def _pick_cands(self, cand_preds, cand_inds, cands):
+ cand_replies = [None] * len(cands)
+ for idx, order in enumerate(cand_preds):
+ batch_idx = cand_inds[idx]
+ cand_replies[batch_idx] = [cands[batch_idx][i] for i in order]
+ return cand_replies
+
+ def greedy_search(self, batch):
+ cand_params = self._build_cands(batch)
+ seq_len = None if not self.multigpu else batch.text_vec.size(1)
+ out = self.model(batch.text_vec, batch.ctrl_vec, ys=None, cands=cand_params[0],
+ seq_len=seq_len)
+ return out, cand_params
+
+ @staticmethod
+ def beam_search(model, batch, beam_size, dictionary, start=1, end=2,
+ pad=0, min_length=3, min_n_best=5, max_ts=40, block_ngram=0,
+ wd_features=[], wd_wts=[]):
+ """ Beam search given the model and Batch
+ This function uses model with the following reqs:
+ - model.encoder takes input returns tuple (enc_out, enc_hidden, attn_mask)
+ - model.decoder takes decoder params and returns decoder outputs after attn
+ - model.output takes decoder outputs and returns distr over dictionary
+
+ Function arguments:
+ model : nn.Module, here defined in modules.py
+ batch : Batch structure with input and labels
+        beam_size : Size of each beam during the search
+        dictionary : parlai DictionaryAgent (used for the vocabulary size and
+            for computing WD features)
+        start : start of sequence token
+        end : end of sequence token
+        pad : padding token
+        min_length : minimum length of the decoded sequence
+        min_n_best : minimum number of completed hypotheses generated from each beam
+        max_ts : the maximum length of the decoded sequence
+        block_ngram : passed to each Beam; used to discourage repeated n-grams
+            (0 disables blocking)
+        wd_features : list of strings, the WD features to use
+        wd_wts : list of floats, the WD weights to use
+
+ Return:
+ beam_preds_scores : list of tuples (prediction, score) for each sample in Batch
+ n_best_preds_scores : list of n_best list of tuples (prediction, score) for
+ each sample from Batch
+ beams : list of Beam instances defined in Beam class, can be used for any
+ following postprocessing, e.g. dot logging.
+ """
+ encoder_states = model.encoder(batch.text_vec)
+ enc_out = encoder_states[0]
+ enc_hidden = encoder_states[1]
+ attn_mask = encoder_states[2]
+ current_device = encoder_states[0][0].device
+ vocab_size = len(dictionary)
+
+ batch_size = len(batch.text_lengths)
+ beams = [Beam(beam_size, min_length=min_length, padding_token=pad,
+ bos_token=start, eos_token=end, min_n_best=min_n_best,
+ cuda=current_device,
+ block_ngram=block_ngram) for i in range(batch_size)]
+ decoder_input = torch.Tensor([start]).detach().expand(
+ batch_size, 1).long().to(current_device)
+ # repeat encoder_outputs, hiddens, attn_mask
+ decoder_input = decoder_input.repeat(
+ 1, beam_size).view(beam_size * batch_size, -1)
+
+ # ctrl_input is shape (bsz, num_controls)
+ # we want it to be (bsz*beam_size, num_controls)
+ ctrl_input = batch.ctrl_vec
+ if batch.ctrl_vec is not None:
+ ctrl_input = batch.ctrl_vec.repeat(beam_size, 1)
+
+ enc_out = enc_out.unsqueeze(1).repeat(1, beam_size, 1, 1).view(
+ batch_size * beam_size, -1, enc_out.size(-1))
+ attn_mask = encoder_states[2].repeat(
+ 1, beam_size).view(attn_mask.size(0) * beam_size, -1)
+ repeated_hiddens = []
+ if isinstance(enc_hidden, tuple): # LSTM
+ for i in range(len(enc_hidden)):
+ repeated_hiddens.append(
+ enc_hidden[i].unsqueeze(2).repeat(1, 1, beam_size, 1))
+ num_layers = enc_hidden[0].size(0)
+ hidden_size = enc_hidden[0].size(-1)
+ enc_hidden = tuple([repeated_hiddens[i].view(
+ num_layers, batch_size *
+ beam_size, hidden_size) for i in range(len(repeated_hiddens))])
+ else: # GRU
+ num_layers = enc_hidden.size(0)
+ hidden_size = enc_hidden.size(-1)
+ enc_hidden = enc_hidden.unsqueeze(2).repeat(1, 1, beam_size, 1).view(
+ num_layers, batch_size * beam_size, hidden_size)
+
+ hidden = enc_hidden
+ for ts in range(max_ts):
+ if all((b.done() for b in beams)):
+ break
+ output, hidden = model.decoder(
+ decoder_input, ctrl_input, hidden, (enc_out, attn_mask))
+ score = model.output(output)
+ # score contains softmax scores for batch_size * beam_size samples
+ score = score.view(batch_size, beam_size, -1)
+ score = F.log_softmax(score, dim=-1)
+ for i, b in enumerate(beams):
+ if not b.done():
+ scores_in = score[i]
+
+ # If using WD, update scores_in to reflect the WD features
+ if len(wd_features) > 0:
+
+ # Obtain wd_feat_vecs, the sum of the weighted features
+ # across the whole vocabulary
+ wd_feat_vecs = torch.zeros((beam_size, vocab_size))
+ for hyp_idx in range(beam_size): # For each hypothesis
+
+ # Get the partial hypothesis (None if first timestep)
+ partial_hyp = b.partial_hyps[hyp_idx] if ts > 0 else None
+
+ # Get the WD feature vector (a tensor) for this hypothesis
+ wd_feat_vec = get_wd_features(dictionary, partial_hyp,
+ batch.history[i], wd_features,
+ wd_wts) # shape (vocab_size)
+
+ wd_feat_vecs[hyp_idx, :] = wd_feat_vec
+ wd_feat_vecs = wd_feat_vecs.to(current_device)
+
+ # Add the WD features to the log probability scores
+ scores_in = scores_in + wd_feat_vecs
+
+ # Update the beam as usual
+ b.advance(scores_in)
+
+ decoder_input = torch.cat(
+ [b.get_output_from_current_step() for b in beams]).unsqueeze(-1)
+ permute_hidden_idx = torch.cat(
+ [beam_size * i +
+ b.get_backtrack_from_current_step() for i, b in enumerate(beams)])
+ # permute decoder hiddens with respect to chosen hypothesis now
+ if isinstance(hidden, tuple): # LSTM
+ for i in range(len(hidden)):
+ hidden[i].data.copy_(hidden[i].data.index_select(
+ dim=1, index=permute_hidden_idx))
+ else: # GRU
+ hidden.data.copy_(hidden.data.index_select(
+ dim=1, index=permute_hidden_idx))
+ for b in beams:
+ b.check_finished()
+
+ beam_preds_scores = [list(b.get_top_hyp()) for b in beams]
+ for pair in beam_preds_scores:
+ pair[0] = Beam.get_pretty_hypothesis(pair[0])
+
+ n_best_beams = [b.get_rescored_finished(
+ n_best=min_n_best) for b in beams]
+ n_best_beam_preds_scores = []
+ for i, beamhyp in enumerate(n_best_beams):
+ this_beam = []
+ for hyp in beamhyp:
+ pred = beams[i].get_pretty_hypothesis(
+ beams[i].get_hyp_from_finished(hyp))
+ score = hyp.score
+ this_beam.append((pred, score))
+ n_best_beam_preds_scores.append(this_beam)
+
+ return beam_preds_scores, n_best_beam_preds_scores, beams
+
+ def extend_input(self, batch):
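+        """Pad the batch with one dummy example so that it splits evenly
+        across GPUs when using multigpu; the extra output row is later
+        removed by truncate_output."""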
+ # add pad tensor to text vec
+ pad_tensor = torch.zeros(1, batch.text_vec.size(1)).long().cuda()
+ text_vec = torch.cat([batch.text_vec, pad_tensor], 0)
+ batch = batch._replace(text_vec=text_vec)
+ if batch.label_vec is not None:
+ # add pad tensor to label vec
+ pad_tensor = torch.zeros(
+ 1,
+ batch.label_vec.size(1)
+ ).long().cuda()
+ label_vec = torch.cat([batch.label_vec, pad_tensor], 0)
+ batch = batch._replace(label_vec=label_vec)
+ if batch.candidates is not None:
+ # add dummy candidates list
+ dummy_list = [['None'] for _ in range(len(batch.candidates[0]))]
+ batch = batch._replace(candidates=batch.candidates + [dummy_list])
+ # add pad tensor to candidate_vecs
+ new_vecs = (batch.candidate_vecs +
+ [[torch.zeros(1).long() for _ in
+ range(len(batch.candidate_vecs[0]))]])
+ batch = batch._replace(candidate_vecs=new_vecs)
+ return batch
+
+ def truncate_input(self, batch):
+ # truncate batch for multigpu
+ text_vec = batch.text_vec[:-1]
+ batch = batch._replace(text_vec=text_vec)
+ if batch.label_vec is not None:
+ label_vec = batch.label_vec[:-1]
+ batch = batch._replace(label_vec=label_vec)
+ return batch
+
+ def truncate_output(self, out):
+ new_out_0 = out[0][:-1]
+ new_out_1 = None if out[1] is None else out[1][:-1]
+ new_out_2 = [vec[:-1] for vec in out[2]]
+ return tuple([new_out_0, new_out_1, new_out_2])
+
+ def eval_step(self, batch):
+ """Evaluate a single batch of examples."""
+ if batch.text_vec is None:
+ return
+ orig_batch = batch # save for evaluation
+ needs_truncation = self.multigpu and batch.text_vec.size(0) % 2 != 0
+ if needs_truncation:
+ # for multigpu, we need to split evenly across gpus
+ batch = self.extend_input(batch)
+ self.model.eval()
+ cand_scores = None
+ if self.beam_size == 1:
+ out, cand_params = self.greedy_search(batch)
+ if needs_truncation:
+ out = self.truncate_output(out)
+ if cand_params[0] is not None:
+ cand_params = (cand_params[0][:-1], cand_params[1][:-1])
+ scores, cand_scores = out[0], out[1]
+ _, preds = scores.max(2)
+ elif self.beam_size > 1:
+ out = ControllableSeq2seqAgent.beam_search(
+ self.model,
+ batch,
+ self.beam_size,
+ self.dict,
+ start=self.START_IDX,
+ end=self.END_IDX,
+ pad=self.NULL_IDX,
+ min_length=self.beam_min_length,
+ min_n_best=self.beam_min_n_best,
+ block_ngram=self.beam_block_ngram,
+ wd_features=self.wd_features,
+ wd_wts=self.wd_wts)
+ if needs_truncation:
+ out = self.truncate_output(out)
+ beam_preds_scores, n_best_preds_scores, beams = out
+
+ # Optionally print out the n-best beam search candidates
+ if self.opt['verbose']:
+ for cands, hist in zip(n_best_preds_scores, batch.history):
+ show_beam_cands(cands, hist, self.dict)
+
+ # If we have a special reordering function, apply it to choose the best
+ # one of the candidates.
+ if self.opt['beam_reorder'] == 'best_extrep2gram_qn':
+ beam_preds_scores = [reorder_extrep2gram_qn(cands, hist, self.dict,
+ self.opt['verbose']) for cands, hist in
+ zip(n_best_preds_scores, batch.history)]
+
+ preds, scores = [p[0] for p in beam_preds_scores], [
+ p[1] for p in beam_preds_scores]
+ if self.beam_dot_log is True:
+ for i, b in enumerate(beams):
+ dot_graph = b.get_beam_dot(dictionary=self.dict, n_best=3)
+ image_name = self._v2t(batch.text_vec[i, -20:]).replace(
+ ' ',
+ '-').replace('__null__', '')
+ dot_graph.write_png(os.path.join(
+ self.beam_dot_dir, "{}.png".format(image_name)))
+
+ if batch.label_vec is not None:
+ # calculate loss on targets with teacher forcing
+ seq_len = None if not self.multigpu else batch.text_vec.size(1)
+ out = self.model(batch.text_vec, batch.ctrl_vec, batch.label_vec,
+ seq_len=seq_len)
+ if needs_truncation:
+ out = self.truncate_output(out)
+ f_scores = out[0] # forced scores
+ _, f_preds = f_scores.max(2) # forced preds
+ score_view = f_scores.view(-1, f_scores.size(-1))
+ loss = self.criterion(score_view, orig_batch.label_vec.view(-1))
+ # save loss to metrics
+ notnull = orig_batch.label_vec.ne(self.NULL_IDX)
+ target_tokens = notnull.long().sum().item()
+ correct = ((orig_batch.label_vec == f_preds) * notnull).sum().item()
+ self.metrics['correct_tokens'] += correct
+ self.metrics['loss'] += loss.item()
+ self.metrics['num_tokens'] += target_tokens
+
+ cand_choices = None
+ if cand_scores is not None:
+ cand_preds = cand_scores.sort(1, descending=True)[1]
+ # now select the text of the cands based on their scores
+ cand_choices = self._pick_cands(cand_preds, cand_params[1],
+ orig_batch.candidates)
+
+ text = [self._v2t(p) for p in preds]
+
+ return Output(text, cand_choices)
+
+ def save(self, path=None):
+ """Save model parameters if model_file is set."""
+ path = self.opt.get('model_file', None) if path is None else path
+
+ if path and hasattr(self, 'model'):
+ model = {}
+ model['model'] = self.model.state_dict()
+ model['longest_label'] = self.model.longest_label
+ model['optimizer'] = self.optimizer.state_dict()
+ model['optimizer_type'] = self.opt['optimizer']
+
+ with open(path, 'wb') as write:
+ torch.save(model, write)
+
+ # save opt file
+ with open(path + '.opt', 'w') as handle:
+ # save version string
+ self.opt['model_version'] = self.model_version()
+ json.dump(self.opt, handle)
+
+ def load(self, path):
+ """Return opt and model states."""
+ states = torch.load(path, map_location=lambda cpu, _: cpu)
+
+ # check opt file for multigpu
+ with open(path + ".opt", 'r') as handle:
+ saved_opt = json.load(handle)
+ if saved_opt.get('multigpu'):
+ # create new OrderedDict that does not contain `module.`
+ from collections import OrderedDict
+ new_state_dict = OrderedDict()
+ for k, v in states['model'].items():
+ if k.startswith('module'):
+ name = k[7:] # remove `module.`
+ new_state_dict[name] = v
+ states['model'] = new_state_dict
+
+ return states
+
+
+class mydefaultdict(defaultdict):
+ """Get function also uses default_factory for this defaultdict.
+
+ This makes dict.get() behave like dict[] if a default is not provided.
+ """
+
+ def get(self, key, default=None):
+ """Return value at key or default if key is not in dict.
+
+ If a default is not provided, return the default factory value.
+ """
+ # override default from "get" (like "__getitem__" already is)
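+        # note: because of the `or`, a falsy default (e.g. 0 or '') also falls
+        # back to the factory value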
+ return super().get(key, default or self.default_factory())
+
+
+class PerplexityEvaluatorAgent(ControllableSeq2seqAgent):
+ """Subclass for doing standardized perplexity evaluation.
+
+ This is designed to be used in conjunction with the PerplexityWorld at
+ parlai/scripts/eval_ppl.py. It uses the `next_word_probability` function
+ to calculate the probability of tokens one token at a time.
+ """
+
+ def __init__(self, opt, shared=None):
+ """Initialize evaluator."""
+ if opt.get('multigpu'):
+ print('| WARNING: Multi-GPU is not supported for the Perplexity ' +
+ 'Evaluator Agent. Setting this option to False.')
+ opt['multigpu'] = False
+ super().__init__(opt, shared)
+ self.prev_enc = None
+ self.last_xs = None
+
+ def next_word_probability(self, partial_out):
+ """Return probability distribution over next words.
+
+ This probability is based on both nn input and partial true output.
+ This is used to calculate the per-word perplexity.
+
+        Arguments:
+        partial_out -- list of previous "true" words
+        (the input observation dict is read from self.observation, not passed in)
+
+ Returns a dict, where each key is a word and each value is a
+ probability score for that word.
+ Unset keys will use a probability of 1e-7.
+
+ e.g.
+ {'text': 'Run test program.'}, ['hello'] => {'world': 1.0}
+ """
+ obs = self.observation
+ xs = obs['text_vec'].unsqueeze(0)
+ ys = self._vectorize_text(
+ ' '.join(partial_out), False, True, self.truncate
+ ).unsqueeze(0)
+ if self.prev_enc is not None and self.last_xs is not None and (
+ xs.shape[1] != self.last_xs.shape[1] or
+ (xs == self.last_xs).sum().item() != xs.shape[1]):
+ # reset prev_enc, this is a new input
+ self.prev_enc = None
+ self.last_xs = xs
+
+ self.model.eval()
+ out = self.model(
+ xs,
+ ctrl_inputs=None,
+ ys=(ys if len(partial_out) > 0 else None),
+ prev_enc=self.prev_enc,
+ maxlen=1)
+ scores, self.prev_enc = out[0], out[2]
+ # scores is bsz x seqlen x num_words, so select probs of current index
+ probs = F.softmax(scores.select(1, -1), dim=1).squeeze()
+ dist = mydefaultdict(lambda: 1e-7) # default probability for any token
+ for i in range(len(probs)):
+ dist[self.dict[i]] = probs[i].item()
+ return dist
diff --git a/projects/controllable_dialogue/controllable_seq2seq/controls.py b/projects/controllable_dialogue/controllable_seq2seq/controls.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/controls.py
@@ -0,0 +1,928 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file contains the main code for running CT and WD controlled models.
+"""
+
+import torch
+import numpy as np
+from parlai.core.build_data import modelzoo_path
+from projects.controllable_dialogue.tasks.build import build
+from .stopwords import STOPWORDS
+from .nidf import load_word2nidf
+from .arora import SentenceEmbedder, load_arora
+
+
+# Interrogative words, used to control question-asking via weighted decoding
+# From https://en.wikipedia.org/wiki/Interrogative_word
+QN_WORDS = ['who', 'what', 'where', 'why', 'when', 'how', 'which', 'whom', 'whose', '?']
+
+
+# ========================================
+# LOADING NIDF MEASURES
+# ========================================
+
+class NIDFFeats(object):
+ """
+ An object to hold a vector containing the NIDF values for all words in the
+    vocabulary. The vector is constructed when first needed.
+ """
+
+ def __init__(self):
+ self.NIDF_FEATS = None # will be vector length vocab_size containing NIDF vals
+
+ def make_feat_vec(self, dict):
+ """
+ Construct the NIDF feature vector for the given dict.
+ """
+ print("Constructing NIDF feature vector...")
+ self.NIDF_FEATS = torch.zeros((len(dict)))
+ num_oovs = 0
+ for idx in range(len(dict)):
+ word = dict[idx]
+ if word in word2nidf:
+ # Leave emoji (these appear in Twitter dataset) as NIDF=0
+ # (so we don't encourage emoji when we set WD weight high for NIDF)
+ if word[0] == '@' and word[-1] == '@':
+ continue
+ nidf = word2nidf[word] # between 0 and 1
+ self.NIDF_FEATS[idx] = nidf
+ else:
+ # print("WARNING: word %s has no NIDF; marking it as NIDF=0" % word)
+ num_oovs += 1 # If we don't have NIDF for this word, set as 0
+ print('Done constructing NIDF feature vector; of %i words in dict there '
+ 'were %i words with unknown NIDF; they were marked as NIDF=0.'
+ % (len(dict), num_oovs))
+
+ def get_feat_vec(self, dict):
+ """
+ Return the NIDF feature vector. If necessary, construct it first.
+ """
+ if self.NIDF_FEATS is None:
+ self.make_feat_vec(dict)
+ return self.NIDF_FEATS
+
+
+word2nidf = None
+nidf_feats = None
+arora_data = None
+sent_embedder = None
+
+
+def initialize_control_information(opt, build_task=True):
+ """
+ Loads information from word2count.pkl, arora.pkl in data/controllable_dialogue, and
+ uses it to initialize objects for computing NIDF and response-relatedness controls.
+
+ By default (build_task=True) we will also build the controllable_dialogue task i.e.
+ download data/controllable_dialogue if necessary.
+ """
+ global word2nidf, nidf_feats, arora_data, sent_embedder
+
+ if word2nidf is not None:
+ # already loaded, no need to do anything
+ return
+
+ if build_task:
+ build(opt)
+
+ print("Loading up controllable features...")
+ word2nidf = load_word2nidf(opt) # get word2nidf dict
+ nidf_feats = NIDFFeats() # init the NIDFFeats object
+ # load info for arora sentence embeddings
+ arora_data = load_arora(opt)
+ sent_embedder = SentenceEmbedder(
+ arora_data['word2prob'],
+ arora_data['arora_a'],
+ arora_data['glove_name'],
+ arora_data['glove_dim'],
+ arora_data['first_sv'],
+ glove_cache=modelzoo_path(opt['datapath'], 'models:glove_vectors'),
+ )
+
+
+# ========================================
+# UTIL
+# ========================================
+
+def flatten(list_of_lists):
+ """Flatten a list of lists"""
+ return [item for sublist in list_of_lists for item in sublist]
+
+
+def intrep_frac(lst):
+ """Returns the fraction of items in the list that are repeated"""
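+    # e.g. intrep_frac(['a', 'b', 'a', 'a']) == 0.5
+    # (two of the four items repeat earlier ones)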
+ if len(lst) == 0:
+ return 0
+ num_rep = 0
+ for idx in range(len(lst)):
+ if lst[idx] in lst[:idx]:
+ num_rep += 1
+ return num_rep/len(lst)
+
+
+def extrep_frac(lst1, lst2):
+ """Returns the fraction of items in lst1 that are in lst2"""
+ if len(lst1) == 0:
+ return 0
+ num_rep = len([x for x in lst1 if x in lst2])
+ return num_rep/len(lst1)
+
+
+def get_ngrams(text, n):
+ """Returns all ngrams that are in the text.
+ Inputs:
+ text: string
+ n: int
+ Returns:
+        list of strings (each is an ngram)
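+    Example:
+        get_ngrams("i like cats", 2) returns ["i like", "like cats"]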
+ """
+ tokens = text.split()
+ return [" ".join(tokens[i:i+n]) for i in range(len(tokens)-(n-1))] # list of str
+
+
+def matching_ngram_completions(comparison_seq, hypothesis, n):
+ """
+    Return the list of words that, if appended to hypothesis, would create an n-gram that
+ already exists in comparison_seq. For efficiency, this function represents words
+ as integers not strings.
+
+ Inputs:
+ comparison_seq: list of integers
+ hypothesis: list of integers or None
+ n: integer
+
+ Output:
+ bad_words: list of integers
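+
+    Example:
+        matching_ngram_completions([7, 8, 9, 7, 8, 10], [3, 7, 8], 3) returns
+        [9, 10]: appending 9 or 10 to the hypothesis would recreate a 3-gram
+        that already appears in comparison_seq.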
+ """
+ if hypothesis is None or len(hypothesis) < n-1 or len(comparison_seq) < n:
+ return []
+ hypothesis = [int(i) for i in hypothesis] # cast to list of ints
+ comparison_seq = [int(i) for i in comparison_seq] # cast to list of ints
+ n_minus_1_gram = hypothesis[-(n-1):] # list of ints length n-1
+ bad_words = [comparison_seq[i] for i in range(n-1, len(comparison_seq))
+ if comparison_seq[i-(n-1):i] == n_minus_1_gram] # list of ints
+ return bad_words
+
+
+# ========================================
+# WEIGHTED DECODING FEATURE FUNCTIONS
+# These functions compute the decoding features for weighted decoding.
+#
+# Given a conversational history and a hypothesis (i.e. a partially generated response),
+# these functions update the weighted decoding feature vector (of length vocab_size) by
+# adding the new decoding feature, multiplied by its corresponding weight.
+#
+# All these functions have the following inputs and outputs:
+#
+# Inputs:
+# dict: parlai DictionaryAgent
+# hypothesis: a list of integers. This is the partially generated response,
+# represented via word indices.
+# history: a ConvAI2History. This represents the conversation history.
+# wt: a float. This is the weight for the weighted decoding feature.
+# feat: a vector length vocab_size. This will ultimately contain the sum of all the
+# weighted decoding features.
+# Output:
+# feat: a vector length vocab_size. This is the feature vector, now with the new
+# weighted decoding feature added (multiplied by wt).
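+#
+# For example, using intrep_word_used_before with wt=-3.5 adds -3.5 to the
+# log-probability score of every word already present in the hypothesis, making
+# within-utterance repetition less likely during beam search.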
+# ========================================
+
+def intrep_word_used_before(dict, hypothesis, history, wt, feat, remove_stopwords):
+ """
+ Weighted decoding feature function. See explanation above.
+ This feature is 1 for words that have already appeared within the hypothesis,
+ 0 otherwise.
+
+ Additional inputs:
+ remove_stopwords: bool. If True, stopwords are not included when identifying words
+ that have already appeared.
+ """
+ if hypothesis is not None:
+ if remove_stopwords:
+ hypothesis = [idx for idx in hypothesis if dict[idx] not in STOPWORDS]
+ if len(hypothesis) > 0:
+ feat[hypothesis] += wt
+ return feat
+
+
+def intrep_ngram_used_before(dict, hypothesis, history, wt, feat, n):
+ """
+ Weighted decoding feature function. See explanation above.
+    This feature is 1 for words that, if added to the hypothesis, will create an n-gram
+ that has already appeared in the hypothesis; otherwise 0.
+
+ Additional inputs:
+ n: int, the size of the n-grams considered.
+ """
+ if hypothesis is not None:
+ bad_words = matching_ngram_completions(hypothesis, hypothesis, n)
+ if len(bad_words) > 0:
+ feat[bad_words] += wt
+ return feat
+
+
+def extrep_word_used_before(dict, hypothesis, history, wt, feat, remove_stopwords,
+ person):
+ """
+ Weighted decoding feature function. See explanation above.
+ This feature is 1 for words that have already been used earlier in the conversation;
+ otherwise 0.
+
+ Additional inputs:
+ remove_stopwords: bool. If True, stopwords are not included when identifying words
+ that have already appeared.
+ person: If 'self', identify words that have already been used by self (bot).
+ If 'partner', identify words that have already been used by partner (human).
+ """
+ if person == 'self':
+ prev_utts = history.own_utts
+ elif person == 'partner':
+ prev_utts = history.partner_utts
+ else:
+ raise ValueError("person must be 'self' or 'partner', but it is: ", person)
+ if len(prev_utts) == 0:
+ return feat
+ prev_words = [dict.txt2vec(utt) for utt in prev_utts] # list of list of ints
+ prev_words = list(set(flatten(prev_words))) # list of ints, no duplicates
+ if remove_stopwords:
+ prev_words = [idx for idx in prev_words if dict[idx] not in STOPWORDS]
+ if len(prev_words) > 0:
+ feat[prev_words] += wt
+ return feat
+
+
+def extrep_ngram_used_before(dict, hypothesis, history, wt, feat, n, person):
+ """
+ Weighted decoding feature function. See explanation above.
+    This feature is 1 for words that, if added to hypothesis, would create an n-gram that
+ has already been used earlier in the conversation; otherwise 0.
+
+ Additional inputs:
+ n: int, the size of the n-grams considered.
+ person: If 'self', identify n-grams that have already been used by self (bot).
+ If 'partner', identify n-grams that have already been used by partner (human).
+ """
+ if person == 'self':
+ prev_utts = history.own_utts
+ elif person == 'partner':
+ prev_utts = history.partner_utts
+ else:
+ raise ValueError("person must be 'self' or 'partner', but it is: ", person)
+ if len(prev_utts) == 0:
+ return feat
+ if hypothesis is None:
+ return feat
+ prev_utts_wordidx = [dict.txt2vec(utt) for utt in prev_utts] # list of list of ints
+ bad_words = [matching_ngram_completions(prev_utt, hypothesis, n)
+ for prev_utt in prev_utts_wordidx] # list of list of ints
+ bad_words = list(set(flatten(bad_words))) # list of ints, no duplicates
+ if len(bad_words) > 0:
+ feat[bad_words] += wt
+ return feat
+
+
+def nidf(dict, hypothesis, history, wt, feat):
+ """
+ Weighted decoding feature function. See explanation above.
+ This feature is equal to the NIDF (normalized inverse document frequency) score for
+ each word. The score is always between 0 and 1.
+ """
+ feat += wt*nidf_feats.get_feat_vec(dict)
+ return feat
+
+
+def qn_words(dict, hypothesis, history, wt, feat):
+ """
+ Weighted decoding feature function. See explanation above.
+ This feature is 1 for 'interrogative words', 0 otherwise.
+ """
+ qn_indices = [dict[w] for w in QN_WORDS]
+ feat[qn_indices] += wt
+ return feat
+
+
+def lastutt_sim_arora_word(dict, hypothesis, history, wt, feat):
+ """
+ Weighted decoding feature function. See explanation above.
+ Given a word w, this feature is equal to
+ cos_sim(word_emb(w), sent_emb(l))
+ the cosine similarity between the GloVe vector for word w, and the Arora-style
+ sentence embedding for the partner's last utterance l.
+ """
+ partner_utts = history.partner_utts
+ if len(partner_utts) == 0: # if bot goes first then do nothing
+ return feat
+ last_utt = partner_utts[-1] # string
+ if last_utt.strip().lower() == "__silence__": # if bot goes first then do nothing
+ return feat
+
+ # Get last_utt_emb, which is a tensor shape (glove_dim)
+ last_utt_emb = sent_embedder.embed_sent(dict.tokenize(last_utt))
+ if last_utt_emb is None:
+ return feat
+
+ # Get cosine similarities, which is a tensor shape (vocab_size)
+ sims = sent_embedder.get_word_sims(last_utt, last_utt_emb, dict)
+
+ feat += wt*sims
+ return feat
+
+
+# In this dictionary, the keys are the names of the WD features, and the values are
+# functions with inputs (dict, hypothesis, history, wt, feat), that update the feature
+# vector feat.
+WDFEATURE2UPDATEFN = {
+
+ # Use to reduce repeated words within an utterance. Not used in paper.
+ "intrep_word":
+ (lambda x: intrep_word_used_before(x[0], x[1], x[2], x[3], x[4],
+ remove_stopwords=False)),
+
+ # Use to reduce repeated non-stopwords within an utterance. intrep_unigram in paper.
+ "intrep_nonstopword":
+ (lambda x: intrep_word_used_before(x[0], x[1], x[2], x[3], x[4],
+ remove_stopwords=True)),
+
+ # Use to reduce repeated 2-grams within an utterance. intrep_bigram in paper.
+ "intrep_2gram":
+ (lambda x: intrep_ngram_used_before(x[0], x[1], x[2], x[3], x[4], n=2)),
+
+ # Use to reduce repeated 3-grams within an utterance. Not used in paper.
+ "intrep_3gram":
+ (lambda x: intrep_ngram_used_before(x[0], x[1], x[2], x[3], x[4], n=3)),
+
+ # Use to reduce repeating words already used in previous bot utterances.
+ # Not used in paper.
+ "extrep_word":
+ (lambda x: extrep_word_used_before(x[0], x[1], x[2], x[3], x[4],
+ remove_stopwords=False, person='self')),
+
+ # Use to reduce repeating non-stopwords already used in previous bot utterances.
+ # extrep_unigram in paper.
+ "extrep_nonstopword":
+ (lambda x: extrep_word_used_before(x[0], x[1], x[2], x[3], x[4],
+ remove_stopwords=True, person='self')),
+
+ # Use to reduce repeating 2-grams already used in previous bot utterances.
+ # extrep_bigram in paper.
+ "extrep_2gram":
+ (lambda x: extrep_ngram_used_before(x[0], x[1], x[2], x[3], x[4],
+ n=2, person='self')),
+
+ # Use to reduce repeating 3-grams already used in previous bot utterances.
+ # Not used in paper.
+ "extrep_3gram":
+ (lambda x: extrep_ngram_used_before(x[0], x[1], x[2], x[3], x[4],
+ n=3, person='self')),
+
+ # Use to reduce repeating words already used in previous partner utterances.
+ # Not used in paper.
+ "partnerrep_word":
+ (lambda x: extrep_word_used_before(x[0], x[1], x[2], x[3], x[4],
+ remove_stopwords=False, person='partner')),
+
+ # Use to reduce repeating non-stopwords already used in previous partner utterances.
+ # Not used in paper.
+ "partnerrep_nonstopword":
+ (lambda x: extrep_word_used_before(x[0], x[1], x[2], x[3], x[4],
+ remove_stopwords=True, person='partner')),
+
+ # Use to reduce repeating 2-grams already used in previous partner utterances.
+ # partnerrep_bigram in paper.
+ "partnerrep_2gram":
+ (lambda x: extrep_ngram_used_before(x[0], x[1], x[2], x[3], x[4],
+ n=2, person='partner')),
+
+ # Use to reduce repeating 3-grams already used in previous partner utterances.
+ # Not used in paper.
+ "partnerrep_3gram":
+ (lambda x: extrep_ngram_used_before(x[0], x[1], x[2], x[3], x[4],
+ n=3, person='partner')),
+
+ # Use to increase/decrease the probability of high-specificity (i.e. rare) words.
+ # This is the NIDF(w) weighted decoding feature mentioned in the paper.
+ "nidf":
+ (lambda x: nidf(x[0], x[1], x[2], x[3], x[4])),
+
+ # Use to increase/decrease the probability of interrogative (i.e. question) words.
+ # This is the is_qn_word(w) WD feature mentioned in the paper.
+ "question":
+ (lambda x: qn_words(x[0], x[1], x[2], x[3], x[4])),
+
+ # Use to increase/decrease the probability of words with high response-relatedness
+ # (i.e. similarity to the partner's last utterance).
+ # This is the resp_rel(w) WD feature mentioned in the paper.
+ "lastuttsim":
+ (lambda x: lastutt_sim_arora_word(x[0], x[1], x[2], x[3], x[4])),
+}
+
+
+def get_wd_features(dict, hypothesis, history, wd_features, wd_weights):
+ """
+ Given a conversational history and a hypothesis (i.e. partially generated response),
+ compute the Weighted Decoding features for all words in the vocabulary.
+
+ Inputs:
+ dict: parlai DictionaryAgent
+ hypothesis: list of ints or None
+ history: a ConvAI2History. This represents the conversation history.
+ wd_features: list of strings; the names of the WD features we want to use
+ wd_weights: list of floats; the weights corresponding to the WD features.
+ Returns:
+ wd_feat_vec: tensor shape (vocab_size), containing weighted sum of the feature
+ functions, for each candidate continuation word
+ """
+ wd_feat_vec = torch.zeros((len(dict)))
+ for f, w in zip(wd_features, wd_weights):
+ wd_feat_vec = WDFEATURE2UPDATEFN[f]((dict, hypothesis, history, w, wd_feat_vec))
+ return wd_feat_vec
+
+
+# ========================================
+# SENTENCE-LEVEL ATTRIBUTE FUNCTIONS
+# Given an input utterance, these functions compute the value of the controllable
+# attribute at the sentence level (more precisely, at the utterance level).
+#
+# All these functions have the following inputs and outputs:
+#
+# Inputs:
+# utt: a string, tokenized and lowercase
+# history: a ConvAI2History. This represents the conversation history.
+# Output:
+# score: float. the value of the controllable attribute for utt.
+# ========================================
+
+def intrep_repeated_word_frac(utt, history, remove_stopwords):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns the fraction of words in utt that are repeated.
+ Additional inputs:
+ remove_stopwords: bool. If True, stopwords are removed before counting repetition.
+ """
+ assert utt.strip() != ""
+ tokens = utt.split()
+ if remove_stopwords:
+ tokens = [t for t in tokens if t not in STOPWORDS]
+ return intrep_frac(tokens)
+
+
+def intrep_repeated_ngram_frac(utt, history, n):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns the fraction of n-grams in utt that are repeated.
+ Additional inputs:
+ n: int, the size of the n-grams considered.
+ """
+ assert utt.strip() != ""
+ ngrams = get_ngrams(utt, n)
+ return intrep_frac(ngrams)
+
+
+def extrep_repeated_word_frac(utt, history, remove_stopwords, person):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns the fraction of words in utt that already appeared in a previous utterance.
+ Additional inputs:
+ remove_stopwords: bool. If True, stopwords are removed from utt before counting
+ repetition.
+ person: If 'self', identify words that have already been used by self (bot).
+ If 'partner', identify words that have already been used by partner (human).
+ """
+ assert utt.strip() != ""
+ if person == 'self':
+ prev_utts = history.own_utts # should already be tokenized
+ elif person == 'partner':
+ prev_utts = history.partner_utts # should already be tokenized
+ else:
+ raise ValueError("person must be 'self' or 'partner', but it is: ", person)
+ if len(prev_utts) == 0:
+ return 0
+ tokens = utt.split() # list of strings
+ if remove_stopwords: # remove stopwords from utt
+ tokens = [t for t in tokens if t not in STOPWORDS]
+    prev_words = [s.split() for s in prev_utts]  # list of list of strings
+    prev_words = list(set(flatten(prev_words)))  # list of strings, no duplicates
+ return extrep_frac(tokens, prev_words)
+
+
+def extrep_repeated_ngram_frac(utt, history, n, person):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns fraction of n-grams in utt that already appeared in a previous utterance.
+ Additional inputs:
+ n: int, the size of the n-grams considered.
+ person: If 'self', identify n-grams that have already been used by self (bot).
+ If 'partner', identify n-grams that have already been used by partner (human).
+ """
+ assert utt.strip() != ""
+ if person == 'self':
+ prev_utts = history.own_utts # should already be tokenized
+ elif person == 'partner':
+ prev_utts = history.partner_utts # should already be tokenized
+ else:
+ raise ValueError("person must be 'self' or 'partner', but it is: ", person)
+ if len(prev_utts) == 0:
+ return 0
+ utt_ngrams = get_ngrams(utt, n)
+ prev_ngrams = [get_ngrams(prev, n) for prev in prev_utts] # list of list of strings
+ prev_ngrams = list(set(flatten(prev_ngrams))) # list of strings, no duplicates
+ return extrep_frac(utt_ngrams, prev_ngrams)
+
+
+def avg_nidf(utt, history):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns the mean NIDF of the words in utt.
+ """
+ words = utt.split()
+ problem_words = [w for w in words if w not in word2nidf]
+ ok_words = [w for w in words if w in word2nidf]
+ if len(ok_words) == 0:
+ print("WARNING: For all the words in the utterance '%s', we do not have the "
+ "NIDF score. Marking as avg_nidf=1." % utt)
+ return 1 # rarest possible sentence
+ nidfs = [word2nidf[w] for w in ok_words]
+ avg_nidf = sum(nidfs)/len(nidfs)
+ if len(problem_words) > 0:
+ print("WARNING: When calculating avg_nidf for the utterance '%s', we don't "
+ "know NIDF for the following words: %s" % (utt, str(problem_words)))
+ assert avg_nidf >= 0 and avg_nidf <= 1
+ return avg_nidf
+
+
+def contains_qmark(utt, history):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns 1 if utt contains a question mark, otherwise 0.
+ """
+ return int("?" in utt)
+
+
+def lastutt_sim_arora_sent(utt, history):
+ """
+ Sentence-level attribute function. See explanation above.
+
+ Returns
+ cos_sim(sent_emb(last_utt), sent_emb(utt))
+ the cosine similarity of the Arora-style sentence embeddings for the current
+ response (utt) and the partner's last utterance (last_utt, which is in history).
+
+ - If there is no last_utt (i.e. utt is the first utterance of the conversation),
+ returns None.
+    - If one or both of utt and last_utt are all-OOV (so we can't compute sentence
+      embeddings), returns the string 'oov'.
+ """
+ partner_utts = history.partner_utts
+ if len(partner_utts) == 0:
+ # print('WARNING: returning lastuttsim = None because bot goes first')
+ return None
+ last_utt = partner_utts[-1] # string
+ if "__SILENCE__" in last_utt:
+ assert last_utt.strip() == "__SILENCE__"
+ # print('WARNING: returning lastuttsim = None because bot goes first')
+ return None
+
+ # Get sentence embeddings. Here we're naively splitting last_utt and utt; this is
+ # fine given that we assume both utt and history are lowercase and tokenized.
+ # Both last_utt_emb and response_emb are tensors length glove_dim (or None)
+ last_utt_emb = sent_embedder.embed_sent(last_utt.split())
+ response_emb = sent_embedder.embed_sent(utt.split())
+ if last_utt_emb is None or response_emb is None:
+ return 'oov'
+
+ sim = torch.nn.functional.cosine_similarity(last_utt_emb, response_emb, dim=0)
+ return sim.item()
+
+
+def wordlist_frac(utt, history, word_list):
+ """
+ Sentence-level attribute function. See explanation above.
+ Returns the fraction of words in utt that are in word_list.
+ Additional inputs:
+ word_list: list of strings.
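+    Example:
+        wordlist_frac("what is that ?", history, word_list=QN_WORDS) returns 0.5,
+        since 2 of the 4 tokens ('what' and '?') are interrogative words.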
+ """
+ words = utt.split()
+ num_in_list = len([w for w in words if w in word_list])
+ return num_in_list/len(words)
+
+
+# In this dict, the keys are the names of the sentence-level attributes, and the values
+# are functions with input (utt, history), returning the attribute value measured on utt
+ATTR2SENTSCOREFN = {
+
+ # Proportion of words in utt that appear earlier in utt
+ "intrep_word":
+ (lambda x: intrep_repeated_word_frac(x[0], x[1], remove_stopwords=False)),
+
+ # Proportion of non-stopwords in utt that appear earlier in utt
+ "intrep_nonstopword":
+ (lambda x: intrep_repeated_word_frac(x[0], x[1], remove_stopwords=True)),
+
+ # Proportion of 2-grams in utt that appear earlier in utt
+ "intrep_2gram":
+ (lambda x: intrep_repeated_ngram_frac(x[0], x[1], n=2)),
+
+ # Proportion of 3-grams in utt that appear earlier in utt
+ "intrep_3gram":
+ (lambda x: intrep_repeated_ngram_frac(x[0], x[1], n=3)),
+
+ # Proportion of words in utt that appeared in a previous bot utterance
+ "extrep_word":
+ (lambda x: extrep_repeated_word_frac(x[0], x[1], remove_stopwords=False,
+ person='self')),
+
+ # Proportion of non-stopwords in utt that appeared in a previous bot utterance
+ "extrep_nonstopword":
+ (lambda x: extrep_repeated_word_frac(x[0], x[1], remove_stopwords=True,
+ person='self')),
+
+ # Proportion of 2-grams in utt that appeared in a previous bot utterance
+ "extrep_2gram":
+ (lambda x: extrep_repeated_ngram_frac(x[0], x[1], n=2, person='self')),
+
+ # Proportion of 3-grams in utt that appeared in a previous bot utterance
+ "extrep_3gram":
+ (lambda x: extrep_repeated_ngram_frac(x[0], x[1], n=3, person='self')),
+
+ # Proportion of words in utt that appeared in a previous partner utterance
+ "partnerrep_word":
+ (lambda x: extrep_repeated_word_frac(x[0], x[1], remove_stopwords=False,
+ person='partner')),
+
+ # Proportion of non-stopwords in utt that appeared in a previous partner utterance
+ "partnerrep_nonstopword":
+ (lambda x: extrep_repeated_word_frac(x[0], x[1], remove_stopwords=True,
+ person='partner')),
+
+ # Proportion of 2-grams in utt that appeared in a previous partner utterance
+ "partnerrep_2gram":
+ (lambda x: extrep_repeated_ngram_frac(x[0], x[1], n=2, person='partner')),
+
+ # Proportion of 3-grams in utt that appeared in a previous partner utterance
+ "partnerrep_3gram":
+ (lambda x: extrep_repeated_ngram_frac(x[0], x[1], n=3, person='partner')),
+
+ # Mean NIDF score of the words in utt
+ "avg_nidf":
+ (lambda x: avg_nidf(x[0], x[1])),
+
+ # 1 if utt contains '?', 0 otherwise
+ "question":
+ (lambda x: contains_qmark(x[0], x[1])),
+
+ # Proportion of words in utt that are interrogative words
+ "qn_words":
+ (lambda x: wordlist_frac(x[0], x[1], word_list=QN_WORDS)),
+
+ # Cosine similarity of utt to partner's last utterance
+ "lastuttsim":
+ (lambda x: lastutt_sim_arora_sent(x[0], x[1])),
+}
+
+
+def eval_attr(utt, history, attr):
+ """
+ Given a conversational history and an utterance, compute the requested
+ sentence-level attribute for utt.
+
+ Inputs:
+ utt: string. The utterance, tokenized and lowercase
+ history: a ConvAI2History. This represents the conversation history.
+ attr: string. The name of the sentence-level attribute.
+ Returns:
+ value: float. The value of the attribute for utt.
+ """
+ # Check everything is lowercased already
+ assert utt == utt.lower()
+ for thing in [history.persona_lines, history.partner_utts, history.own_utts]:
+ for line in thing:
+ if line != "__SILENCE__":
+ assert line == line.lower()
+
+ # Eval attribute
+ return ATTR2SENTSCOREFN[attr]((utt, history))
+
+
+# ========================================
+# GETTING CONTROL VARIABLE BUCKETS
+# For Conditional Training (CT) models, the code in this section allows us to determine
+# what bucket a given control variable value should go into.
+# ========================================
+
+def get_qn_bucket_probs():
+ """
+ Assuming we have 11 CT question buckets (0 to 10), compute P(bucket|question=1) and
+ P(bucket|question=0); this is needed so we can probabilistically assign incoming
+ training examples to buckets.
+
+ Returns:
+ prob_bucket_given_qn: list of floats length 11; P(bucket|question=1)
+ prob_bucket_given_notqn: list of floats length 11; P(bucket|question=0)
+ """
+ prob_qn = 41101/131438 # P(question=1), computed across ConvAI2 dataset. ~31%
+
+ # Compute P(bucket), i.e. the total sizes of the buckets.
+ # This is done by assuming that buckets 1 to 10 are equal in size, but bucket 0
+ # is larger because we have more non-questions than questions. Therefore:
+ # P(question=1) = P(bucket=1, question=1) + ... + P(bucket=10, question=1)
+ # = prob_bucket_n * 0.1 + ... + prob_bucket_n * 1
+ # = 5.5 * prob_bucket_n
+ # Thus we can derive the value for prob_bucket_n and prob_bucket_0:
+ prob_bucket_n = prob_qn/5.5 # P(bucket=n) for n=1,...,10
+ prob_bucket_0 = 1 - 10*prob_bucket_n # P(bucket=0)
+ prob_bucket = [prob_bucket_0] + [prob_bucket_n]*10 # list length 11, P(bucket)
+
+ # Compute P(bucket|qn=1) and P(bucket|qn=0) using Bayes Rule:
+ # P(bucket|qn=1) = P(bucket) * P(qn=1|bucket) / P(qn=1)
+ # P(bucket|qn=0) = P(bucket) * P(qn=0|bucket) / P(qn=0)
+ prob_bucket_given_qn = [pb * (i/10) / prob_qn
+ for i, pb in enumerate(prob_bucket)]
+ prob_bucket_given_notqn = [pb * ((10-i)/10) / (1-prob_qn)
+ for i, pb in enumerate(prob_bucket)]
+
+ return prob_bucket_given_qn, prob_bucket_given_notqn
+
+
+PROB_BUCKET_GIVEN_QN, PROB_BUCKET_GIVEN_NOTQN = get_qn_bucket_probs()
+
+
+def bucket_question(ex, ctrl, num_buckets):
+ """
+ Given an example (where the target response may or may not be a question) and its
+ history, probabilistically determine what question-asking CT bucket to use.
+
+ Inputs:
+ ex: message dictionary containing a bool field 'question'
+ ctrl: string. The name of the CT control. Should be 'question'.
+ num_buckets: int. The number of question-asking CT buckets. Assumed to be 11.
+ Returns:
+ out: int. bucket number.
+ """
+ assert num_buckets == 11
+ is_qn = int(ex['question'])
+ assert is_qn in [0, 1]
+ is_qn = bool(is_qn)
+ if is_qn: # Sample from P(bucket|qn=1)
+ out = np.random.choice(range(num_buckets), 1, p=PROB_BUCKET_GIVEN_QN)
+ else: # Sample from P(bucket|qn=0)
+ out = np.random.choice(range(num_buckets), 1, p=PROB_BUCKET_GIVEN_NOTQN)
+ out = int(out[0])
+ return out
+
+
+def sort_into_bucket(val, bucket_lbs):
+ """
+ Returns the highest bucket such that val >= lower bound for that bucket.
+
+ Inputs:
+ val: float. The value to be sorted into a bucket.
+ bucket_lbs: list of floats, sorted ascending.
+
+ Returns:
+ bucket_id: int in range(num_buckets); the bucket that val belongs to.
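+
+    Example:
+        sort_into_bucket(0.6, [0.0, 0.25, 0.5, 0.75]) returns 2, since
+        0.6 >= 0.5 but 0.6 < 0.75.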
+ """
+ num_buckets = len(bucket_lbs)
+ for bucket_id in range(num_buckets-1, -1, -1): # iterate descending
+ lb = bucket_lbs[bucket_id]
+ if val >= lb:
+ return bucket_id
+ raise ValueError('val %f is not >= any of the lower bounds: %s' % (val, bucket_lbs))
+
+
+def bucket_contvar(ex, ctrl, num_buckets):
+ """
+ Given ex, which contains a continuous value for a particular control variable,
+ return the bucketed version of that control value.
+
+ Inputs:
+ ex: message dictionary. Assume it has key ctrl, mapping to the value.
+ ctrl: string. The name of the CT control.
+ num_buckets: int. The number of buckets for this control variable.
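+
+    Example:
+        With the avg_nidf bucket lower bounds defined below, an example with
+        ex['avg_nidf'] == '0.2' is sorted into bucket 4, since 0.2 lies between
+        the 5th and 6th lower bounds.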
+ """
+ if ctrl not in ex.keys():
+ raise ValueError("Control %s not found in example. Available keys in "
+ "this example: %s" % (ctrl, ', '.join(ex.keys())))
+
+ # Get the control variable value
+ ctrl_val = ex[ctrl] # string. the value of the control variable for this example
+ if ctrl == 'avg_nidf':
+ ctrl_val = float(ctrl_val)
+ assert ctrl_val >= 0
+ assert ctrl_val <= 1
+ elif ctrl == 'lastuttsim':
+ if ctrl_val == 'None': # bot goes first in conversation
+ assert num_buckets == 11
+ return 10 # The last bucket is for when the bot goes first
+ else:
+ ctrl_val = float(ctrl_val)
+ assert ctrl_val >= -1
+ assert ctrl_val <= 1
+ else:
+ raise ValueError('Unexpected CT ctrl: %s' % ctrl)
+
+ # Get the bucket lowerbounds
+ bucket_lbs = CONTROL2BUCKETLBS[(ctrl, num_buckets)] # lst len num_buckets of floats
+ if ctrl == 'lastuttsim':
+ # The 'bot goes first' bucket 10 has no lower bound
+ assert len(bucket_lbs) == num_buckets-1
+ else:
+ assert len(bucket_lbs) == num_buckets
+
+ # Determine the correct bucket and return the bucket id
+ return sort_into_bucket(ctrl_val, bucket_lbs)
+
+
+# The default embedding size for CT control variable embeddings
+CONTROL2DEFAULTEMBSIZE = {
+ 'question': 10,
+ 'avg_nidf': 10,
+ 'lastuttsim': 10,
+}
+
+# The default number of buckets for CT control variables
+CONTROL2DEFAULTNUMBUCKETS = {
+ 'question': 11,
+ 'avg_nidf': 10,
+ 'lastuttsim': 11, # 11th bucket is for when the bot goes first in the conversation
+}
+
+# This dictionary maps from the name of a CT control variable, to a function that
+# takes (ex, ctrl, num_buckets) as input, and returns the correct bucket_id
+# for that control and this example.
+CONTROL2BUCKETINGFN = {
+ 'question': bucket_question,
+ 'avg_nidf': bucket_contvar,
+ 'lastuttsim': bucket_contvar,
+}
+
+# Bucket lowerbounds. These are produced using the get_bucket_lowerbounds.py script.
+AVG_NIDF_10BUCKET_LBS = [0.0, 0.1598414705378728, 0.17498045049881217,
+ 0.18658836637678175, 0.19671787445075514, 0.2070643776875113,
+ 0.2182630256396894, 0.23053753067016441, 0.24624559431359425,
+ 0.2707252238670671]
+LASTUTTSIM_10BUCKET_LBS = [-0.3870984613895416, -0.08026778697967529,
+ -0.025567850098013878, 0.019155802205204964,
+ 0.06262511014938354, 0.10953287780284882,
+ 0.16335178911685944, 0.2319537252187729,
+ 0.3283223509788513, 0.4921867549419403]
+
+
+# This dictionary maps from (CT control variable name, num_buckets) to the lower bounds
+CONTROL2BUCKETLBS = {
+ ('avg_nidf', 10): AVG_NIDF_10BUCKET_LBS,
+
+ # Note: For lastuttsim, the 11th bucket is for when the bot goes first; it doesn't
+ # have a LB but it does have an embedding.
+ ('lastuttsim', 11): LASTUTTSIM_10BUCKET_LBS,
+}
+
+
+def get_ctrl_vec(exs, history, control_settings):
+ """
+ Given a batch of examples with given history, return the bucketed CT control values.
+ This is used both when training and evaluating CT systems.
+
+ Inputs:
+ exs: list length batch_size of message dictionaries. Each dictionary contains
+ a 'text' field, and a field for each CT control we're using, along with the
+ value of the CT control variable.
+ history: list length batch_size of ConvAI2History objects. These represent the
+ conversation history.
+ control_settings: dictionary containing info about CT controls.
+ See ControllableSeq2seqAgent.control_settings.
+
+ Returns:
+ ctrl_vec: torch Tensor shape (batch_size, num_controls), with the bucketed values
+ for the CT controls we're using. If there's no CT controls, return None.
+ """
+ if len(control_settings) == 0:
+ return None
+
+ # ctrl_vec is shape (bsz, num_controls) filled with -1's
+ ctrl_vec = -torch.ones((len(exs), len(control_settings))).long()
+
+ for batch_idx, (ex, hist) in enumerate(zip(exs, history)):
+ for ctrl, ctrl_info in control_settings.items():
+ if ctrl not in ex:
+ raise ValueError("The CT control '%s' is not present as a key in the "
+ "message dictionary:\n%s\nIf training a CT model, "
+ "perhaps your training data is missing the "
+ "annotations. If talking interactively, perhaps you "
+ "forgot to set --set-controls." % (ctrl, str(ex)))
+ set_val = ctrl_info['set_value'] # is either int or None
+ if set_val is not None: # if we're using some preset bucket for this ctrl
+ bucket = set_val # override with set_val, an int
+ else: # bucket the control val given in ex
+ num_buckets = ctrl_info['num_buckets']
+ bucketing_fn = CONTROL2BUCKETINGFN[ctrl] # bucketing fn for this ctrl
+ bucket = bucketing_fn(ex, ctrl, num_buckets) # int
+
+ # If we have multiple CT controls, ctrl_idx tells us which order they go in
+ ctrl_idx = ctrl_info['idx'] # int
+ ctrl_vec[batch_idx, ctrl_idx] = bucket
+ return ctrl_vec
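+
+# Usage sketch (hypothetical values): for a batch of two examples annotated with
+# avg_nidf only, e.g.
+#   exs = [{'text': 'hi there', 'avg_nidf': '0.21'},
+#          {'text': 'how are you ?', 'avg_nidf': '0.18'}]
+#   control_settings = {'avg_nidf': {'num_buckets': 10, 'set_value': None, 'idx': 0}}
+# get_ctrl_vec(exs, history, control_settings) returns a LongTensor of shape
+# (2, 1) holding one bucket id per example; passing set_value as an int instead
+# would override the annotations with that fixed bucket.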
diff --git a/projects/controllable_dialogue/controllable_seq2seq/modules.py b/projects/controllable_dialogue/controllable_seq2seq/modules.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/modules.py
@@ -0,0 +1,768 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file is derived from parlai/core/seq2seq/modules.py, and adapted to handle
+CT controlled models.
+"""
+
+import math
+import torch
+import torch.nn as nn
+from torch.nn.parameter import Parameter
+from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
+import torch.nn.functional as F
+
+from parlai.core.utils import NEAR_INF
+
+
+def opt_to_kwargs(opt):
+ """Get kwargs for seq2seq from opt."""
+ kwargs = {}
+ for k in ['numlayers', 'dropout', 'bidirectional', 'rnn_class',
+ 'lookuptable', 'decoder', 'numsoftmax',
+ 'attention', 'attention_length', 'attention_time',
+ 'input_dropout', 'control_settings']:
+ if k in opt:
+ kwargs[k] = opt[k]
+ return kwargs
+
+
+def pad(tensor, length, dim=0, pad=0):
+ """Pad tensor to a specific length.
+
+ :param tensor: vector to pad
+ :param length: new length
+    :param dim: (default 0) dimension to pad
+    :param pad: (default 0) value to fill the padding with
+
+    :returns: padded tensor if the tensor is shorter than length
+ """
+ if tensor.size(dim) < length:
+ return torch.cat(
+ [tensor, tensor.new(*tensor.size()[:dim],
+ length - tensor.size(dim),
+ *tensor.size()[dim + 1:]).fill_(pad)],
+ dim=dim)
+ else:
+ return tensor
+
+
+class Seq2seq(nn.Module):
+ """Sequence to sequence parent module."""
+
+ RNN_OPTS = {'rnn': nn.RNN, 'gru': nn.GRU, 'lstm': nn.LSTM}
+
+ def __init__(
+ self, num_features, embeddingsize, hiddensize, numlayers=2, dropout=0,
+ bidirectional=False, rnn_class='lstm', lookuptable='unique',
+ decoder='same', numsoftmax=1,
+ attention='none', attention_length=48, attention_time='post',
+ padding_idx=0, start_idx=1, unknown_idx=3, input_dropout=0,
+ longest_label=1, control_settings={}
+ ):
+ """Initialize seq2seq model.
+
+ See cmdline args in Seq2seqAgent for description of arguments.
+ """
+ super().__init__()
+ self.attn_type = attention
+
+ self.NULL_IDX = padding_idx
+ self.register_buffer('START', torch.LongTensor([start_idx]))
+ self.longest_label = longest_label
+
+ rnn_class = Seq2seq.RNN_OPTS[rnn_class]
+ self.decoder = RNNDecoder(
+ num_features, embeddingsize, hiddensize,
+ padding_idx=padding_idx, rnn_class=rnn_class,
+ numlayers=numlayers, dropout=dropout,
+ attn_type=attention, attn_length=attention_length,
+ attn_time=attention_time,
+ bidir_input=bidirectional,
+ control_settings=control_settings)
+
+ shared_lt = (self.decoder.lt # share embeddings between rnns
+ if lookuptable in ('enc_dec', 'all') else None)
+ shared_rnn = self.decoder.rnn if decoder == 'shared' else None
+ self.encoder = RNNEncoder(
+ num_features, embeddingsize, hiddensize,
+ padding_idx=padding_idx, rnn_class=rnn_class,
+ numlayers=numlayers, dropout=dropout,
+ bidirectional=bidirectional,
+ shared_lt=shared_lt, shared_rnn=shared_rnn,
+ unknown_idx=unknown_idx, input_dropout=input_dropout)
+
+ shared_weight = (self.decoder.lt.weight # use embeddings for projection
+ if lookuptable in ('dec_out', 'all') else None)
+ self.output = OutputLayer(
+ num_features, embeddingsize, hiddensize, dropout=dropout,
+ numsoftmax=numsoftmax, shared_weight=shared_weight,
+ padding_idx=padding_idx)
+
+ def _encode(self, xs, prev_enc=None):
+ """Encode the input or return cached encoder state."""
+ if prev_enc is not None:
+ return prev_enc
+ else:
+ return self.encoder(xs)
+
+ def _starts(self, bsz):
+ """Return bsz start tokens."""
+ return self.START.detach().expand(bsz, 1)
+
+ def _decode_forced(self, ys, ctrl_inputs, encoder_states):
+ """Decode with teacher forcing."""
+ bsz = ys.size(0)
+ seqlen = ys.size(1)
+
+ hidden = encoder_states[1]
+ attn_params = (encoder_states[0], encoder_states[2])
+
+ # input to model is START + each target except the last
+ y_in = ys.narrow(1, 0, seqlen - 1)
+ xs = torch.cat([self._starts(bsz), y_in], 1)
+
+ scores = []
+ if self.attn_type == 'none':
+ # do the whole thing in one go
+ output, hidden = self.decoder(xs, ctrl_inputs, hidden, attn_params)
+ score = self.output(output)
+ scores.append(score)
+ else:
+ # need to feed in one token at a time so we can do attention
+ for i in range(seqlen):
+ xi = xs.select(1, i).unsqueeze(1)
+ output, hidden = self.decoder(xi, ctrl_inputs, hidden, attn_params)
+ score = self.output(output)
+ scores.append(score)
+
+ scores = torch.cat(scores, 1)
+ return scores
+
+ def _decode(self, ctrl_inputs, encoder_states, maxlen):
+ """Decode maxlen tokens."""
+ hidden = encoder_states[1]
+ attn_params = (encoder_states[0], encoder_states[2])
+ bsz = encoder_states[0].size(0)
+
+ xs = self._starts(bsz) # input start token
+
+ scores = []
+ for _ in range(maxlen):
+ # generate at most longest_label tokens
+ output, hidden = self.decoder(xs, ctrl_inputs, hidden, attn_params)
+ score = self.output(output)
+ scores.append(score)
+ xs = score.max(2)[1] # next input is current predicted output
+
+ scores = torch.cat(scores, 1)
+ return scores
+
+ def _align_inds(self, encoder_states, cand_inds):
+ """Select the encoder states relevant to valid candidates."""
+ enc_out, hidden, attn_mask = encoder_states
+
+ # LSTM or GRU/RNN hidden state?
+ if isinstance(hidden, torch.Tensor):
+ hid, cell = hidden, None
+ else:
+ hid, cell = hidden
+
+ if len(cand_inds) != hid.size(1):
+ # if the number of candidates is mismatched from the number of
+ # hidden states, we throw out the hidden states we won't rank with
+ cand_indices = hid.new(cand_inds)
+ hid = hid.index_select(1, cand_indices)
+ if cell is None:
+ hidden = hid
+ else:
+ cell = cell.index_select(1, cand_indices)
+ hidden = (hid, cell)
+
+ if self.attn_type != 'none':
+ enc_out = enc_out.index_select(0, cand_indices)
+ attn_mask = attn_mask.index_select(0, cand_indices)
+
+ return enc_out, hidden, attn_mask
+
+ def _extract_cur(self, encoder_states, index, num_cands):
+ """Extract encoder states at current index and expand them."""
+ enc_out, hidden, attn_mask = encoder_states
+ if isinstance(hidden, torch.Tensor):
+ cur_hid = (hidden.select(1, index).unsqueeze(1)
+ .expand(-1, num_cands, -1))
+ else:
+ cur_hid = (hidden[0].select(1, index).unsqueeze(1)
+ .expand(-1, num_cands, -1).contiguous(),
+ hidden[1].select(1, index).unsqueeze(1)
+ .expand(-1, num_cands, -1).contiguous())
+
+ cur_enc, cur_mask = None, None
+ if self.attn_type != 'none':
+ cur_enc = (enc_out[index].unsqueeze(0)
+ .expand(num_cands, -1, -1))
+ cur_mask = (attn_mask[index].unsqueeze(0)
+ .expand(num_cands, -1))
+ return cur_enc, cur_hid, cur_mask
+
+ def _rank(self, cands, cand_inds, encoder_states):
+ """Rank each cand by the average log-probability of the sequence."""
+ if cands is None:
+ return None
+ encoder_states = self._align_inds(encoder_states, cand_inds)
+
+ cand_scores = []
+ for batch_idx in range(len(cands)):
+ # we do one set of candidates at a time
+ curr_cs = cands[batch_idx]
+ num_cands = curr_cs.size(0)
+
+ # select just the one hidden state
+ cur_enc_states = self._extract_cur(
+ encoder_states, batch_idx, num_cands)
+
+ score = self._decode_forced(curr_cs, None, cur_enc_states)
+ true_score = F.log_softmax(score, dim=2).gather(
+ 2, curr_cs.unsqueeze(2))
+ nonzero = curr_cs.ne(0).float()
+ scores = (true_score.squeeze(2) * nonzero).sum(1)
+ seqlens = nonzero.sum(1)
+ scores /= seqlens
+ cand_scores.append(scores)
+
+ max_len = max(len(c) for c in cand_scores)
+ cand_scores = torch.cat(
+ [pad(c, max_len, pad=self.NULL_IDX).unsqueeze(0)
+ for c in cand_scores], 0)
+ return cand_scores
+
+ def forward(self, xs, ctrl_inputs=None, ys=None, cands=None,
+ prev_enc=None, maxlen=None, seq_len=None):
+ """Get output predictions from the model.
+
+ :param xs: (bsz x seqlen) LongTensor input to the encoder
+ :param ys: expected output from the decoder. used for teacher
+ forcing to calculate loss.
+ :param cands: set of candidates to rank
+ :param prev_enc: if you know you'll pass in the same xs multiple
+ times, you can pass in the encoder output from the
+                         last forward pass to skip recalculating the same
+ encoder output.
+ :param maxlen: max number of tokens to decode. if not set, will
+ use the length of the longest label this model
+ has seen. ignored when ys is not None.
+        :param seq_len: this is the sequence length of the input (xs), i.e.
+                        xs.size(1). we use this to recover the proper
+                        output sizes in the case when we distribute over
+                        multiple gpus
+ :param ctrl_inputs: (bsz x num_controls) LongTensor containing control vars
+
+ :returns: scores, candidate scores, and encoder states
+ scores contains the model's predicted token scores.
+ (bsz x seqlen x num_features)
+ candidate scores are the score the model assigned to each candidate
+ (bsz x num_cands)
+ encoder states are the (output, hidden, attn_mask) states from the
+ encoder. feed this back in to skip encoding on the next call.
+ """
+ if ys is not None:
+ # keep track of longest label we've ever seen
+ # we'll never produce longer ones than that during prediction
+ self.longest_label = max(self.longest_label, ys.size(1))
+
+ encoder_states = self._encode(xs, prev_enc)
+
+ # rank candidates if they are available
+ cand_scores = None
+ if cands is not None:
+ cand_inds = [i for i in range(cands.size(0))]
+ cand_scores = self._rank(cands, cand_inds, encoder_states)
+
+ if ys is not None:
+ # use teacher forcing
+ scores = self._decode_forced(ys, ctrl_inputs, encoder_states)
+ else:
+ scores = self._decode(ctrl_inputs, encoder_states,
+ maxlen or self.longest_label)
+
+ if seq_len is not None:
+ # when using multiple gpus, we need to make sure output of
+ # encoder is correct size for gathering; we recover this with
+ # the parameter seq_len
+ if encoder_states[0].size(1) < seq_len:
+ out_pad_tensor = torch.zeros(
+ encoder_states[0].size(0),
+ seq_len - encoder_states[0].size(1),
+ encoder_states[0].size(2)
+ ).cuda()
+ new_out = torch.cat([encoder_states[0], out_pad_tensor], 1)
+ encoder_states = (new_out, encoder_states[1], encoder_states[2])
+
+ return scores, cand_scores, encoder_states
+
+
+class ControlEncoder(nn.Module):
+ """
+ Given CT control variable inputs, gives the concatenated control embeddings vector.
+ """
+
+ def __init__(self, control_settings):
+ super().__init__()
+ self.control_settings = control_settings # see ControllableSeq2seqAgent
+
+ # int to string mapping giving the canonical ordering of the controls
+ self.idx2ctrl = {d['idx']: control for control, d in
+ self.control_settings.items()}
+
+ # Initialize control embeddings
+ self.control_embeddings = nn.ModuleDict({
+ c: nn.Embedding(d['num_buckets'], d['embsize'], sparse=False)
+ for c, d in control_settings.items()
+ }) # maps from string (ctrl name) to nn.Embedding
+
+ def forward(self, control_inputs):
+ """
+ Inputs:
+ :param control_inputs: (bsz x num_control_vars) LongTensor of control
+ variable values (i.e. bucket ids)
+
+ Outputs:
+ :returns control_embs: (bsz x sum of control emb sizes) FloatTensor of
+ control variable embeddings, concatenated
+ """
+ # list length num_control_vars of tensors shape (bsz, 1)
+ control_inputs = torch.split(control_inputs, 1, dim=1)
+
+ # list length num_control_vars of tensors shape (bsz)
+ control_inputs = [torch.squeeze(t, 1) for t in control_inputs]
+
+ assert len(control_inputs) == len(self.control_settings)
+
+ control_embs = []
+ for idx, inputs in enumerate(control_inputs):
+ control = self.idx2ctrl[idx] # string, the ctrl var for this position
+ # Append a tensor shape (bsz, control embedding size)
+ control_embs.append(self.control_embeddings[control](inputs))
+ control_embs = torch.cat(control_embs, dim=1) # shape (bsz, sum of ctrl embszs)
+
+ return control_embs
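+
+# Shape sketch (hypothetical sizes): with two controls of embedding size 10 each
+# and a batch of 4, forward() receives control_inputs of shape (4, 2) and returns
+# control_embs of shape (4, 20), i.e. one embedding per control concatenated
+# along the last dimension in the canonical idx order.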
+
+
+class UnknownDropout(nn.Module):
+ """With set frequency, replaces tokens with unknown token.
+
+ This layer can be used right before an embedding layer to make the model
+ more robust to unknown words at test time.
+ """
+
+ def __init__(self, unknown_idx, probability):
+ """Initialize layer.
+
+ :param unknown_idx: index of unknown token, replace tokens with this
+ :param probability: during training, replaces tokens with unknown token
+ at this rate.
+ """
+ super().__init__()
+ self.unknown_idx = unknown_idx
+ self.prob = probability
+
+ def forward(self, input):
+ """If training and dropout rate > 0, masks input with unknown token."""
+ if self.training and self.prob > 0:
+ mask = input.new(input.size()).float().uniform_(0, 1) < self.prob
+ input.masked_fill_(mask, self.unknown_idx)
+ return input
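+
+# Behaviour sketch: with probability=0.1, roughly 10% of token ids are replaced
+# by unknown_idx during training (self.training is True); at evaluation time the
+# input passes through unchanged. Note the replacement is applied in place on
+# the input tensor via masked_fill_.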
+
+
+class RNNEncoder(nn.Module):
+ """RNN Encoder."""
+
+ def __init__(self, num_features, embeddingsize, hiddensize,
+ padding_idx=0, rnn_class='lstm', numlayers=2, dropout=0.1,
+ bidirectional=False, shared_lt=None, shared_rnn=None,
+ input_dropout=0, unknown_idx=None, sparse=False):
+ """Initialize recurrent encoder."""
+ super().__init__()
+
+ self.dropout = nn.Dropout(p=dropout)
+ self.layers = numlayers
+ self.dirs = 2 if bidirectional else 1
+ self.hsz = hiddensize
+
+ if input_dropout > 0 and unknown_idx is None:
+ raise RuntimeError('input_dropout > 0 but unknown_idx not set')
+ self.input_dropout = UnknownDropout(unknown_idx, input_dropout)
+
+ if shared_lt is None:
+ self.lt = nn.Embedding(num_features, embeddingsize,
+ padding_idx=padding_idx,
+ sparse=sparse)
+ else:
+ self.lt = shared_lt
+
+ if shared_rnn is None:
+ self.rnn = rnn_class(embeddingsize, hiddensize, numlayers,
+ dropout=dropout if numlayers > 1 else 0,
+ batch_first=True, bidirectional=bidirectional)
+ elif bidirectional:
+ raise RuntimeError('Cannot share decoder with bidir encoder.')
+ else:
+ self.rnn = shared_rnn
+
+ def forward(self, xs):
+ """Encode sequence.
+
+ :param xs: (bsz x seqlen) LongTensor of input token indices
+
+ :returns: encoder outputs, hidden state, attention mask
+ encoder outputs are the output state at each step of the encoding.
+ the hidden state is the final hidden state of the encoder.
+ the attention mask is a mask of which input values are nonzero.
+ """
+ bsz = len(xs)
+
+ # embed input tokens
+ xs = self.input_dropout(xs)
+ xes = self.dropout(self.lt(xs))
+ attn_mask = xs.ne(0)
+ try:
+ x_lens = torch.sum(attn_mask.int(), dim=1)
+ xes = pack_padded_sequence(xes, x_lens, batch_first=True)
+ packed = True
+ except ValueError:
+ # packing failed, don't pack then
+ packed = False
+
+ encoder_output, hidden = self.rnn(xes)
+ if packed:
+ encoder_output, _ = pad_packed_sequence(encoder_output,
+ batch_first=True)
+ if self.dirs > 1:
+ # project to decoder dimension by taking sum of forward and back
+ if isinstance(self.rnn, nn.LSTM):
+ hidden = (hidden[0].view(-1, self.dirs, bsz, self.hsz).sum(1),
+ hidden[1].view(-1, self.dirs, bsz, self.hsz).sum(1))
+ else:
+ hidden = hidden.view(-1, self.dirs, bsz, self.hsz).sum(1)
+
+ return encoder_output, hidden, attn_mask
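+
+# Shape sketch: encoder_output is (bsz, seqlen, num_directions * hiddensize),
+# attn_mask is (bsz, seqlen) marking non-pad positions, and hidden is
+# (numlayers, bsz, hiddensize) once any bidirectional directions have been
+# summed (for LSTMs, a (h, c) tuple of two such tensors).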
+
+
+class RNNDecoder(nn.Module):
+ """Recurrent decoder module.
+
+ Can be used as a standalone language model or paired with an encoder.
+ """
+
+ def __init__(self, num_features, embeddingsize, hiddensize,
+ padding_idx=0, rnn_class='lstm', numlayers=2, dropout=0.1,
+ bidir_input=False, attn_type='none', attn_time='pre',
+ attn_length=-1, sparse=False, control_settings={}):
+ """Initialize recurrent decoder."""
+ super().__init__()
+ self.dropout = nn.Dropout(p=dropout)
+ self.layers = numlayers
+ self.hsz = hiddensize
+ self.esz = embeddingsize
+
+ self.lt = nn.Embedding(num_features, embeddingsize,
+ padding_idx=padding_idx, sparse=sparse)
+
+ # The inputsize is expanded to accommodate the CT embeddings
+ inputsize = embeddingsize + sum([d['embsize']
+ for d in control_settings.values()])
+ self.rnn = rnn_class(inputsize, hiddensize, numlayers,
+ dropout=dropout if numlayers > 1 else 0,
+ batch_first=True)
+
+ self.attn_type = attn_type
+ self.attn_time = attn_time
+ self.attention = AttentionLayer(attn_type=attn_type,
+ hiddensize=hiddensize,
+ embeddingsize=embeddingsize,
+ bidirectional=bidir_input,
+ attn_length=attn_length,
+ attn_time=attn_time)
+
+ self.control_encoder = ControlEncoder(control_settings=control_settings)
+
+ def forward(self, xs, ctrl_inputs=None, hidden=None, attn_params=None):
+ """Decode from input tokens.
+
+ :param xs: (bsz x seqlen) LongTensor of input token indices
+ :param ctrl_inputs: (bsz, num_controls) LongTensor
+ :param hidden: hidden state to feed into decoder. default (None)
+ initializes tensors using the RNN's defaults.
+ :param attn_params: (optional) tuple containing attention parameters,
+ default AttentionLayer needs encoder_output states
+ and attention mask (e.g. encoder_input.ne(0))
+
+ :returns: output state(s), hidden state.
+ output state of the encoder. for an RNN, this is
+ (bsz, seq_len, num_directions * hiddensize).
+ hidden state will be same dimensions as input
+ hidden state. for an RNN, this is a tensor of sizes
+ (bsz, numlayers * num_directions, hiddensize).
+ """
+ # sequence indices => sequence embeddings
+ xes = self.dropout(self.lt(xs)) # shape (bsz, seqlen, embsize)
+
+ # Concatenate the control embeddings
+ if ctrl_inputs is not None:
+ ctrl_embs = self.dropout(
+ self.control_encoder(ctrl_inputs)) # shape (bsz, sum of ctrl emb sizes)
+ ctrl_embs_tiled = ctrl_embs.unsqueeze(1).repeat(
+ 1, xes.size(1), 1) # shape (bsz, seqlen, sum of ctrl emb sizes)
+ xes = torch.cat(
+ [xes, ctrl_embs_tiled], 2) # shape (bsz,seqlen,embsize+sum of ctrl embsz)
+
+ if self.attn_time == 'pre':
+ # modify input vectors with attention
+ xes, _attw = self.attention(xes, hidden, attn_params)
+
+ # feed tokens into rnn
+ output, new_hidden = self.rnn(xes, hidden)
+
+ if self.attn_time == 'post':
+ # modify output vectors with attention
+ output, _attw = self.attention(output, new_hidden, attn_params)
+
+ return output, new_hidden
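+
+# Input-size sketch (hypothetical sizes): with embeddingsize=512 and a single CT
+# control of embsize 10, the decoder RNN consumes vectors of size 522; the
+# concatenated control embedding is tiled across the sequence length, so the
+# decoder is conditioned on the control at every timestep.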
+
+
+class OutputLayer(nn.Module):
+ """Takes in final states and returns distribution over candidates."""
+
+ def __init__(self, num_features, embeddingsize, hiddensize, dropout=0,
+ numsoftmax=1, shared_weight=None, padding_idx=-1):
+ """Initialize output layer.
+
+ :param num_features: number of candidates to rank
+ :param hiddensize: (last) dimension of the input vectors
+ :param embeddingsize: (last) dimension of the candidate vectors
+ :param numsoftmax: (default 1) number of softmaxes to calculate.
+ see arxiv.org/abs/1711.03953 for more info.
+ increasing this slows down computation but can
+ add more expressivity to the embeddings.
+ :param shared_weight: (num_features x esz) vector of weights to use as
+ the final linear layer's weight matrix. default
+ None starts with a new linear layer.
+ :param padding_idx: model should output a large negative number for
+ score at this index. if set to -1 (default),
+ this is disabled. if >= 0, subtracts one from
+ num_features and always outputs -1e20 at this
+ index. only used when shared_weight is not None.
+ setting this param helps protect gradient from
+ entering shared embedding matrices.
+ """
+ super().__init__()
+ self.dropout = nn.Dropout(p=dropout)
+
+ self.padding_idx = padding_idx if shared_weight is not None else -1
+
+ # embedding to scores
+ if shared_weight is None:
+ # just a regular linear layer
+ self.e2s = nn.Linear(embeddingsize, num_features, bias=True)
+ else:
+ # use shared weights and a bias layer instead
+ if padding_idx == 0:
+ num_features -= 1 # don't include padding
+ shared_weight = shared_weight.narrow(0, 1, num_features)
+ elif padding_idx > 0:
+ raise RuntimeError('nonzero pad_idx not yet implemented')
+ self.weight = Parameter(shared_weight)
+ self.bias = Parameter(torch.Tensor(num_features))
+ self.reset_parameters()
+ self.e2s = lambda x: F.linear(x, self.weight, self.bias)
+
+ self.numsoftmax = numsoftmax
+ if numsoftmax > 1:
+ self.esz = embeddingsize
+ self.softmax = nn.Softmax(dim=1)
+ self.prior = nn.Linear(hiddensize, numsoftmax, bias=False)
+ self.latent = nn.Linear(hiddensize, numsoftmax * embeddingsize)
+ self.activation = nn.Tanh()
+ else:
+ # rnn output to embedding
+ if hiddensize != embeddingsize:
+ # learn projection to correct dimensions
+ self.o2e = nn.Linear(hiddensize, embeddingsize, bias=True)
+ else:
+ # no need for any transformation here
+ self.o2e = lambda x: x
+
+ def reset_parameters(self):
+ """Reset bias param."""
+ if hasattr(self, 'bias'):
+ stdv = 1. / math.sqrt(self.bias.size(0))
+ self.bias.data.uniform_(-stdv, stdv)
+
+ def forward(self, input):
+ """Compute scores from inputs.
+
+ :param input: (bsz x seq_len x num_directions * hiddensize) tensor of
+ states, e.g. the output states of an RNN
+
+ :returns: (bsz x seqlen x num_cands) scores for each candidate
+ """
+ # next compute scores over dictionary
+ if self.numsoftmax > 1:
+ bsz = input.size(0)
+ seqlen = input.size(1) if input.dim() > 1 else 1
+
+ # first compute different softmax scores based on input vec
+ # hsz => numsoftmax * esz
+ latent = self.latent(input)
+ active = self.dropout(self.activation(latent))
+ # esz => num_features
+ logit = self.e2s(active.view(-1, self.esz))
+
+ # calculate priors: distribution over which softmax scores to use
+ # hsz => numsoftmax
+ prior_logit = self.prior(input).view(-1, self.numsoftmax)
+ # softmax over numsoftmax's
+ prior = self.softmax(prior_logit)
+
+ # now combine priors with logits
+ prob = self.softmax(logit).view(bsz * seqlen, self.numsoftmax, -1)
+ probs = (prob * prior.unsqueeze(2)).sum(1).view(bsz, seqlen, -1)
+ scores = probs.log()
+ else:
+ # hsz => esz, good time for dropout
+ e = self.dropout(self.o2e(input))
+ # esz => num_features
+ scores = self.e2s(e)
+
+ if self.padding_idx == 0:
+ pad_score = scores.new(scores.size(0),
+ scores.size(1),
+ 1).fill_(-NEAR_INF)
+ scores = torch.cat([pad_score, scores], dim=-1)
+
+ return scores
+
+
+class AttentionLayer(nn.Module):
+ """Computes attention between hidden and encoder states.
+
+ See arxiv.org/abs/1508.04025 for more info on each attention type.
+ """
+
+ def __init__(self, attn_type, hiddensize, embeddingsize,
+ bidirectional=False, attn_length=-1, attn_time='pre'):
+ """Initialize attention layer."""
+ super().__init__()
+ self.attention = attn_type
+
+ if self.attention != 'none':
+ hsz = hiddensize
+ hszXdirs = hsz * (2 if bidirectional else 1)
+ if attn_time == 'pre':
+ # attention happens on the input embeddings
+ input_dim = embeddingsize
+ elif attn_time == 'post':
+ # attention happens on the output of the rnn
+ input_dim = hsz
+ else:
+ raise RuntimeError('unsupported attention time')
+
+ # linear layer for combining applied attention weights with input
+ self.attn_combine = nn.Linear(hszXdirs + input_dim, input_dim,
+ bias=False)
+
+ if self.attention == 'local':
+ # local attention over fixed set of output states
+ if attn_length < 0:
+ raise RuntimeError('Set attention length to > 0.')
+ self.max_length = attn_length
+ # combines input and previous hidden output layer
+ self.attn = nn.Linear(hsz + input_dim, attn_length, bias=False)
+ # combines attention weights with encoder outputs
+ elif self.attention == 'concat':
+ self.attn = nn.Linear(hsz + hszXdirs, hsz, bias=False)
+ self.attn_v = nn.Linear(hsz, 1, bias=False)
+ elif self.attention == 'general':
+ # equivalent to dot if attn is identity
+ self.attn = nn.Linear(hsz, hszXdirs, bias=False)
+
+ def forward(self, xes, hidden, attn_params):
+ """Compute attention over attn_params given input and hidden states.
+
+ :param xes: input state. will be combined with applied
+ attention.
+ :param hidden: hidden state from model. will be used to select
+ states to attend to in from the attn_params.
+ :param attn_params: tuple of encoder output states and a mask showing
+ which input indices are nonzero.
+
+ :returns: output, attn_weights
+ output is a new state of same size as input state `xes`.
+ attn_weights are the weights given to each state in the
+ encoder outputs.
+ """
+ if self.attention == 'none':
+ # do nothing, no attention
+ return xes, None
+
+ if type(hidden) == tuple:
+ # for lstms use the "hidden" state not the cell state
+ hidden = hidden[0]
+ last_hidden = hidden[-1] # select hidden state from last RNN layer
+
+ enc_out, attn_mask = attn_params
+ bsz, seqlen, hszXnumdir = enc_out.size()
+ numlayersXnumdir = last_hidden.size(1)
+
+ if self.attention == 'local':
+ # local attention weights aren't based on encoder states
+ h_merged = torch.cat((xes.squeeze(1), last_hidden), 1)
+ attn_weights = F.softmax(self.attn(h_merged), dim=1)
+
+ # adjust state sizes to the fixed window size
+ if seqlen > self.max_length:
+ offset = seqlen - self.max_length
+ enc_out = enc_out.narrow(1, offset, self.max_length)
+ seqlen = self.max_length
+ if attn_weights.size(1) > seqlen:
+ attn_weights = attn_weights.narrow(1, 0, seqlen)
+ else:
+ hid = last_hidden.unsqueeze(1)
+ if self.attention == 'concat':
+ # concat hidden state and encoder outputs
+ hid = hid.expand(bsz, seqlen, numlayersXnumdir)
+ h_merged = torch.cat((enc_out, hid), 2)
+ # then do linear combination of them with activation
+                active = torch.tanh(self.attn(h_merged))
+ attn_w_premask = self.attn_v(active).squeeze(2)
+ elif self.attention == 'dot':
+ # dot product between hidden and encoder outputs
+ if numlayersXnumdir != hszXnumdir:
+ # enc_out has two directions, so double hid
+ hid = torch.cat([hid, hid], 2)
+ enc_t = enc_out.transpose(1, 2)
+ attn_w_premask = torch.bmm(hid, enc_t).squeeze(1)
+ elif self.attention == 'general':
+ # before doing dot product, transform hidden state with linear
+ # same as dot if linear is identity
+ hid = self.attn(hid)
+ enc_t = enc_out.transpose(1, 2)
+ attn_w_premask = torch.bmm(hid, enc_t).squeeze(1)
+
+ # calculate activation scores, apply mask if needed
+ if attn_mask is not None:
+ # remove activation from NULL symbols
+ attn_w_premask.masked_fill_((1 - attn_mask), -NEAR_INF)
+ attn_weights = F.softmax(attn_w_premask, dim=1)
+
+ # apply the attention weights to the encoder states
+ attn_applied = torch.bmm(attn_weights.unsqueeze(1), enc_out)
+ # concatenate the input and encoder states
+ merged = torch.cat((xes.squeeze(1), attn_applied.squeeze(1)), 1)
+ # combine them with a linear layer and tanh activation
+ output = torch.tanh(self.attn_combine(merged).unsqueeze(1))
+
+ return output, attn_weights
diff --git a/projects/controllable_dialogue/controllable_seq2seq/nidf.py b/projects/controllable_dialogue/controllable_seq2seq/nidf.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/nidf.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file contains code to compute NIDF measures, used for specificity control.
+"""
+
+from parlai.core.params import ParlaiParser
+from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
+from parlai.core.worlds import create_task
+from parlai.core.utils import TimeLogger
+from collections import Counter
+import os
+import math
+import pickle
+
+
+CONTROLLABLE_DIR = 'controllable_dialogue'
+PARLAI_FORMAT_DIR = 'controllable_dialogue/ConvAI2_parlaiformat'
+
+
+def get_word_counts(opt, count_inputs):
+ """Goes through the dataset specified in opt and gets word counts.
+
+ Inputs:
+ count_inputs: If True, include both input and reply when counting words
+ and utterances. Otherwise, only include reply text.
+
+ Returns:
+ word_counter_per_sent: a Counter mapping each word to the number of
+ utterances in which it appears.
+ num_sents: int. number of utterances counted
+ """
+ # Create repeat label agent and assign it to the specified task
+ agent = RepeatLabelAgent(opt)
+ world = create_task(opt, agent)
+
+ # Count word frequency for all words in dataset
+ word_counter_per_sent = Counter()
+ num_sents = 0
+ count = 0
+ log_timer = TimeLogger()
+ while True:
+ count += 1
+
+ world.parley()
+ reply = world.acts[0].get('labels', world.acts[0].get('eval_labels'))[0]
+
+ words = reply.split()
+ words_no_dups = list(set(words)) # remove duplicates
+ word_counter_per_sent.update(words_no_dups)
+ num_sents += 1
+
+ # Optionally count words in input text
+ if count_inputs:
+ input = world.acts[0]['text']
+ input = input.split('\n')[-1] # e.g. in ConvAI2, this removes persona
+ words = input.split()
+ words_no_dups = list(set(words)) # remove duplicates
+ word_counter_per_sent.update(words_no_dups)
+ num_sents += 1
+
+ if log_timer.time() > opt['log_every_n_secs']:
+ text, _log = log_timer.log(world.total_parleys, world.num_examples())
+ print(text)
+
+ if world.epoch_done():
+ print('EPOCH DONE')
+ break
+
+ return word_counter_per_sent, num_sents
+
+
+def learn_nidf(opt):
+ """
+    Go through ConvAI2 and Twitter data, and count word frequencies.
+ Save word2count.pkl, which contains word2count, and total num_sents.
+ These are both needed to calculate NIDF later.
+ """
+
+ opt['log_every_n_secs'] = 2
+
+ print('Counting words in Twitter train set...')
+ opt['datatype'] = 'train:ordered'
+ opt['task'] = 'twitter'
+ wc1, ns1 = get_word_counts(opt, count_inputs=True)
+
+ print('Counting words in Twitter val set...')
+ opt['datatype'] = 'valid'
+ opt['task'] = 'twitter'
+ wc2, ns2 = get_word_counts(opt, count_inputs=True)
+
+ opt['task'] = 'fromfile:parlaiformat'
+
+ print('Counting words in ConvAI2 train set...')
+ opt['datatype'] = 'train:ordered'
+ opt['fromfile_datapath'] = os.path.join(
+ opt['datapath'], PARLAI_FORMAT_DIR, 'train.txt'
+ )
+ # Don't include inputs because ConvAI2 train set reverses every conversation
+ wc3, ns3 = get_word_counts(opt, count_inputs=False)
+
+ print('Counting words in ConvAI2 val set...')
+ opt['datatype'] = 'valid'
+ opt['fromfile_datapath'] = os.path.join(
+ opt['datapath'], PARLAI_FORMAT_DIR, 'valid.txt'
+ )
+ wc4, ns4 = get_word_counts(opt, count_inputs=True)
+
+ # Merge word counts
+ word_counter = Counter()
+ for wc in [wc1, wc2, wc3, wc4]:
+ for word, count in wc.items():
+ word_counter[word] += count
+ num_sents = ns1 + ns2 + ns3 + ns4
+
+ # Write word2count and num_sents to file
+ word2count_fp = os.path.join(opt['datapath'], CONTROLLABLE_DIR, 'word2count.pkl')
+ print("Saving word count stats to %s..." % word2count_fp)
+ data = {
+ "word2count": word_counter,
+ "num_sents": num_sents
+ }
+ with open(word2count_fp, "wb") as f:
+ pickle.dump(data, f)
+
+
+def load_word2nidf(opt):
+ """
+ Loads word count stats from word2count.pkl file in data/controllable_dialogue,
+ computes NIDF for all words, and returns the word2nidf dictionary.
+
+ Returns:
+ word2nidf: dict mapping words to their NIDF score (float between 0 and 1)
+ """
+ word2count_fp = os.path.join(opt['datapath'], CONTROLLABLE_DIR, 'word2count.pkl')
+ print("Loading word count stats from %s..." % word2count_fp)
+ with open(word2count_fp, "rb") as f:
+ data = pickle.load(f)
+ num_sents = data['num_sents']
+ print('num_sents: ', num_sents)
+ word2count = data['word2count']
+    min_c = min(word2count.values()) # min count
+    max_c = max(word2count.values()) # max count
+ word2nidf = {w: (math.log(max_c)-math.log(c))/(math.log(max_c)-math.log(min_c))
+ for w, c in word2count.items()}
+ print("Done loading word2nidf dictionary.")
+ return word2nidf
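+
+# Worked example (hypothetical counts): NIDF(w) = (log(max_c) - log(c_w)) /
+# (log(max_c) - log(min_c)), so the most frequent word gets NIDF 0 and the
+# rarest gets NIDF 1. With max_c=100000 and min_c=1, a word appearing in 100
+# utterances gets NIDF = (5 - 2) / (5 - 0) = 0.6 (computing the logs in base 10;
+# the ratio is the same in any base).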
+
+
+if __name__ == '__main__':
+ parser = ParlaiParser()
+ opt = parser.parse_args()
+ learn_nidf(opt)
diff --git a/projects/controllable_dialogue/controllable_seq2seq/stopwords.py b/projects/controllable_dialogue/controllable_seq2seq/stopwords.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/stopwords.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+# From here https://www.ranks.nl/stopwords
+sw = [
+ 'a', 'about', 'above', 'after', 'again', 'against', 'all', 'am', 'an', 'and', 'any',
+ 'are', "aren't", 'as', 'at', 'be', 'because', 'been', 'before', 'being', 'below',
+ 'between', 'both', 'but', 'by', "can't", 'cannot', 'could', "couldn't", 'did',
+ "didn't", 'do', 'does', "doesn't", 'doing', "don't", 'down', 'during', 'each', 'few',
+ 'for', 'from', 'further', 'had', "hadn't", 'has', "hasn't", 'have', "haven't",
+ 'having', 'he', "he'd", "he'll", "he's", 'her', 'here', "here's", 'hers', 'herself',
+ 'him', 'himself', 'his', 'how', "how's", 'i', "i'd", "i'll", "i'm", "i've", 'if',
+ 'in', 'into', 'is', "isn't", 'it', "it's", 'its', 'itself', "let's", 'me', 'more',
+ 'most', "mustn't", 'my', 'myself', 'no', 'nor', 'not', 'of', 'off', 'on', 'once',
+ 'only', 'or', 'other', 'ought', 'our', 'ours', 'ourselves', 'out', 'over', 'own',
+ 'same', "shan't", 'she', "she'd", "she'll", "she's", 'should', "shouldn't", 'so',
+ 'some', 'such', 'than', 'that', "that's", 'the', 'their', 'theirs', 'them',
+ 'themselves', 'then', 'there', "there's", 'these', 'they', "they'd", "they'll",
+ "they're", "they've", 'this', 'those', 'through', 'to', 'too', 'under', 'until',
+ 'up', 'very', 'was', "wasn't", 'we', "we'd", "we'll", "we're", "we've", 'were',
+ "weren't", 'what', "what's", 'when', "when's", 'where', "where's", 'which', 'while',
+ 'who', "who's", 'whom', 'why', "why's", 'with', "won't", 'would', "wouldn't", 'you',
+ "you'd", "you'll", "you're", "you've", 'your', 'yours', 'yourself', 'yourselves'
+]
+
+# add punctuation
+STOPWORDS = sw + [".", "?", "!", ","]
diff --git a/projects/controllable_dialogue/controllable_seq2seq/util.py b/projects/controllable_dialogue/controllable_seq2seq/util.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/controllable_seq2seq/util.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This file contains some useful code for handling history in ConvAI2 dialogues,
+and for inspecting and reordering the n-best candidates after beam search.
+"""
+
+from parlai.core.torch_agent import TorchAgent
+from .controls import eval_attr
+
+
+class ConvAI2History(object):
+ """
+ An object to store the history of a ConvAI2 conversation in a convenient format.
+ """
+
+ def __init__(self, text, assume_persontokens=True, dictionary=None):
+ """
+ Separates text (the dialogue context) into:
+ self.persona_lines: list of strings; the persona lines with "your persona:"
+ removed.
+ self.partner_utts: list of strings; the partner's utterances.
+ self.own_utts: list of strings; the bot's own utterances.
+ All of the above are lowercase and tokenized.
+
+ Inputs:
+ text: string. Contains several lines separated by \n. The first few lines are
+ persona lines beginning "your persona: ", then the next lines are dialogue.
+ assume_persontokens: If True, assert that the dialogue lines start with
+ __p1__ and __p2__ respectively, and then remove them.
+ dictionary: parlai DictionaryAgent.
+ """
+ p1_token, p2_token = TorchAgent.P1_TOKEN, TorchAgent.P2_TOKEN
+
+ # Split text into lines
+ lines = [t.strip() for t in text.split('\n')]
+
+ # Identify the persona lines and remove "your persona"
+ persona_lines = [t for t in lines if "your persona:" in t]
+ persona_lines = [remove_prefix(pl, "your persona:") for pl in persona_lines]
+ persona_lines = [fix_personaline_period(pl) for pl in persona_lines]
+
+ # Identify the dialogue lines. It's assumed that p1 goes first.
+ utts = lines[len(persona_lines):]
+ p1_utts = [utts[i] for i in range(0, len(utts), 2)]
+ p2_utts = [utts[i] for i in range(1, len(utts), 2)]
+
+ # Check for and remove the __p1__ and __p2__ prefixes
+ if assume_persontokens:
+ p1_utts = [remove_prefix(utt, p1_token) for utt in p1_utts]
+ p2_utts = [remove_prefix(utt, p2_token) for utt in p2_utts]
+
+ # Tokenize and lowercase
+ if dictionary is not None:
+ persona_lines = [" ".join(dictionary.tokenize(pl)) for pl in persona_lines]
+ p1_utts = [" ".join(dictionary.tokenize(utt)) for utt in p1_utts]
+ p2_utts = [" ".join(dictionary.tokenize(utt)) for utt in p2_utts]
+
+ # Strip trailing whitespace and discard any empty lines
+ self.persona_lines = [l.strip() for l in persona_lines if l.strip() != ""]
+ self.partner_utts = [l.strip() for l in p1_utts if l.strip() != ""]
+ self.own_utts = [l.strip() for l in p2_utts if l.strip() != ""]
+
+
+def remove_prefix(utt, prefix):
+ """
+ Check that utt begins with prefix+" ", and then remove.
+
+ Inputs:
+ utt: string
+ prefix: string
+
+ Returns:
+ new utt: utt with the prefix+" " removed.
+ """
+ try:
+ assert utt[:len(prefix)+1] == prefix+" "
+ except AssertionError as e:
+ print("ERROR: utterance '%s' does not start with '%s '" % (utt, prefix))
+ print(repr(utt[:len(prefix)+1]))
+ print(repr(prefix+" "))
+ raise e
+ return utt[len(prefix)+1:]
+
+
+def fix_personaline_period(line):
+ """
+ Sometimes the tokenized persona lines have a period at the end but no space before
+ the period. This function fixes it, e.g. changes
+ 'my favorite color is blue.' to 'my favorite color is blue .'
+ """
+ assert len(line) >= 2
+ assert line[-1] == "." and line[-2] != " "
+ pl = line[:-1] + " ."
+ return pl
+
+
+def show_beam_cands(n_best_beam_preds, history, dictionary):
+ """
+ Pretty-print the n-best candidates from beam search, along with their probabilities
+
+ Inputs:
+ n_best_beam_preds: list length num_candidates of (prediction, score) pairs.
+ prediction is a tensor of word indices, score is a single float tensor.
+ history: ConvAI2History
+ dictionary: parlai DictionaryAgent
+ """
+ print("")
+ print("persona: ", history.persona_lines)
+ print("partner_utts: ", history.partner_utts)
+ print("own_utts: ", history.own_utts)
+ print("")
+ for idx, (pred, score) in enumerate(n_best_beam_preds):
+ text = dictionary.vec2txt(pred.tolist())
+ text = text.replace('__start__ ', '').replace(' __end__', '')
+ print("%i %.4f %s" % (idx, score, text))
+ print("")
+
+
+def reorder_extrep2gram_qn(n_best_beam_preds, history, dictionary, verbose):
+    """
+    Reorder the n-best beam search candidates: sort by ascending extrep_2gram
+    score (ties broken by original beam rank), then return the highest-ranked
+    candidate that contains '?'. If no candidate contains '?', return the first
+    candidate of the reordered list.
+
+    Inputs:
+ n_best_beam_preds: list length num_candidates of (prediction, score) pairs.
+ prediction is a tensor of word indices, score is a single float tensor.
+ history: ConvAI2History
+ dictionary: parlai DictionaryAgent
+ verbose: bool. If True, print out the selection process.
+
+ Outputs: (tensor, tensor) pair which is the chosen (prediction, score)
+ """
+ # Optionally print out the history
+ if verbose:
+ print("persona: ", history.persona_lines)
+ print("partner_utts: ", history.partner_utts)
+ print("own_utts: ", history.own_utts)
+
+ # Go through the candidates, measuring their extrep_2gram level
+ # Optionally print out the original ordering
+ candidates = [] # list of (orig_idx, pred, text, score, extrep_2gram) tuples
+ if verbose:
+ print("\nORIGINAL ORDER:")
+ for idx, (pred, score) in enumerate(n_best_beam_preds):
+ text = dictionary.vec2txt(pred.tolist())
+ text = text.replace('__start__ ', '').replace(' __end__', '')
+ if verbose:
+ print("%i %.4f %s" % (idx, score, text))
+ extrep_2gram = eval_attr(text, history, 'extrep_2gram')
+ candidates.append((idx, pred, text, score, extrep_2gram))
+
+ # Sort the candidates by ascending repetition. Tiebreak using original ranking.
+ candidates = sorted(candidates, key=lambda x: (x[4], x[0]))
+
+ # Optionally print out the new ordering
+ if verbose:
+ print("\nSORTED BY EXTREP_2GRAM:")
+ for (idx, _, text, _, extrep_2gram) in candidates:
+ print("%i %.4f %s" % (idx, extrep_2gram, text))
+ print("")
+
+ # Identify the top-ranked (w.r.t. new ordering) candidate that contains '?'
+ for (_, pred, text, score, _) in candidates:
+ if "?" not in text:
+ continue
+ return (pred, score)
+
+ # If there was no candidate containing '?', return top-ranked (w.r.t extrep_2gram)
+    (_, pred, _, score, _) = candidates[0]
+ return (pred, score)
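+
+# Selection sketch (hypothetical scores): if three candidates have extrep_2gram
+# values [0.0, 0.0, 0.5] and only the third contains '?', the third is returned
+# even though it repeats more, because questions are preferred; if no candidate
+# contains '?', the lowest-repetition candidate (ties broken by original beam
+# rank) is returned instead.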
diff --git a/projects/controllable_dialogue/eval_wordstat.py b/projects/controllable_dialogue/eval_wordstat.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/eval_wordstat.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+This script is derived from parlai/core/scripts/eval_wordstat.py
+
+This script measures many different metrics of the text generated for the validation
+set - including all the controllable attributes.
+"""
+
+from parlai.core.params import ParlaiParser
+from parlai.core.dict import DictionaryAgent
+from parlai.core.agents import create_agent
+from parlai.core.worlds import create_task
+from parlai.core.utils import TimeLogger
+from parlai.core.metrics import normalize_answer
+from parlai.core.logs import TensorboardLogger
+from controllable_seq2seq.controls import (
+ ATTR2SENTSCOREFN,
+ eval_attr,
+ initialize_control_information,
+)
+from controllable_seq2seq.util import ConvAI2History
+from collections import Counter
+
+import copy
+import random
+import json
+import time
+import os
+
+
+def setup_args(parser=None):
+ if parser is None:
+ parser = ParlaiParser(True, True, 'compute statistics from model predictions')
+ DictionaryAgent.add_cmdline_args(parser)
+
+    # These defaults can be overridden by both .opt file and user's command line flags
+ parser.add_argument('-ne', '--num-examples', type=int, default=-1)
+ parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
+ parser.add_argument('-ed', '--external-dict', type=str, default=None,
+ help='External dictionary for stat computation')
+ parser.add_argument('-fb', '--freq-bins', type=str, default='0,100,1000,10000',
+ help='Bins boundaries for rare words stat')
+ parser.add_argument('-gr', '--gold-response', type=bool, default=False,
+ help='Compute stats for gold response')
+
+ # These settings override .opt file but not user's command line flags
+ parser.set_params(
+ datatype='valid',
+ task='projects.controllable_dialogue.tasks.agents',
+ model='projects.controllable_dialogue.controllable_seq2seq.controllable_seq2seq:ControllableSeq2seqAgent', # noqa: E501
+ batchsize=64,
+ beam_size=20,
+ beam_min_n_best=10,
+ use_reply='model',
+ )
+ TensorboardLogger.add_cmdline_args(parser)
+ return parser
+
+
+def get_word_stats(text, agent_dict, bins=[0, 100, 1000, 100000]):
+ """
+    Take a text sequence and a dictionary, and return word frequency and length
+    statistics.
+
+    :param text: text sequence
+    :param agent_dict: can be external dict or dict from the model
+    :param bins: list with range boundaries
+    :return: freqs dictionary, num words, word length (in tokens), char length
+ """
+ pred_list = agent_dict.tokenize(text)
+ pred_freq = [agent_dict.freq[word] for word in pred_list]
+ freqs = {i: 0 for i in bins}
+ for f in pred_freq:
+ for b in bins:
+ if f <= b:
+ freqs[b] += 1
+ break
+
+ wlength = len(pred_list)
+ clength = len(text) # including spaces
+ return freqs, len(pred_freq), wlength, clength
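+
+# Worked example (hypothetical counts): with bins=[0, 100, 1000, 100000], a token
+# whose dictionary frequency is 57 increments freqs[100] (the first boundary it
+# does not exceed), a never-seen token (frequency 0) increments freqs[0], and
+# tokens more frequent than the largest boundary are not counted in freqs at all.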
+
+
+def update_sent_attr_stats(sent_attrs, history, prediction):
+ """
+ Update the sent_attrs dict with the attributes of a prediction with given history.
+
+ Inputs:
+ sent_attrs: dictionary mapping each attr (a string) to a list of floats
+ (the scores).
+ history: a ConvAI2History
+ prediction: string. the response text for which we measure sent attributes
+ """
+ for attr in sent_attrs.keys():
+ attr_score = eval_attr(prediction, history, attr)
+ sent_attrs[attr].append(attr_score)
+ return sent_attrs
+
+
+def eval_wordstat(opt, print_parser=None):
+ """Evaluates a model.
+
+ :param opt: tells the evaluation function how to run
+ :param print_parser: if provided, prints the options that are set within the
+ model after loading the model
+ """
+ random.seed(42)
+
+ # Setup control information
+ initialize_control_information(opt)
+
+ # Create model and assign it to the specified task
+ agent = create_agent(opt, requireModelExists=True)
+ world = create_task(opt, agent)
+
+ if opt.get('external_dict'):
+ print('[ Using external dictionary from: {} ]'.format(
+ opt['external_dict']))
+ dict_opt = copy.deepcopy(opt)
+ dict_opt['dict_file'] = opt['external_dict']
+ dictionary = DictionaryAgent(dict_opt)
+ else:
+ print('[ Using model bundled dictionary ]')
+ dictionary = agent.dict
+
+ batch_size = opt['batchsize']
+
+ if print_parser:
+ # Show arguments after loading model
+ print_parser.opt = agent.opt
+ print_parser.print_args()
+ log_every_n_secs = opt.get('log_every_n_secs', -1)
+ if log_every_n_secs <= 0:
+ log_every_n_secs = float('inf')
+ log_time = TimeLogger()
+
+ data = {} # This will be written to the output json file
+ data['opt'] = agent.opt # Save the opt to json
+
+ # Determine the output filename
+ if opt['gold_response']: # Special output file for gold response
+ model_dir, _ = os.path.split(opt.get('model_file'))
+ outfile = os.path.join(model_dir, 'goldresponse')
+ if opt['use_reply'] != 'label':
+ raise ValueError('You should set --use-reply label (not --use-reply model) '
+ 'when measuring goldresponse stats')
+ else:
+ outfile = "%s.%s.%s.%s" % (
+ opt.get('model_file'),
+ opt.get('datatype'),
+ "use%sreply" % agent.opt['use_reply'],
+ "beam%i" % agent.opt['beam_size'],
+ )
+ if agent.opt['beam_size'] > 1:
+ outfile += ".beamminnbest%i" % agent.opt['beam_min_n_best']
+ if len(agent.control_settings) > 0:
+ outfile += ".setcontrols:" + "_".join(
+ ["%s%s" % (c, str(agent.control_settings[c]['set_value']))
+ for c in sorted(agent.control_settings.keys())])
+ if agent.opt['beam_reorder'] not in ['none', False]:
+ outfile += ".beamreorder_%s" % agent.opt['beam_reorder']
+ if len(agent.wd_features) > 0:
+ sorted_bfw = sorted(list(zip(agent.wd_features, agent.wd_wts)),
+ key=lambda x: x[0])
+ outfile += ".WDfeatures:" + "_".join(
+ ["%s%s" % (f, str(w)) for f, w in sorted_bfw])
+ if opt['num_examples'] != -1:
+ outfile += ".numex%i" % opt['num_examples']
+ outfile += ".wordstats.json"
+ print("\nOutfile: %s\n" % outfile)
+
+ cnt = 0
+ word_statistics = {
+ 'mean_wlength': [], # list of length (in words) of utterances
+ 'mean_clength': [], # list of length (in chars) of utterances
+ 'freqs_cnt': Counter(), # Counter for word frequencies, bucketed
+ 'word_cnt': 0, # total number of words in all utterances
+ 'pred_list': [], # list of generated utterances after applying normalize_answer
+ 'pure_pred_list': [], # list of generated utterances
+ 'context_list': [] # list of text inputs (persona and conversation history)
+ }
+ bins = [int(i) for i in opt['freq_bins'].split(',')]
+
+ # This dictionary records all the sentence-level controllable attributes
+ # For each attribute, we have a list of all the values
+ sent_attrs = {attr: [] for attr in ATTR2SENTSCOREFN.keys()} # str to list of floats
+
+ # histories will be a list of ConvAI2History objects
+ histories = []
+
+ def process_prediction(prediction, word_statistics):
+ word_statistics['pred_list'].append(normalize_answer(prediction))
+ freqs, _cnt, wlength, clength = get_word_stats(
+ prediction, dictionary, bins=bins
+ )
+ word_statistics['word_cnt'] += _cnt
+ word_statistics['mean_wlength'].append(wlength)
+ word_statistics['mean_clength'].append(clength)
+ word_statistics['freqs_cnt'] += Counter(freqs)
+ return word_statistics
+
+ t0 = time.time()
+ while not world.epoch_done():
+ world.parley()
+ # orig eval_wordstat.py handles bsz=1 but for simplicity we assume bsz>1
+ assert batch_size != 1
+ for world_idx, w in enumerate(world.worlds):
+ try:
+ try:
+ response_act = w.acts[-1]
+ prediction = response_act['text']
+ except KeyError:
+ continue
+ if opt['gold_response']:
+ # If we're measuring gold response, use eval_label as prediction
+ prediction = w.acts[0]['eval_labels'][0]
+ response_act = {'text': prediction}
+ word_statistics['context_list'].append(w.acts[0]['text'])
+ word_statistics['pure_pred_list'].append(prediction)
+ except IndexError:
+ continue
+ cnt += 1
+ word_statistics = process_prediction(prediction, word_statistics)
+
+ # Compute and record sentence-level attributes
+ history = ConvAI2History(w.acts[0]['text'])
+ histories.append(history)
+ sent_attrs = update_sent_attr_stats(sent_attrs, history, prediction)
+
+ # Periodically log some info
+ if log_time.time() > log_every_n_secs:
+ report = world.report()
+ text, report = log_time.log(report['exs'], world.num_examples(), report)
+ print(text)
+
+ if opt['num_examples'] > 0 and cnt >= opt['num_examples']:
+ break
+ if world.epoch_done():
+ print("EPOCH DONE")
+ print("Time to process %i examples: %f seconds" % (cnt, time.time()-t0))
+
+ # Compute percent unique
+ # Note this is w.r.t. normalized pred_list not original pure_pred_list
+ unique_list = []
+ cntr = Counter(word_statistics['pred_list'])
+ for k, v in cntr.items():
+ if v == 1:
+ unique_list.append(k)
+ unique_percent = len(unique_list) / len(word_statistics['pred_list']) * 100
+
+ # Print a final report
+ report = world.report()
+ if opt['gold_response']:
+ report['ppl'] = 0.0 # For gold responses, overwrite the perplexity
+ print(report)
+
+ # Put all information in data dict
+ data['unique_percent'] = unique_percent # percent of all responses that are unique
+ data['word_statistics'] = word_statistics # word stats, as in orig eval_wordstat
+ data['report'] = report # the final report
+ data['histories'] = [(hist.persona_lines, hist.partner_utts, hist.own_utts)
+ for hist in histories] # history for each example
+ data['sent_attrs'] = sent_attrs # all sentence attribute values for responses
+
+ # Write data to outfile
+ print("Writing to %s..." % outfile)
+ with open(outfile, 'w') as f:
+ json.dump(data, f)
+
+
+if __name__ == '__main__':
+ parser = setup_args()
+ eval_wordstat(parser.parse_args(print_args=False), print_parser=parser)
diff --git a/projects/controllable_dialogue/get_bucket_lowerbounds.py b/projects/controllable_dialogue/get_bucket_lowerbounds.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/get_bucket_lowerbounds.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+For a given (continuous) control variable in the dataset, bucket the data and return
+the lower bounds for those buckets.
+"""
+
+from parlai.core.params import ParlaiParser
+from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
+from parlai.core.worlds import create_task
+from parlai.core.utils import TimeLogger
+from controllable_seq2seq.controls import sort_into_bucket
+from collections import Counter
+import random
+
+
+def bucket_data(opt):
+ # create repeat label agent and assign it to the specified task
+ agent = RepeatLabelAgent(opt)
+ world = create_task(opt, agent)
+
+ if opt['num_examples'] == -1:
+ num_examples = world.num_examples()
+ else:
+ num_examples = opt['num_examples']
+ log_timer = TimeLogger()
+
+ assert opt['control'] != ''
+ ctrl = opt['control']
+
+ num_buckets = opt['num_buckets']
+
+ ctrl_vals = [] # list of floats
+
+ for _ in range(num_examples):
+ world.parley()
+ world.acts[0]['labels'] = world.acts[0].get(
+ 'labels', world.acts[0].pop('eval_labels', None))
+
+ if ctrl not in world.acts[0].keys():
+ raise Exception('Error: control %s isn\'t in the data. available keys: %s'
+ % (ctrl, ', '.join(world.acts[0].keys())))
+ ctrl_val = world.acts[0][ctrl]
+ if ctrl_val == "None":
+ assert ctrl == 'lastuttsim'
+ ctrl_val = None
+ else:
+ ctrl_val = float(ctrl_val)
+ if ctrl == 'avg_nidf':
+ assert ctrl_val >= 0
+ assert ctrl_val <= 1
+ elif ctrl == 'question':
+ assert ctrl_val in [0, 1]
+ elif ctrl == 'lastuttsim':
+ if ctrl_val is not None:
+ assert ctrl_val >= -1
+ assert ctrl_val <= 1
+ else:
+ raise Exception('Unexpected ctrl name: %s' % ctrl)
+ ctrl_vals.append(ctrl_val)
+
+ if log_timer.time() > opt['log_every_n_secs']:
+ text, _log = log_timer.log(world.total_parleys, world.num_examples())
+ print(text)
+
+ if world.epoch_done():
+ print('EPOCH DONE')
+ break
+
+ if ctrl == 'lastuttsim':
+ num_nones = len([v for v in ctrl_vals if v is None])
+ ctrl_vals = [v for v in ctrl_vals if v is not None]
+ print("Have %i Nones for lastuttsim; these have been removed "
+ "for bucket calculation" % num_nones)
+
+ print('Collected %i control vals between %.6f and %.6f'
+ % (len(ctrl_vals), min(ctrl_vals), max(ctrl_vals)))
+
+ # Calculate bucket lower bounds
+ print('Calculating lowerbounds for %i buckets...' % num_buckets)
+ ctrl_vals = sorted(ctrl_vals)
+ lb_indices = [int(len(ctrl_vals)*i/num_buckets) for i in range(num_buckets)]
+ lbs = [ctrl_vals[idx] for idx in lb_indices]
+ print('\nBucket lowerbounds for control %s: ' % ctrl)
+ print(lbs)
+
+ # Calculate the actual bucket sizes
+ bucket_sizes = Counter()
+ bucket_ids = [sort_into_bucket(ctrl_val, lbs) for ctrl_val in ctrl_vals]
+ bucket_sizes.update(bucket_ids)
+ print('\nBucket sizes: ')
+ for bucket_id in sorted(bucket_sizes.keys()):
+ print("%i: %i" % (bucket_id, bucket_sizes[bucket_id]))
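+
+# Worked example (hypothetical sizes): with 1000 collected control values and
+# --num-buckets 10, the sorted values are sampled at indices 0, 100, ..., 900 to
+# give the bucket lower bounds, so the buckets are approximately equal-sized;
+# the bucket-size printout above shows how evenly the data is actually split.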
+
+
+def main():
+ random.seed(42)
+ # Get command line arguments
+ parser = ParlaiParser()
+ parser.add_argument('-n', '--num-examples', default=-1, type=int,
+ help='Total number of exs to convert, -1 to convert \
+ all examples')
+ parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
+ parser.add_argument('--control', type=str, default='',
+ help='the control for which we want to calculate the buckets')
+ parser.add_argument('--num-buckets', type=int, default=10,
+ help='the number of buckets we want to calculate')
+
+ parser.set_defaults(task="projects.controllable_dialogue.tasks.agents")
+ parser.set_defaults(datatype="train:stream")
+
+ opt = parser.parse_args()
+
+ bucket_data(opt)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/projects/controllable_dialogue/interactive.py b/projects/controllable_dialogue/interactive.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/interactive.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+from parlai.scripts.interactive import setup_args, interactive
+
+import random
+
+if __name__ == '__main__':
+ random.seed(42)
+ parser = setup_args()
+
+ parser.set_params(
+ batchsize=1,
+ beam_size=20,
+ beam_min_n_best=10,
+ )
+
+ print('\n' + '*' * 80)
+ print('WARNING: This dialogue model is a research project that was trained on a')
+ print('large amount of open-domain Twitter data. It may generate offensive content.'
+ )
+ print('*' * 80 + '\n')
+
+ interactive(parser.parse_args(print_args=False), print_parser=parser)
diff --git a/projects/controllable_dialogue/make_control_dataset.py b/projects/controllable_dialogue/make_control_dataset.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/make_control_dataset.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Make a copy of the ConvAI2 dataset with CT control variables annotated.
+"""
+
+from parlai.core.params import ParlaiParser
+from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
+from parlai.core.worlds import create_task
+from parlai.core.utils import msg_to_str, TimeLogger
+from controllable_seq2seq.util import ConvAI2History
+from controllable_seq2seq.controls import eval_attr, initialize_control_information
+import random
+
+
+def make_dataset(opt):
+
+ # Initialize control information so we can compute sentence attributes.
+ # Here we set build_task=False so we don't download data/controllable_dialogue
+ # (because we're trying to create it instead).
+ initialize_control_information(opt, build_task=False)
+
+ # Create repeat label agent and assign it to the specified task
+ agent = RepeatLabelAgent(opt)
+ world = create_task(opt, agent)
+ ignorefields = opt.get('ignore_fields', '')
+ outfile = opt['outfile']
+
+ # Number of examples to process
+ if opt['num_examples'] == -1:
+ num_examples = world.num_examples()
+ else:
+ num_examples = opt['num_examples']
+
+ # List of controls to include:
+ controls = opt['controls'].split(',') if opt['controls'] != '' else []
+
+ print('[ starting to convert.. ]')
+ print('[ saving output to {} ]'.format(outfile))
+ fw = open(outfile, 'w')
+ log_timer = TimeLogger()
+
+ for _ in range(num_examples):
+ world.parley()
+ world.acts[0]['labels'] = world.acts[0].get(
+ 'labels', world.acts[0].pop('eval_labels', None))
+
+ # Need to get history in order to compute control values
+ hist = ConvAI2History(world.acts[0]['text'], assume_persontokens=False)
+ response = world.acts[0]['labels'][0]
+
+ # Compute control values
+ for ctrl in controls:
+ ctrl_val = eval_attr(response, hist, ctrl)
+ if ctrl == 'avg_nidf':
+ assert ctrl_val >= 0
+ assert ctrl_val <= 1
+ elif ctrl == 'question':
+ assert ctrl_val in [0, 1]
+ elif ctrl == 'lastuttsim':
+ if ctrl_val is not None:
+ assert ctrl_val >= -1
+ assert ctrl_val <= 1
+ else:
+ raise Exception('unexpected ctrl name: %s' % ctrl)
+ world.acts[0][ctrl] = ctrl_val # add control value to act
+
+ # Write to file
+ txt = msg_to_str(world.acts[0], ignore_fields=ignorefields)
+ fw.write(txt + '\n')
+ if world.acts[0].get('episode_done', False):
+ fw.write('\n')
+
+ if log_timer.time() > opt['log_every_n_secs']:
+ text, _log = log_timer.log(world.total_parleys, world.num_examples())
+ print(text)
+
+ if world.epoch_done():
+ print('EPOCH DONE')
+ break
+ fw.close()
+
+
+if __name__ == '__main__':
+ random.seed(42)
+ # Get command line arguments
+ parser = ParlaiParser()
+ parser.add_argument('-n', '--num-examples', default=-1, type=int,
+ help='Total number of exs to convert, -1 to convert \
+ all examples')
+ parser.add_argument('-of', '--outfile', default=None, type=str,
+ help='Output file where to save, by default will be \
+ created in /tmp')
+ parser.add_argument('-if', '--ignore-fields', default='id', type=str,
+ help='Ignore these fields from the message (returned\
+ with .act() )')
+ parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
+
+ parser.add_argument('--controls', type=str, default='',
+ help='comma-separated controls to be included')
+
+ parser.set_defaults(task="fromfile:parlaiformat")
+ parser.set_defaults(datatype="train:stream")
+
+ opt = parser.parse_args()
+ make_dataset(opt)
diff --git a/projects/controllable_dialogue/tasks/agents.py b/projects/controllable_dialogue/tasks/agents.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/tasks/agents.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import copy
+from .build import build, make_path
+from parlai.core.utils import warn_once
+from parlai.core.teachers import ParlAIDialogTeacher
+
+
+def _path(opt):
+ build(opt)
+ datatype = opt['datatype'].split(':')[0]
+ if datatype == 'test':
+ warn_once("WARNING: Test set not included. Setting datatype to valid.")
+ datatype = 'valid'
+ return make_path(opt, datatype + '.txt')
+
+
+class DefaultTeacher(ParlAIDialogTeacher):
+ def __init__(self, opt, shared=None):
+ opt = copy.deepcopy(opt)
+ opt['parlaidialogteacher_datafile'] = _path(opt)
+ super().__init__(opt, shared)
diff --git a/projects/controllable_dialogue/tasks/build.py b/projects/controllable_dialogue/tasks/build.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/tasks/build.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import parlai.core.params as params
+import parlai.core.build_data as build_data
+
+
+URL_ROOT = 'https://parl.ai/downloads/controllable_dialogue/'
+FOLDER_NAME = 'controllable_dialogue'
+
+
+def build(opt):
+ dpath = os.path.join(opt['datapath'], FOLDER_NAME)
+ version = '1.0'
+
+ if not build_data.built(dpath, version_string=version):
+ if build_data.built(dpath):
+ # older version exists, so remove the outdated files.
+ build_data.remove_dir(dpath)
+ build_data.make_dir(dpath)
+
+ # first download the data files
+ fname_data = 'data_v1.tar.gz'
+ build_data.download(URL_ROOT + fname_data, dpath, fname_data)
+ build_data.untar(dpath, fname_data)
+
+ # next download the wordstats files
+ fname_wordstats = 'wordstats_v1.tar.gz'
+ build_data.download(URL_ROOT + fname_wordstats, dpath, fname_wordstats)
+ build_data.untar(dpath, fname_wordstats)
+
+ print("Data has been placed in " + dpath)
+
+ build_data.mark_done(dpath, version)
+
+
+def make_path(opt, fname):
+ return os.path.join(opt['datapath'], FOLDER_NAME, fname)
+
+
+if __name__ == '__main__':
+ opt = params.ParlaiParser().parse_args(print_args=False)
+ build(opt)
diff --git a/projects/controllable_dialogue/train_controllable_seq2seq.py b/projects/controllable_dialogue/train_controllable_seq2seq.py
new file mode 100644
--- /dev/null
+++ b/projects/controllable_dialogue/train_controllable_seq2seq.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+"""
+Train ControllableSeq2seq model.
+"""
+
+from parlai.scripts.train_model import TrainLoop
+from parlai.scripts.train_model import setup_args as tm_setupargs
+
+
+def setup_args():
+ """Defaults for baseline model"""
+ parser = tm_setupargs()
+
+ parser.set_defaults(
+ task='projects.controllable_dialogue.tasks.agents',
+ model='projects.controllable_dialogue.controllable_seq2seq.controllable_seq2seq:ControllableSeq2seqAgent', # noqa: E501
+ dict_file='models:controllable_dialogue/dict_twit30k_train_split',
+ dict_lower=True,
+ dict_include_valid=True,
+ dict_maxexs=-1,
+ datatype='train',
+ batchsize=64,
+ hiddensize=1024,
+ embeddingsize=300,
+ attention='general',
+ numlayers=2,
+ rnn_class='lstm',
+ learningrate=3,
+ dropout=0.1,
+ gradient_clip=0.1,
+ lookuptable='enc_dec',
+ optimizer='sgd',
+ embedding_type='glove',
+ momentum=0.9,
+ bidirectional=False,
+ context_length=-1,
+ person_tokens=True,
+ add_p1_after_newln=True,
+ beam_min_n_best=30,
+ validation_every_n_secs=90,
+ validation_metric='ppl',
+ validation_metric_mode='min',
+ validation_patience=12,
+ log_every_n_secs=10,
+ dict_tokenizer='split',
+ )
+ return parser
+
+
+if __name__ == '__main__':
+ parser = setup_args()
+ opt = parser.parse_args()
+ TrainLoop(opt).train()
| diff --git a/parlai/core/testing_utils.py b/parlai/core/testing_utils.py
--- a/parlai/core/testing_utils.py
+++ b/parlai/core/testing_utils.py
@@ -272,6 +272,35 @@ def eval_model(opt, skip_test=False):
)
+def display_data(opt):
+ """
+ Runs through a display data run.
+
+ :return: (stdout_train, stdout_valid, stdout_test)
+    :rtype: (str, str, str)
+ """
+ import parlai.scripts.display_data as dd
+ parser = dd.setup_args()
+ parser.set_params(**opt)
+ popt = parser.parse_args(print_args=False)
+
+ with capture_output() as train_output:
+ popt['datatype'] = 'train:stream'
+ dd.display_data(popt)
+ with capture_output() as valid_output:
+ popt['datatype'] = 'valid:stream'
+ dd.display_data(popt)
+ with capture_output() as test_output:
+ popt['datatype'] = 'test:stream'
+ dd.display_data(popt)
+
+ return (
+ train_output.getvalue(),
+ valid_output.getvalue(),
+ test_output.getvalue(),
+ )
+
+
def download_unittest_models():
from parlai.core.params import ParlaiParser
from parlai.core.build_data import download_models
diff --git a/tests/nightly/gpu/test_controllable.py b/tests/nightly/gpu/test_controllable.py
new file mode 100644
--- /dev/null
+++ b/tests/nightly/gpu/test_controllable.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import unittest
+import parlai.core.testing_utils as testing_utils
+
+"""
+Integration tests for the Controllable Dialogue project.
+
+See projects/controllable_dialogue.
+"""
+
+
+FAST_MODE = True
+NUM_EXAMPLES = 512 if FAST_MODE else -1
+NO_REPETITION = 'extrep_2gram:-3.5,extrep_nonstopword:-1e20,intrep_nonstopword:-1e20'
+
+
+@testing_utils.skipUnlessGPU
+class TestControllableDialogue(unittest.TestCase):
+ def test_dataset_integrity(self):
+ """
+        Check that the controllable dialogue data loads.
+ """
+ train_output, valid_output, _ = testing_utils.display_data({
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ })
+
+        # check train data
+ self.assertIn('[lastuttsim]', train_output)
+ self.assertIn(
+ "hi , how are you doing ? i'm getting ready to do some cheetah "
+ "chasing to stay in shape .",
+ train_output
+ )
+ self.assertIn('131438 examples', train_output)
+
+ # check valid data
+ self.assertIn(
+ "hello what are doing today ?",
+ valid_output
+ )
+ self.assertIn('[lastuttsim]', valid_output)
+ self.assertIn('7801 examples', valid_output)
+
+ def test_train_model(self):
+ """
+ Check the training script doesn't crash.
+ """
+ import projects.controllable_dialogue.train_controllable_seq2seq as tcs2s
+ parser = tcs2s.setup_args()
+ # make it much smaller just for testing
+ parser.set_params(
+ max_train_time=120,
+ validation_max_exs=128,
+ batchsize=16,
+ truncate=32,
+ short_final_eval=True,
+ )
+ with testing_utils.capture_output():
+ opt = parser.parse_args()
+ tcs2s.TrainLoop(opt).train()
+
+ def test_convai2_finetuned_greedy(self):
+ """
+ Check the greedy model produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/convai2_finetuned_baseline',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 1,
+ 'batchsize': 64,
+ }, skip_test=True)
+
+ self.assertAlmostEqual(valid['ppl'], 22.86, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1702, delta=0.0002)
+
+ def test_convai2_finetuned_beamsearch(self):
+ """
+ Check the beamsearch baseline produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/convai2_finetuned_baseline',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'batchsize': 64,
+ 'num_examples': NUM_EXAMPLES,
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 23.54, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1575, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 22.86, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1516, delta=0.0002)
+
+ def test_convai2_finetuned_norepetition(self):
+ """
+ Checks the finetuned model with repetition blocking produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/convai2_finetuned_baseline',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'use_reply': 'model',
+ 'batchsize': 64,
+ 'num_examples': NUM_EXAMPLES,
+ 'weighted_decoding': NO_REPETITION,
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 26.66, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1389, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 25.83, delta=0.1)
+            self.assertAlmostEqual(valid['f1'], 0.1375, delta=0.0002)
+
+ def test_ct_question_bucket7(self):
+ """
+ Checks the question-controlled model (z=7) produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ # b11e10 stands for 11 buckets, embedding size 10
+ 'model_file': 'models:controllable_dialogue/control_questionb11e10',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'batchsize': 64,
+ 'use_reply': 'model',
+ 'num_examples': NUM_EXAMPLES,
+ 'weighted_decoding': NO_REPETITION,
+ 'set_controls': 'question:7',
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 31.04, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1362, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 29.22, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1336, delta=0.0002)
+
+ def test_ct_question_bucket10(self):
+ """
+ Checks the question-controlled model (z=10 boost) produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/control_questionb11e10',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'batchsize': 64,
+ 'use_reply': 'model',
+ 'num_examples': NUM_EXAMPLES,
+ 'weighted_decoding': 'extrep_nonstopword:-1e20,intrep_nonstopword:-1e20',
+ 'set_controls': 'question:10',
+ 'beam_reorder': 'best_extrep2gram_qn',
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 31.27, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1400, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 30.26, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1304, delta=0.0002)
+
+ def test_ct_specificity_bucket7(self):
+ """
+ Checks the specificity-CT model (z=7) produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/control_avgnidf10b10e',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'use_reply': 'model',
+ 'batchsize': 64,
+ 'num_examples': NUM_EXAMPLES,
+ 'weighted_decoding': NO_REPETITION,
+ 'set_controls': 'avg_nidf:7',
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 38.64, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1376, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 37.03, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1365, delta=0.0002)
+
+ def test_wd_specificity(self):
+ """
+ Checks the specificity-weighted decoding model produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/convai2_finetuned_baseline',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'use_reply': 'model',
+ 'batchsize': 64,
+ 'num_examples': NUM_EXAMPLES,
+ 'weighted_decoding': NO_REPETITION + ',nidf:4',
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 25.74, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1366, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 25.57, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1349, delta=0.0002)
+
+ def test_wd_responsiveness(self):
+ """
+ Checks the responsiveness-weighted decoding model produces correct results.
+ """
+ _, valid, _ = testing_utils.eval_model({
+ 'model_file': 'models:controllable_dialogue/convai2_finetuned_baseline',
+ 'task': 'projects.controllable_dialogue.tasks.agents',
+ 'beam_size': 20,
+ 'beam_min_n_best': 10,
+ 'use_reply': 'model',
+ 'batchsize': 64,
+ 'num_examples': NUM_EXAMPLES,
+ 'weighted_decoding': NO_REPETITION + ',intrep_2gram:-1e20,partnerrep_2gram:-1e20,lastuttsim:5' # noqa: E501
+ }, skip_test=True)
+
+ if FAST_MODE:
+ self.assertAlmostEqual(valid['ppl'], 26.16, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1399, delta=0.0002)
+ else:
+ self.assertAlmostEqual(valid['ppl'], 25.47, delta=0.1)
+ self.assertAlmostEqual(valid['f1'], 0.1369, delta=0.0002)
+
+
+if __name__ == '__main__':
+ unittest.main()
| What makes a good conversation? How controllable attributes affect human judgments
Congratulations on NAACL and a great paper!
"All of our code, pretrained models, and full chatlogs, will be released open-source" - do you have a timeframe on that? Would love to try it out.
Thanks!
| cc @abisee
Hi @JohannesTK, thanks for your interest!
The NAACL camera-ready deadline is April 1, so we'll have finished our revised version of the paper by then. We hope to release the code, pretrained models and chatlogs shortly after that, though it usually takes a little while to clean everything up to the level that people can use it, so it might take 1-2 weeks after the camera-ready deadline.
Thanks for the update, @abisee.
Will be looking forward to it and hope you have a successful presentation at NAACL!
@abisee weighted decoding and conditional training are somewhat model-agnostic in that they should work with any encoder-decoder model, so would you be integrating the two approaches at the framework-level prior to the release? I noticed that you used a 2-layer LSTM with attention in your paper, but it would be ideal if your release works with any encoder-decoder model with minimal effort. Thanks!
Good paper, btw!
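As context for the question above: weighted decoding only re-scores the decoder's per-step output distribution with a weighted sum of feature scores, which is why it is largely independent of the underlying encoder-decoder architecture, whereas conditional training changes the model's inputs and therefore requires retraining. A minimal, hedged sketch of the weighted-decoding re-scoring step (the function and argument names are illustrative assumptions, not the project's API):

```python
import torch


def weighted_decoding_step(logprobs, feature_scores, feature_weights):
    # logprobs: (batch, vocab) log-probabilities from any encoder-decoder model
    # feature_scores: dict of feature name -> (batch, vocab) score tensor
    # feature_weights: dict of feature name -> float weight chosen by the user
    adjusted = logprobs.clone()
    for name, weight in feature_weights.items():
        adjusted = adjusted + weight * feature_scores[name]
    return adjusted.argmax(dim=-1)  # greedy pick over the re-weighted scores
```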
> The NAACL camera-ready deadline is April 1, so we'll have finished our revised version of the paper by then. We hope to release the code, pretrained models and chatlogs shortly after that, though it usually takes a little while to clean everything up to the level that people can use it, so it might take 1-2 weeks after the camera-ready deadline.
Update: Apologies, looks like my previous estimate for when this would be done was a bit over-optimistic. But I am now working on it and hope to have it done soon. Thanks for your patience! | 2019-04-30T07:31:15Z | [] | [] |
facebookresearch/ParlAI | 1,738 | facebookresearch__ParlAI-1738 | [
"1737"
] | 724f53942ad7ac6340e6cf8e7a531ac2ddf3e43a | diff --git a/parlai/core/torch_ranker_agent.py b/parlai/core/torch_ranker_agent.py
--- a/parlai/core/torch_ranker_agent.py
+++ b/parlai/core/torch_ranker_agent.py
@@ -69,7 +69,7 @@ def add_cmdline_args(cls, argparser):
def __init__(self, opt, shared=None):
# Must call _get_init_model() first so that paths are updated if necessary
# (e.g., a .dict file)
- init_model, _ = self._get_init_model(opt, shared)
+ init_model, is_finetune = self._get_init_model(opt, shared)
opt['rank_candidates'] = True
super().__init__(opt, shared)
@@ -115,7 +115,7 @@ def __init__(self, opt, shared=None):
optim_params,
states.get('optimizer'), states.get('optimizer_type')
)
- self.build_lr_scheduler(states)
+ self.build_lr_scheduler(states, hard_reset=is_finetune)
if shared is None and is_distributed():
self.model = torch.nn.parallel.DistributedDataParallel(
| diff --git a/tests/test_transformers.py b/tests/test_transformers.py
--- a/tests/test_transformers.py
+++ b/tests/test_transformers.py
@@ -368,9 +368,78 @@ def test_xlm(self):
)
+def test_learning_rate_resuming(self, args):
+ mdl = args['model']
+ with testing_utils.tempdir() as tmpdir:
+ model_file = os.path.join(tmpdir, 'model')
+ stdout1, valid1, test1 = testing_utils.train_model(dict(
+ model_file=model_file,
+ lr_scheduler='invsqrt',
+ **args,
+ ))
+ stdout2, valid2, test2 = testing_utils.train_model(dict(
+ model_file=model_file,
+ lr_scheduler='invsqrt',
+ **args,
+ ))
+ # make sure the number of updates is being tracked correctly
+ self.assertGreater(
+ valid2['num_updates'],
+ valid1['num_updates'],
+ '({}) Number of updates is not increasing'.format(mdl)
+ )
+ # make sure the learning rate is decreasing
+ self.assertLess(
+ valid2['lr'],
+ valid1['lr'],
+ '({}) Learning rate is not decreasing'.format(mdl)
+ )
+ # but make sure we're not loading the scheduler if we're fine
+ # tuning
+ stdout3, valid3, test3 = testing_utils.train_model(dict(
+ init_model=os.path.join(tmpdir, 'model'),
+ model_file=os.path.join(tmpdir, 'newmodel'),
+ lr_scheduler='invsqrt',
+ **args,
+ ))
+ self.assertEqual(
+ valid3['num_updates'],
+ valid1['num_updates'],
+ '({}) Finetuning LR scheduler reset failed '
+ '(num_updates).'.format(mdl)
+ )
+ self.assertEqual(
+ valid3['lr'],
+ valid1['lr'],
+ '({}) Finetuning LR scheduler reset failed '
+ '(lr).'.format(mdl)
+ )
+ # and make sure we're not loading the scheduler if it changes
+ stdout4, valid4, test4 = testing_utils.train_model(dict(
+ init_model=os.path.join(tmpdir, 'model'),
+ model_file=os.path.join(tmpdir, 'newmodel2'),
+ lr_scheduler='reduceonplateau',
+ **args
+ ))
+ self.assertEqual(
+ valid4['num_updates'],
+ valid1['num_updates'],
+ '({}) LR scheduler change reset failed (num_updates).'
+ '\n{}'.format(mdl, stdout4)
+ )
+ self.assertEqual(
+ valid4['lr'],
+ 1e-3,
+ '({}) LR is not correct in final resume.\n{}'.format(
+ mdl, stdout4)
+ )
+
+
class TestLearningRateScheduler(unittest.TestCase):
- def test_resuming(self):
- BASE_ARGS = dict(
+ """Test learning rate scheduler for both generative and ranking
+ transformers."""
+ def test_resuming_generator(self):
+ GENERATOR_ARGS = dict(
task='integration_tests:nocandidate',
model='transformer/generator',
optimizer='adamax',
@@ -384,66 +453,23 @@ def test_resuming(self):
skip_generation=True,
warmup_updates=1,
)
+ test_learning_rate_resuming(self, GENERATOR_ARGS)
- with testing_utils.tempdir() as tmpdir:
- model_file = os.path.join(tmpdir, 'model')
-
- stdout1, valid1, test1 = testing_utils.train_model(dict(
- model_file=model_file,
- lr_scheduler='invsqrt',
- **BASE_ARGS,
- ))
- stdout2, valid2, test2 = testing_utils.train_model(dict(
- model_file=model_file,
- lr_scheduler='invsqrt',
- **BASE_ARGS,
- ))
- # make sure the number of updates is being tracked correctly
- self.assertGreater(
- valid2['num_updates'],
- valid1['num_updates'],
- 'Number of updates is not increasing'
- )
- # make sure the learning rate is decreasing
- self.assertLess(
- valid2['lr'],
- valid1['lr'],
- 'Learning rate is not decreasing'
- )
- # but make sure we're not loading the scheduler if we're fine tuning
- stdout3, valid3, test3 = testing_utils.train_model(dict(
- init_model=os.path.join(tmpdir, 'model'),
- model_file=os.path.join(tmpdir, 'newmodel'),
- lr_scheduler='invsqrt',
- **BASE_ARGS,
- ))
- self.assertEqual(
- valid3['num_updates'],
- valid1['num_updates'],
- 'Finetuning LR scheduler reset failed (num_updates).'
- )
- self.assertEqual(
- valid3['lr'],
- valid1['lr'],
- 'Finetuning LR scheduler reset failed (lr).'
- )
- # and make sure we're not loading the scheduler if it changes
- stdout4, valid4, test4 = testing_utils.train_model(dict(
- init_model=os.path.join(tmpdir, 'model'),
- model_file=os.path.join(tmpdir, 'newmodel2'),
- lr_scheduler='reduceonplateau',
- **BASE_ARGS
- ))
- self.assertEqual(
- valid4['num_updates'],
- valid1['num_updates'],
- 'LR scheduler change reset failed (num_updates).\n' + stdout4
- )
- self.assertEqual(
- valid4['lr'],
- 1e-3,
- 'LR is not correct in final resume.\n' + stdout4
- )
+ def test_resuming_ranker(self):
+ RANKER_ARGS = dict(
+ task='integration_tests:candidate',
+ model='transformer/ranker',
+ optimizer='adamax',
+ learningrate=1e-3,
+ batchsize=32,
+ num_epochs=1,
+ n_layers=1,
+ n_heads=1,
+ ffn_size=32,
+ embedding_size=32,
+ warmup_updates=1,
+ )
+ test_learning_rate_resuming(self, RANKER_ARGS)
if __name__ == '__main__':
| lr scheduler does not reset during fine-tuning in local model
**Bug description**
When train_model.py loads a model with the --init-model argument, the lr scheduler seems to continue from the last learning rate recorded (based on the model state stored at the init-model location) rather than performing a hard reset.
Notice that:
* When the bert bi-ranker model is first downloaded and loaded via the --pretrained-path argument, the issue does NOT occur. It only appears when the model is loaded locally.
* I am not sure whether this is the expected behavior; I assume not. My assumption is based mainly on @emilydinan's advice in #823 on fine-tuning. In addition, @stephenroller's comment on #1734 indicates that the LR scheduler is expected to be reset during fine-tuning.
**Reproduction steps**
Model 1 is first trained locally on the convai2 task (with the bert bi-ranker) until training completes. It is then fine-tuned with the --init-model argument pointing to model 1's path, combined with a --model-file argument pointing to a folder that does not yet exist (and is expected to be created). The LR scheduler does not reset, but rather continues decaying from the last learning rate recorded.
**Expected behavior**
The LR scheduler is expected to reset to its default state, based on the arguments given on the command line.
**My silly opinions**
In my opinion, one of the following should happen:
* At the command line interface for train_model.py, a new argument should be allowed to control "hard_reset" on the lr scheduler, or the scheduler should automatically hard-reset when the --init-model parameter is used.
* At [torch ranker](https://github.com/facebookresearch/ParlAI/blob/46f656abf28c4fba2818c62a2199b25d484e0b41/parlai/core/torch_ranker_agent.py#L118), the "[hard_reset](https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_agent.py#L686)" parameter should be controllable or accessible from outside the ranker (see the sketch below).
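For concreteness, here is a minimal sketch of the behaviour the second bullet asks for. The helper below is illustrative only and is not ParlAI's actual API; the patch above achieves the same effect by passing the is_finetune flag returned by _get_init_model() into build_lr_scheduler(states, hard_reset=is_finetune).

```python
from torch import optim


def build_lr_scheduler(optimizer, states, is_finetune, patience=3):
    # Hypothetical helper: restore the saved scheduler state only when resuming
    # the same run; fine-tuning from --init-model forces a hard reset to the
    # defaults given on the command line.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=patience)
    hard_reset = is_finetune or 'lr_scheduler' not in states
    if not hard_reset:
        scheduler.load_state_dict(states['lr_scheduler'])
    return scheduler
```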
| You’re right. It’s actually okay in TGA:
https://github.com/facebookresearch/ParlAI/blob/72a17b3343f0c3b7ad8d7827d5a3d6a726ef377e/parlai/core/torch_generator_agent.py#L375 | 2019-06-03T14:12:56Z | [] | [] |
facebookresearch/ParlAI | 1,899 | facebookresearch__ParlAI-1899 | [
"1887"
] | 8efba770f7f34fd8a1b62f93e4dcc9f6ff2d7b60 | diff --git a/parlai/agents/bert_classifier/bert_classifier.py b/parlai/agents/bert_classifier/bert_classifier.py
--- a/parlai/agents/bert_classifier/bert_classifier.py
+++ b/parlai/agents/bert_classifier/bert_classifier.py
@@ -129,9 +129,7 @@ def upgrade_opt(cls, opt_on_disk):
def build_model(self):
"""Construct the model."""
num_classes = len(self.class_list)
- self.model = BertWrapper(
- BertModel.from_pretrained(self.pretrained_path), num_classes
- )
+ return BertWrapper(BertModel.from_pretrained(self.pretrained_path), num_classes)
def init_optim(self, params, optim_states=None, saved_optim_type=None):
"""Initialize the optimizer."""
diff --git a/parlai/agents/bert_ranker/bi_encoder_ranker.py b/parlai/agents/bert_ranker/bi_encoder_ranker.py
--- a/parlai/agents/bert_ranker/bi_encoder_ranker.py
+++ b/parlai/agents/bert_ranker/bi_encoder_ranker.py
@@ -57,7 +57,7 @@ def __init__(self, opt, shared=None):
self.rank_loss = torch.nn.CrossEntropyLoss(reduce=True, size_average=True)
def build_model(self):
- self.model = BiEncoderModule(self.opt)
+ return BiEncoderModule(self.opt)
@staticmethod
def dictionary_class():
diff --git a/parlai/agents/bert_ranker/cross_encoder_ranker.py b/parlai/agents/bert_ranker/cross_encoder_ranker.py
--- a/parlai/agents/bert_ranker/cross_encoder_ranker.py
+++ b/parlai/agents/bert_ranker/cross_encoder_ranker.py
@@ -51,7 +51,7 @@ def __init__(self, opt, shared=None):
self.END_IDX = self.dict.end_idx
def build_model(self):
- self.model = BertWrapper(
+ return BertWrapper(
BertModel.from_pretrained(self.pretrained_path),
1,
add_transformer_layer=self.opt['add_transformer_layer'],
diff --git a/parlai/agents/fairseq/fairseq.py b/parlai/agents/fairseq/fairseq.py
--- a/parlai/agents/fairseq/fairseq.py
+++ b/parlai/agents/fairseq/fairseq.py
@@ -312,6 +312,8 @@ def add_cmdline_args(cls, argparser):
# one last time, restore any user set defaults
argparser.set_defaults(**old_defaults)
+ # default weight decay in fairseq is zero not None
+ argparser.set_defaults(weight_decay=0.0)
@staticmethod
def dictionary_class():
@@ -347,7 +349,8 @@ def __init__(self, opt, shared=None):
# meters for keeping track of loss, ppl, etc.
self.meters = defaultdict(AverageMeter)
- # actually construct the model and generator
+ # actually construct the criterion, model and generator
+ self.criterion = self.build_criterion()
self.model = self.build_model()
# Construct the generator and scorer
@@ -365,9 +368,6 @@ def __init__(self, opt, shared=None):
)
self.scorer = SequenceScorer([self.model], self.dict)
- # set up the grader and the trainer
- self.criterion = criterions.build_criterion(self.args, self.task)
-
# TODO: we might choose to add a --no-fp16 opt in the future to
# explicitly disable fp16 instead
if not self.args.fp16 and torch.cuda.get_device_capability(0)[0] >= 7:
@@ -425,6 +425,11 @@ def build_model(self):
)
return model
+ def build_criterion(self):
+ """Set up the grader."""
+ # TorchAgent will call this without ready=True before self.args is ready
+ return criterions.build_criterion(self.args, self.task)
+
def share(self):
shared = super().share()
shared['model'] = self.model
diff --git a/parlai/agents/memnn/memnn.py b/parlai/agents/memnn/memnn.py
--- a/parlai/agents/memnn/memnn.py
+++ b/parlai/agents/memnn/memnn.py
@@ -97,7 +97,7 @@ def build_dictionary(self):
def build_model(self):
"""Build MemNN model."""
kwargs = opt_to_kwargs(self.opt)
- self.model = MemNN(
+ return MemNN(
len(self.dict),
self.opt['embedding_size'],
padding_idx=self.NULL_IDX,
diff --git a/parlai/agents/seq2seq/seq2seq.py b/parlai/agents/seq2seq/seq2seq.py
--- a/parlai/agents/seq2seq/seq2seq.py
+++ b/parlai/agents/seq2seq/seq2seq.py
@@ -160,7 +160,7 @@ def build_model(self, states=None):
states = {}
kwargs = opt_to_kwargs(opt)
- self.model = Seq2seq(
+ model = Seq2seq(
len(self.dict),
opt['embeddingsize'],
opt['hiddensize'],
@@ -176,43 +176,32 @@ def build_model(self, states=None):
print('skipping preinitialization of embeddings for bpe')
elif not states and opt['embedding_type'] != 'random':
# `not states`: only set up embeddings if not loading model
- self._copy_embeddings(self.model.decoder.lt.weight, opt['embedding_type'])
+ self._copy_embeddings(model.decoder.lt.weight, opt['embedding_type'])
if opt['lookuptable'] in ['unique', 'dec_out']:
# also set encoder lt, since it's not shared
self._copy_embeddings(
- self.model.encoder.lt.weight, opt['embedding_type'], log=False
+ model.encoder.lt.weight, opt['embedding_type'], log=False
)
if states:
# set loaded states if applicable
- self.model.load_state_dict(states['model'])
-
- if self.use_cuda:
- self.model.cuda()
+ model.load_state_dict(states['model'])
if opt['embedding_type'].endswith('fixed'):
print('Seq2seq: fixing embedding weights.')
- self.model.decoder.lt.weight.requires_grad = False
- self.model.encoder.lt.weight.requires_grad = False
+ model.decoder.lt.weight.requires_grad = False
+ model.encoder.lt.weight.requires_grad = False
if opt['lookuptable'] in ['dec_out', 'all']:
- self.model.decoder.e2s.weight.requires_grad = False
-
- if self.use_cuda:
- self.model.cuda()
+ model.output.weight.requires_grad = False
- return self.model
+ return model
def build_criterion(self):
# set up criteria
if self.opt.get('numsoftmax', 1) > 1:
- self.criterion = nn.NLLLoss(ignore_index=self.NULL_IDX, size_average=False)
+ return nn.NLLLoss(ignore_index=self.NULL_IDX, reduction='sum')
else:
- self.criterion = nn.CrossEntropyLoss(
- ignore_index=self.NULL_IDX, size_average=False
- )
-
- if self.use_cuda:
- self.criterion.cuda()
+ return nn.CrossEntropyLoss(ignore_index=self.NULL_IDX, reduction='sum')
def batchify(self, *args, **kwargs):
"""Override batchify options for seq2seq."""
diff --git a/parlai/agents/transformer/biencoder.py b/parlai/agents/transformer/biencoder.py
--- a/parlai/agents/transformer/biencoder.py
+++ b/parlai/agents/transformer/biencoder.py
@@ -8,16 +8,10 @@
class BiencoderAgent(TransformerRankerAgent):
- """ Equivalent of bert_ranker/biencoder but does not rely on an external
- library (hugging face).
"""
-
- def __init__(self, opt, shared=None):
- super().__init__(opt, shared)
- # favor average instead of sum for the loss.
- self.rank_loss = torch.nn.CrossEntropyLoss(reduce=True, size_average=True)
- if self.use_cuda:
- self.rank_loss.cuda()
+ Equivalent of bert_ranker/biencoder but does not rely on an external
+ library (hugging face).
+ """
def vectorize(self, *args, **kwargs):
""" Add the start and end token to the text.
diff --git a/parlai/agents/transformer/crossencoder.py b/parlai/agents/transformer/crossencoder.py
--- a/parlai/agents/transformer/crossencoder.py
+++ b/parlai/agents/transformer/crossencoder.py
@@ -17,9 +17,6 @@ class CrossencoderAgent(TorchRankerAgent):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
- self.rank_loss = torch.nn.CrossEntropyLoss(reduce=True, size_average=True)
- if self.use_cuda:
- self.rank_loss.cuda()
self.data_parallel = opt.get('data_parallel') and self.use_cuda
if self.data_parallel:
from parlai.core.distributed_utils import is_distributed
@@ -35,8 +32,7 @@ def add_cmdline_args(cls, argparser):
return argparser
def build_model(self, states=None):
- self.model = CrossEncoderModule(self.opt, self.dict, self.NULL_IDX)
- return self.model
+ return CrossEncoderModule(self.opt, self.dict, self.NULL_IDX)
def vectorize(self, *args, **kwargs):
""" Add the start and end token to the text.
diff --git a/parlai/agents/transformer/polyencoder.py b/parlai/agents/transformer/polyencoder.py
--- a/parlai/agents/transformer/polyencoder.py
+++ b/parlai/agents/transformer/polyencoder.py
@@ -86,8 +86,7 @@ def __init__(self, opt, shared=None):
self.model = torch.nn.DataParallel(self.model)
def build_model(self, states=None):
- self.model = PolyEncoderModule(self.opt, self.dict, self.NULL_IDX)
- return self.model
+ return PolyEncoderModule(self.opt, self.dict, self.NULL_IDX)
def vectorize(self, *args, **kwargs):
""" Add the start and end token to the labels.
diff --git a/parlai/agents/transformer/transformer.py b/parlai/agents/transformer/transformer.py
--- a/parlai/agents/transformer/transformer.py
+++ b/parlai/agents/transformer/transformer.py
@@ -188,12 +188,14 @@ def _score(self, output, cands):
)
def build_model(self, states=None):
- self.model = TransformerMemNetModel(self.opt, self.dict)
+ model = TransformerMemNetModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
- self._copy_embeddings(
- self.model.embeddings.weight, self.opt['embedding_type']
- )
- return self.model
+ self._copy_embeddings(model.embeddings.weight, self.opt['embedding_type'])
+ return model
+
+ def build_criterion(self):
+ """Build and return criterion, favoring average instead of sum for the loss."""
+ return torch.nn.CrossEntropyLoss(reduction='mean')
def batchify(self, obs_batch, sort=False):
"""Override so that we can add memories to the Batch object."""
@@ -264,11 +266,9 @@ def add_cmdline_args(cls, argparser):
return agent
def build_model(self, states=None):
- self.model = TransformerGeneratorModel(self.opt, self.dict)
+ model = TransformerGeneratorModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
- self.model.encoder.embeddings.weight, self.opt['embedding_type']
+ model.encoder.embeddings.weight, self.opt['embedding_type']
)
- if self.use_cuda:
- self.model.cuda()
- return self.model
+ return model
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -650,10 +650,22 @@ def __init__(self, opt, shared=None):
"""Initialize agent."""
super().__init__(opt, shared)
opt = self.opt
+
+ # check for cuda
+ self.use_cuda = not opt['no_cuda'] and torch.cuda.is_available()
+ if self.use_cuda:
+ if not shared:
+ print('[ Using CUDA ]')
+ if not shared and opt['gpu'] != -1:
+ torch.cuda.set_device(opt['gpu'])
+ # indicate whether using fp16
+ self.fp16 = self.use_cuda and self.opt.get('fp16', False)
+
if not shared:
# intitialize any important structures from scratch
self.replies = {} # past replies
self.dict = self.build_dictionary()
+
if opt.get('fp16'):
# Volta cores revert to FP32 hardware if tensors are not multiples
# of 8 in all dimensions. This INCLUDES the embeddings layer! As
@@ -674,6 +686,8 @@ def __init__(self, opt, shared=None):
# copy initialized data from shared table
self.opt = shared['opt']
self.dict = shared['dict']
+ self.model = shared['model']
+ self.criterion = shared['criterion']
self.metrics = shared['metrics']
if self.opt['batchsize'] == 1:
# if we're not using batching (e.g. mturk), then replies really need
@@ -685,16 +699,6 @@ def __init__(self, opt, shared=None):
if opt.get('numthreads', 1) > 1:
torch.set_num_threads(1)
- # check for cuda
- self.use_cuda = not opt['no_cuda'] and torch.cuda.is_available()
- if self.use_cuda:
- if not shared:
- print('[ Using CUDA ]')
- if not shared and opt['gpu'] != -1:
- torch.cuda.set_device(opt['gpu'])
- # indicate whether using fp16
- self.fp16 = self.use_cuda and self.opt.get('fp16', False)
-
# Default to the class name, sans "Agent". child can override
self.id = type(self).__name__.replace("Agent", "")
@@ -783,6 +787,10 @@ def _get_init_model(self, opt, shared):
return init_model, is_finetune
+ def build_model(self):
+ """Construct the model and return it."""
+ raise NotImplementedError('not implemented for this class')
+
def init_optim(self, params, optim_states=None, saved_optim_type=None):
"""
Initialize optimizer with model parameters.
@@ -1153,8 +1161,8 @@ def share(self):
shared['metrics'] = self.metrics
shared['dict'] = self.dict
- if hasattr(self, 'model'):
- shared['model'] = self.model
+ shared['model'] = self.model
+ shared['criterion'] = self.criterion
shared['opt'] = self.opt
shared['replies'] = self.replies
return shared
diff --git a/parlai/core/torch_classifier_agent.py b/parlai/core/torch_classifier_agent.py
--- a/parlai/core/torch_classifier_agent.py
+++ b/parlai/core/torch_classifier_agent.py
@@ -125,18 +125,23 @@ def __init__(self, opt, shared=None):
self.threshold = None
# set up model and optimizers
- weight_tensor = torch.FloatTensor(self.class_weights)
- self.classifier_loss = torch.nn.CrossEntropyLoss(weight_tensor)
+
if shared:
self.model = shared['model']
else:
- self.build_model()
+ self.model = self.build_model()
+ self.criterion = self.build_criterion()
+ if self.model is None or self.criterion is None:
+ raise AttributeError(
+ 'build_model() and build_criterion() need to return the model or criterion'
+ )
+ if self.use_cuda:
+ self.model.cuda()
+                self.criterion.cuda()
if init_model:
print('Loading existing model parameters from ' + init_model)
self.load(init_model)
if self.use_cuda:
- self.model.cuda()
- self.classifier_loss.cuda()
if self.opt['data_parallel']:
if is_distributed():
raise ValueError(
@@ -152,6 +157,10 @@ def __init__(self, opt, shared=None):
self.init_optim(optim_params)
self.build_lr_scheduler()
+ def build_criterion(self):
+ weight_tensor = torch.FloatTensor(self.class_weights)
+ return torch.nn.CrossEntropyLoss(weight_tensor)
+
def share(self):
"""Share model parameters."""
shared = super().share()
@@ -214,7 +223,7 @@ def train_step(self, batch):
# calculate loss
labels = self._get_labels(batch)
scores = self.score(batch)
- loss = self.classifier_loss(scores, labels)
+ loss = self.criterion(scores, labels)
loss.backward()
self.update_params()
@@ -251,7 +260,7 @@ def eval_step(self, batch):
preds = self._format_interactive_output(probs, prediction_id)
else:
labels = self._get_labels(batch)
- loss = self.classifier_loss(scores, labels)
+ loss = self.criterion(scores, labels)
self.metrics['loss'] += loss.item()
self.metrics['examples'] += len(batch.text_vec)
self._update_confusion_matrix(batch, preds)
@@ -349,7 +358,3 @@ def score(self, batch):
class.
"""
raise NotImplementedError('Abstract class: user must implement score()')
-
- def build_model(self):
- """Build a new model (implemented by children classes)."""
- raise NotImplementedError('Abstract class: user must implement build_model()')
diff --git a/parlai/core/torch_generator_agent.py b/parlai/core/torch_generator_agent.py
--- a/parlai/core/torch_generator_agent.py
+++ b/parlai/core/torch_generator_agent.py
@@ -352,8 +352,6 @@ def __init__(self, opt, shared=None):
if shared:
# set up shared properties
- self.model = shared['model']
- self.criterion = shared['criterion']
states = shared.get('states', {})
else:
# Note: we cannot change the type of metrics ahead of time, so you
@@ -372,8 +370,16 @@ def __init__(self, opt, shared=None):
)
print('[ Saving dot beam logs in {} ]'.format(self.beam_dot_dir))
- self.build_criterion()
- self.build_model()
+ self.criterion = self.build_criterion()
+ self.model = self.build_model()
+ if self.model is None or self.criterion is None:
+ raise AttributeError(
+ 'build_model() and build_criterion() need to return the model or criterion'
+ )
+ if self.use_cuda:
+ self.model.cuda()
+ self.criterion.cuda()
+
check_synced_parameters(self.model)
print("Total parameters: {}".format(self._total_parameters()))
print("Trainable parameters: {}".format(self._trainable_parameters()))
@@ -410,6 +416,16 @@ def __init__(self, opt, shared=None):
self.reset()
+ def build_criterion(self):
+ """
+ Construct and return the loss function.
+
+ By default torch.nn.CrossEntropyLoss.
+
+ If overridden, this model should produce a sum that can be used for a per-token loss.
+ """
+ return torch.nn.CrossEntropyLoss(ignore_index=self.NULL_IDX, reduction='sum')
+
def _v2t(self, vec):
"""Convert token indices to string of tokens."""
new_vec = []
@@ -431,32 +447,6 @@ def set_interactive_mode(self, mode, shared=False):
else:
self.skip_generation = self.opt.get('skip_generation', False)
- @abstractmethod
- def build_model(self):
- """
- Construct the model.
-
- The model should be set to self.model, and support
- the TorchGeneratorModel interface.
- """
- pass
-
- def build_criterion(self):
- """
- Construct the loss function.
-
- By default torch.nn.CrossEntropyLoss. The criterion function should be
- set to self.criterion.
-
- If overridden, this model should (1) handle calling cuda and (2)
- produce a sum that can be used for a per-token loss.
- """
- self.criterion = nn.CrossEntropyLoss(
- ignore_index=self.NULL_IDX, reduction='sum'
- )
- if self.use_cuda:
- self.criterion.cuda()
-
def _dummy_batch(self, batchsize, maxlen):
"""
Create a dummy batch.
@@ -503,7 +493,6 @@ def reset_metrics(self):
def share(self):
"""Share internal states between parent and child instances."""
shared = super().share()
- shared['criterion'] = self.criterion
if self.opt.get('numthreads', 1) > 1:
shared['states'] = { # don't share optimizer states
'optimizer_type': self.opt['optimizer']
diff --git a/parlai/core/torch_ranker_agent.py b/parlai/core/torch_ranker_agent.py
--- a/parlai/core/torch_ranker_agent.py
+++ b/parlai/core/torch_ranker_agent.py
@@ -135,7 +135,6 @@ def __init__(self, opt, shared=None):
super().__init__(opt, shared)
if shared:
- self.model = shared['model']
states = None
else:
# Note: we cannot change the type of metrics ahead of time, so you
@@ -146,7 +145,16 @@ def __init__(self, opt, shared=None):
self.metrics['mrr'] = 0.0
self.metrics['train_accuracy'] = 0.0
- self.build_model()
+ self.criterion = self.build_criterion()
+ self.model = self.build_model()
+ if self.model is None or self.criterion is None:
+ raise AttributeError(
+ 'build_model() and build_criterion() need to return the model or criterion'
+ )
+ if self.use_cuda:
+ self.model.cuda()
+ self.criterion.cuda()
+
if self.fp16:
self.model = self.model.half()
if init_model:
@@ -156,10 +164,6 @@ def __init__(self, opt, shared=None):
states = {}
self.rank_top_k = opt.get('rank_top_k', -1)
- self.rank_loss = nn.CrossEntropyLoss(reduce=True, size_average=False)
- if self.use_cuda:
- self.model.cuda()
- self.rank_loss.cuda()
# Vectorize and save fixed/vocab candidates once upfront if applicable
self.set_fixed_candidates(shared)
@@ -181,6 +185,14 @@ def __init__(self, opt, shared=None):
self.model, device_ids=[self.opt['gpu']], broadcast_buffers=False
)
+ def build_criterion(self):
+ """
+ Construct and return the loss function.
+
+ By default torch.nn.CrossEntropyLoss.
+ """
+ return torch.nn.CrossEntropyLoss(reduction='sum')
+
def set_interactive_mode(self, mode, shared=False):
self.candidates = self.opt['candidates']
if mode:
@@ -236,11 +248,6 @@ def score_candidates(self, batch, cand_vecs, cand_encs=None):
"""
pass
- @abstractmethod
- def build_model(self):
- """Build a new model (implemented by children classes)."""
- pass
-
def _get_batch_train_metrics(self, scores):
"""
Get fast metrics calculations if we train with batch candidates.
@@ -327,7 +334,7 @@ def train_step(self, batch):
)
try:
scores = self.score_candidates(batch, cand_vecs)
- loss = self.rank_loss(scores, label_inds)
+ loss = self.criterion(scores, label_inds)
self.backward(loss)
self.update_params()
except RuntimeError as e:
@@ -392,7 +399,7 @@ def eval_step(self, batch):
# Update metrics
if label_inds is not None:
- loss = self.rank_loss(scores, label_inds)
+ loss = self.criterion(scores, label_inds)
self.metrics['loss'] += loss.item()
self.metrics['examples'] += batchsize
for b in range(batchsize):
diff --git a/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py b/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
--- a/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
+++ b/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
@@ -330,7 +330,15 @@ def __init__(self, opt, shared=None):
print('[ Loading existing model params from {} ]' ''.format(init_model))
states = self.load(init_model)
- self._init_model(states=states)
+ self.model = self.build_model(states=states)
+ if self.use_cuda:
+ self.model.cuda()
+ if self.multigpu:
+ self.model = torch.nn.DataParallel(self.model)
+ self.model.encoder = self.model.module.encoder
+ self.model.decoder = self.model.module.decoder
+ self.model.longest_label = self.model.module.longest_label
+ self.model.output = self.model.module.output
# set up criteria
if opt.get('numsoftmax', 1) > 1:
@@ -383,12 +391,12 @@ def _add_control(self, states):
# weights) to states
states['model'][key] = init_decoder_ih_l0
- def _init_model(self, states=None):
+ def build_model(self, states=None):
"""Initialize model, override to change model setup."""
opt = self.opt
kwargs = opt_to_kwargs(opt)
- self.model = Seq2seq(
+ model = Seq2seq(
len(self.dict),
opt['embeddingsize'],
opt['hiddensize'],
@@ -404,11 +412,11 @@ def _init_model(self, states=None):
print('skipping preinitialization of embeddings for bpe')
elif not states and opt['embedding_type'] != 'random':
# `not states`: only set up embeddings if not loading model
- self._copy_embeddings(self.model.decoder.lt.weight, opt['embedding_type'])
+ self._copy_embeddings(model.decoder.lt.weight, opt['embedding_type'])
if opt['lookuptable'] in ['unique', 'dec_out']:
# also set encoder lt, since it's not shared
self._copy_embeddings(
- self.model.encoder.lt.weight, opt['embedding_type'], log=False
+ model.encoder.lt.weight, opt['embedding_type'], log=False
)
if states:
@@ -416,25 +424,16 @@ def _init_model(self, states=None):
self._add_control(states)
# set loaded states if applicable
- self.model.load_state_dict(states['model'])
+ model.load_state_dict(states['model'])
if opt['embedding_type'].endswith('fixed'):
print('Seq2seq: fixing embedding weights.')
- self.model.decoder.lt.weight.requires_grad = False
- self.model.encoder.lt.weight.requires_grad = False
+ model.decoder.lt.weight.requires_grad = False
+ model.encoder.lt.weight.requires_grad = False
if opt['lookuptable'] in ['dec_out', 'all']:
- self.model.decoder.e2s.weight.requires_grad = False
+ model.decoder.output.e2s.weight.requires_grad = False
- if self.use_cuda:
- self.model.cuda()
- if self.multigpu:
- self.model = torch.nn.DataParallel(self.model)
- self.model.encoder = self.model.module.encoder
- self.model.decoder = self.model.module.decoder
- self.model.longest_label = self.model.module.longest_label
- self.model.output = self.model.module.output
-
- return self.model
+ return model
def _init_controls(self):
"""
diff --git a/projects/self_feeding/self_feeding_agent.py b/projects/self_feeding/self_feeding_agent.py
--- a/projects/self_feeding/self_feeding_agent.py
+++ b/projects/self_feeding/self_feeding_agent.py
@@ -502,7 +502,7 @@ def dialog_step(self, batch):
if label_inds is None:
loss = None
else:
- loss = self.rank_loss(scores, label_inds)
+ loss = self.criterion(scores, label_inds)
self.update_dia_metrics(loss, ranks, label_inds, batchsize)
return loss, preds, cand_ranked
@@ -536,7 +536,7 @@ def feedback_step(self, batch):
if label_inds is None:
loss = None
else:
- loss = self.rank_loss(scores, label_inds)
+ loss = self.criterion(scores, label_inds)
self.update_fee_metrics(loss, ranks, label_inds, batchsize)
return loss, preds, cand_ranked
| diff --git a/tests/test_torch_agent.py b/tests/test_torch_agent.py
--- a/tests/test_torch_agent.py
+++ b/tests/test_torch_agent.py
@@ -70,6 +70,17 @@ def txt2vec(self, txt):
class TorchAgent(TorchAgent):
"""Use MockDict instead of regular DictionaryAgent."""
+ def __init__(self, opt, shared=None):
+ self.model = self.build_model()
+ self.criterion = self.build_criterion()
+ super().__init__(opt, shared)
+
+ def build_model(self):
+ return {}
+
+ def build_criterion(self):
+ return {}
+
@staticmethod
def dictionary_class():
"""Replace normal dictionary class with mock one."""
| Move build_model from TGA/TCA/TRA to TA
Currently, the `build_model` functions in each of Torch(Ranker/Classifier/Generator)Agent are redundant; we should relocate this function to the top-level TorchAgent from which all of these agents are subclassed.
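A minimal sketch of the proposed shape; the classes and the model below are illustrative stand-ins rather than the actual ParlAI classes (the accompanying patch shows the real change):

```python
import torch.nn as nn


class TorchAgentSketch:
    """Hypothetical base class: declares build_model() once for all subclasses."""

    def build_model(self):
        raise NotImplementedError('subclasses must implement build_model()')


class GeneratorAgentSketch(TorchAgentSketch):
    """Hypothetical subclass: returns its module instead of assigning self.model."""

    def build_model(self):
        return nn.Linear(8, 8)
```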
| 2019-08-07T10:56:59Z | [] | [] |
|
facebookresearch/ParlAI | 1,934 | facebookresearch__ParlAI-1934 | [
"1652"
] | c1e35bac34e7336b794f3ac48b100f9b17d14e86 | diff --git a/parlai/core/torch_generator_agent.py b/parlai/core/torch_generator_agent.py
--- a/parlai/core/torch_generator_agent.py
+++ b/parlai/core/torch_generator_agent.py
@@ -287,10 +287,16 @@ def add_cmdline_args(cls, argparser):
)
agent.add_argument(
'--inference',
- choices={'beam', 'greedy'},
+ choices={'beam', 'greedy', 'topk', 'nucleus'},
default='greedy',
help='Generation algorithm',
)
+ agent.add_argument(
+ '--topk', type=int, default=10, help='K used in Top K sampling'
+ )
+ agent.add_argument(
+ '--topp', type=float, default=0.9, help='p used in nucleus sampling'
+ )
super(TorchGeneratorAgent, cls).add_cmdline_args(argparser)
return agent
@@ -626,6 +632,28 @@ def _treesearch_factory(self, device):
eos_token=self.END_IDX,
device=device,
)
+ elif method == 'topk':
+ return TopKSampling(
+ self.opt['topk'],
+ beam_size,
+ min_length=self.beam_min_length,
+ min_n_best=self.beam_min_n_best,
+ padding_token=self.NULL_IDX,
+ bos_token=self.START_IDX,
+ eos_token=self.END_IDX,
+ device=device,
+ )
+ elif method == 'nucleus':
+ return NucleusSampling(
+ self.opt['topp'],
+ beam_size,
+ min_length=self.beam_min_length,
+ min_n_best=self.beam_min_n_best,
+ padding_token=self.NULL_IDX,
+ bos_token=self.START_IDX,
+ eos_token=self.END_IDX,
+ device=device,
+ )
else:
raise ValueError(f"Can't use inference method {method}")
@@ -986,3 +1014,66 @@ def select_paths(self, logprobs, prior_scores):
tok_ids = best_idxs % voc_size
return (hyp_ids, tok_ids, best_scores)
+
+
+class TopKSampling(TreeSearch):
+ """
+ Top-K sampling (Fan et al., 2018).
+
+ Samples from a truncated distribution where only the most probable K words
+ are considered at each time.
+
+ Typical values of k are 2, 10, 50.
+
+ See https://arxiv.org/abs/1805.04833 for details.
+ """
+
+ def __init__(self, k, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.k = k
+
+ def select_paths(self, logprobs, prior_scores):
+ values, indices = logprobs.topk(self.k, dim=-1)
+ probs = torch.softmax(values, dim=-1)
+ choices = torch.multinomial(probs, 1)[:, 0]
+ hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)
+ tok_ids = indices[hyp_ids, choices]
+ scores = values[hyp_ids, choices]
+ best_scores = prior_scores.expand_as(scores) + scores
+ return (hyp_ids, tok_ids, best_scores)
+
+
+class NucleusSampling(TreeSearch):
+ """
+    Nucleus, aka top-p sampling (Holtzman et al., 2019).
+
+ Samples from a truncated distribution which covers a fixed CDF proportion
+ of the original distribution.
+
+ Typical values of p are 0.3 and 0.9.
+
+ See https://arxiv.org/abs/1904.09751 for details.
+ """
+
+ def __init__(self, p, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.p = p
+
+ def select_paths(self, logprobs, prior_scores):
+ # Unlike the other treesearch methods, we have to switch to linspace
+ # for the probabilities in order to compute the CDF.
+ probs = torch.softmax(logprobs, dim=-1)
+ sprobs, sinds = probs.sort(dim=-1, descending=True)
+ # The subtraction here is so that we always include the first word to
+ # go over p. For example, if the most probable token has a prob of 0.5, and
+        # p = 0.3, then we still need to include that first token.
+ mask = (sprobs.cumsum(dim=-1) - sprobs[:, :1]) >= self.p
+ sprobs[mask] = 0
+ sprobs.div_(sprobs.sum(dim=-1).unsqueeze(1))
+ choices = torch.multinomial(sprobs, 1)[:, 0]
+ hyp_ids = torch.arange(logprobs.size(0)).to(logprobs.device)
+ tok_ids = sinds[hyp_ids, choices]
+ # Convert back to logspace.
+ scores = sprobs[hyp_ids, choices].log()
+ best_scores = prior_scores.expand_as(scores) + scores
+ return (hyp_ids, tok_ids, best_scores)
| diff --git a/tests/test_transformers.py b/tests/test_transformers.py
--- a/tests/test_transformers.py
+++ b/tests/test_transformers.py
@@ -4,6 +4,8 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
+"""Test many variants of transformers."""
+
import os
import unittest
import parlai.core.testing_utils as testing_utils
@@ -14,6 +16,7 @@ class TestTransformerRanker(unittest.TestCase):
@testing_utils.retry(ntries=3)
def test_repeater(self):
+ """Test a simple repeat-after-me model."""
stdout, valid, test = testing_utils.train_model(
dict(
task='integration_tests:candidate',
@@ -45,6 +48,7 @@ def test_repeater(self):
)
def test_resuming(self):
+ """Test saving and resuming training."""
with testing_utils.tempdir() as tmpdir:
model_file = os.path.join(tmpdir, 'model')
@@ -86,9 +90,10 @@ def test_resuming(self):
)
def test_resuming_reduce_on_plateau(self):
- """ Reduce on Plateau can be tricky when combined
- with warmup. See:
- https://github.com/facebookresearch/ParlAI/pull/1812
+ """
+ Reduce on Plateau can be tricky when combined with warmup.
+
+ See: https://github.com/facebookresearch/ParlAI/pull/1812
"""
with testing_utils.tempdir() as tmpdir:
model_file = os.path.join(tmpdir, 'model')
@@ -125,9 +130,7 @@ def test_resuming_reduce_on_plateau(self):
)
def test_backcomp(self):
- """
- Tests that the transformer ranker model files continue to work over time.
- """
+ """Tests that the transformer ranker model files continue to work over time."""
testing_utils.download_unittest_models()
stdout, valid, test = testing_utils.eval_model(
@@ -169,6 +172,7 @@ def test_backcomp(self):
@testing_utils.retry(ntries=3)
def test_xlm(self):
+ """Test --variant xlm."""
stdout, valid, test = testing_utils.train_model(
dict(
task='integration_tests:candidate',
@@ -243,6 +247,7 @@ class TestTransformerGenerator(unittest.TestCase):
@testing_utils.retry(ntries=3)
def test_greedysearch(self):
+ """Test greedy search."""
stdout, valid, test = testing_utils.train_model(
dict(
task='integration_tests:nocandidate',
@@ -277,6 +282,7 @@ def test_greedysearch(self):
@testing_utils.retry(ntries=3)
def test_beamsearch(self):
+ """Test beamsearch."""
stdout, valid, test = testing_utils.train_model(
dict(
task='integration_tests:nocandidate',
@@ -309,10 +315,50 @@ def test_beamsearch(self):
test['bleu'], 0.95, "test bleu = {}\nLOG:\n{}".format(test['bleu'], stdout)
)
+ def test_nucleus(self):
+ """Test nucleus generation."""
+ # Nucleus is inherently stochastic, just ensure no crash.
+ testing_utils.train_model(
+ dict(
+ task='integration_tests:nocandidate',
+ model='transformer/generator',
+ optimizer='adamax',
+ learningrate=7e-3,
+ batchsize=32,
+ num_epochs=20,
+ n_layers=1,
+ n_heads=1,
+ ffn_size=32,
+ embedding_size=32,
+ inference='nucleus',
+ topp=0.3,
+ beam_size=5,
+ )
+ )
+
+ def test_topk(self):
+ """Test topk generation."""
+ # Topk is inherently stochastic, just ensure no crash.
+ testing_utils.train_model(
+ dict(
+ task='integration_tests:nocandidate',
+ model='transformer/generator',
+ optimizer='adamax',
+ learningrate=7e-3,
+ batchsize=32,
+ num_epochs=20,
+ n_layers=1,
+ n_heads=1,
+ ffn_size=32,
+ embedding_size=32,
+ inference='topk',
+ topk=5,
+ beam_size=5,
+ )
+ )
+
def test_generator_backcomp(self):
- """
- Tests that the generator model files work over time.
- """
+ """Tests that the generator model files work over time."""
testing_utils.download_unittest_models()
stdout, valid, test = testing_utils.eval_model(
@@ -379,6 +425,7 @@ def test_badinput(self):
@testing_utils.retry(ntries=3)
def test_xlm(self):
+ """Test --variant xlm."""
stdout, valid, test = testing_utils.train_model(
dict(
task='integration_tests:nocandidate',
@@ -417,6 +464,7 @@ def test_xlm(self):
def test_learning_rate_resuming(self, args):
+ """Test learning rate resumes correctly."""
mdl = args['model']
with testing_utils.tempdir() as tmpdir:
model_file = os.path.join(tmpdir, 'model')
@@ -481,10 +529,10 @@ def test_learning_rate_resuming(self, args):
class TestLearningRateScheduler(unittest.TestCase):
- """Test learning rate scheduler for both generative and ranking
- transformers."""
+ """Test learning rate scheduler for both generative and ranking transformers."""
def test_resuming_generator(self):
+ """Test generators resume correctly."""
GENERATOR_ARGS = dict(
task='integration_tests:nocandidate',
model='transformer/generator',
@@ -502,6 +550,7 @@ def test_resuming_generator(self):
test_learning_rate_resuming(self, GENERATOR_ARGS)
def test_resuming_ranker(self):
+ """Test resuming learning rate for the ranker."""
RANKER_ARGS = dict(
task='integration_tests:candidate',
model='transformer/ranker',
| Add Top-k sampling (and stochastic sampling) to beam search
Currently we only support beam search in the Torch Generator Agent. Top-k sampling is a popular decoding strategy introduced by [Fan et al., 2018](https://arxiv.org/abs/1805.04833), and it should be added.
We should aim to implement this as a reasonably clean API so that it is decoupled nicely from the Torch Generator Agent.
| 2019-08-18T19:19:22Z | [] | [] |
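Below is a minimal, hypothetical sketch of the top-k filtering-and-sampling step requested in the issue above. It is not ParlAI's implementation (the merged tests above exercise the feature through the `inference='topk'` and `inference='nucleus'` flags); the function name `sample_top_k` is purely illustrative.

```
import torch
import torch.nn.functional as F

def sample_top_k(logits: torch.Tensor, k: int) -> torch.Tensor:
    """Sample one token id per row, restricted to each row's k highest-scoring logits."""
    # keep only the k largest logits per row; everything else is excluded
    top_values, top_indices = logits.topk(k, dim=-1)
    # renormalize over the restricted set and sample from it
    probs = F.softmax(top_values, dim=-1)
    choice = torch.multinomial(probs, num_samples=1)
    # map the sampled positions back to vocabulary ids
    return top_indices.gather(-1, choice)

# usage with a (batch, vocab)-shaped score tensor
logits = torch.randn(2, 100)
next_tokens = sample_top_k(logits, k=5)  # shape (2, 1)
```

Nucleus (top-p) sampling follows the same pattern, except the kept set is the smallest prefix of the sorted distribution whose cumulative probability exceeds the `topp` threshold.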
|
facebookresearch/ParlAI | 2,037 | facebookresearch__ParlAI-2037 | [
"1999"
] | 9dc28ff4e06a16568b8943ff1d639508473c2d7b | diff --git a/parlai/core/agents.py b/parlai/core/agents.py
--- a/parlai/core/agents.py
+++ b/parlai/core/agents.py
@@ -498,27 +498,6 @@ def load_agent_module(opt):
model_class = get_agent_module(new_opt['model'])
- if hasattr(model_class, 'upgrade_opt'):
- new_opt = model_class.upgrade_opt(new_opt)
-
- # add model arguments to new_opt if they aren't in new_opt already
- for k, v in opt.items():
- if k not in new_opt:
- new_opt[k] = v
- new_opt['model_file'] = model_file
- if not new_opt.get('dict_file'):
- new_opt['dict_file'] = model_file + '.dict'
- elif new_opt.get('dict_file') and not os.path.isfile(new_opt['dict_file']):
- old_dict_file = new_opt['dict_file']
- new_opt['dict_file'] = model_file + '.dict'
- if not os.path.isfile(new_opt['dict_file']):
- warn_once(
- 'WARNING: Neither the specified dict file ({}) nor the '
- '`model_file`.dict file ({}) exists, check to make sure either '
- 'is correct. This may manifest as a shape mismatch later '
- 'on.'.format(old_dict_file, new_opt['dict_file'])
- )
-
# check for model version
if hasattr(model_class, 'model_version'):
curr_version = new_opt.get('model_version', 0)
@@ -542,6 +521,27 @@ def load_agent_module(opt):
m.format(m='modelname', v=curr_version, c='ModelAgent')
)
+ if hasattr(model_class, 'upgrade_opt'):
+ new_opt = model_class.upgrade_opt(new_opt)
+
+ # add model arguments to new_opt if they aren't in new_opt already
+ for k, v in opt.items():
+ if k not in new_opt:
+ new_opt[k] = v
+ new_opt['model_file'] = model_file
+ if not new_opt.get('dict_file'):
+ new_opt['dict_file'] = model_file + '.dict'
+ elif new_opt.get('dict_file') and not os.path.isfile(new_opt['dict_file']):
+ old_dict_file = new_opt['dict_file']
+ new_opt['dict_file'] = model_file + '.dict'
+ if not os.path.isfile(new_opt['dict_file']):
+ warn_once(
+ 'WARNING: Neither the specified dict file ({}) nor the '
+ '`model_file`.dict file ({}) exists, check to make sure either '
+ 'is correct. This may manifest as a shape mismatch later '
+ 'on.'.format(old_dict_file, new_opt['dict_file'])
+ )
+
# if we want to load weights from --init-model, compare opts with
# loaded ones
compare_init_model_opts(opt, new_opt)
| diff --git a/parlai/core/testing_utils.py b/parlai/core/testing_utils.py
--- a/parlai/core/testing_utils.py
+++ b/parlai/core/testing_utils.py
@@ -13,6 +13,7 @@
import tempfile
import shutil
import io
+from typing import Tuple
try:
@@ -302,6 +303,30 @@ def display_data(opt):
return (train_output.getvalue(), valid_output.getvalue(), test_output.getvalue())
+def display_model(opt) -> Tuple[str, str]:
+ """
+ Run display_model.py.
+
+ :return: (stdout_valid, stdout_test)
+ """
+ import parlai.scripts.display_model as dm
+
+ parser = dm.setup_args()
+ parser.set_params(**opt)
+ popt = parser.parse_args(print_args=False)
+ with capture_output() as train_output:
+ # evalmode so that we don't hit train_step
+ popt['datatype'] = 'train:evalmode:stream'
+ dm.display_model(popt)
+ with capture_output() as valid_output:
+ popt['datatype'] = 'valid:stream'
+ dm.display_model(popt)
+ with capture_output() as test_output:
+ popt['datatype'] = 'test:stream'
+ dm.display_model(popt)
+ return (train_output.getvalue(), valid_output.getvalue(), test_output.getvalue())
+
+
def download_unittest_models():
"""Download the unittest pretrained models."""
from parlai.core.params import ParlaiParser
diff --git a/tests/test_torch_agent.py b/tests/test_torch_agent.py
--- a/tests/test_torch_agent.py
+++ b/tests/test_torch_agent.py
@@ -982,5 +982,27 @@ def get_opt(init_mf, mf):
self.assertFalse(is_finetune)
+class TestLegacyVersioning(unittest.TestCase):
+ def test_legacy_version(self):
+ # simply tries to load and run some models with versioning attached
+ with self.assertRaises(RuntimeError):
+ testing_utils.display_model(
+ {
+ 'model_file': 'models:convai2/seq2seq/convai2_self_seq2seq_model',
+ 'task': 'convai2',
+ 'no_cuda': True,
+ }
+ )
+
+ testing_utils.display_model(
+ {
+ 'model': 'legacy:seq2seq:0',
+ 'model_file': 'models:convai2/seq2seq/convai2_self_seq2seq_model',
+ 'task': 'convai2',
+ 'no_cuda': True,
+ }
+ )
+
+
if __name__ == '__main__':
unittest.main()
| Legacy seq2seq doesn't report its version requirement
**Bug description**
Attempting to evaluate the pretrained seq2seq convai2 model results in a bug.
**Reproduction steps**
`python examples/eval_model.py -mf models:convai2/seq2seq/convai2_self_seq2seq_model -t convai2`
**Expected behavior**
The model evaluates
**Logs**
Please paste the command line output:
```
python examples/eval_model.py -mf models:convai2/seq2seq/convai2_self_seq2seq_model -t convai2
[ warning: overriding opt['model_file'] to /Users/kshuster/ParlAI/data/models/convai2/seq2seq/convai2_self_seq2seq_model (previously: /Users/edinan/ParlAI/data/models/convai2/seq2seq/convai2_self_seq2seq_model )]
[ warning: overriding opt['task'] to convai2 (previously: convai2:self:no_cands )]
Traceback (most recent call last):
File "examples/eval_model.py", line 17, in <module>
eval_model(opt, print_parser=parser)
File "/Users/kshuster/ParlAI/parlai/scripts/eval_model.py", line 108, in eval_model
agent = create_agent(opt, requireModelExists=True)
File "/Users/kshuster/ParlAI/parlai/core/agents.py", line 683, in create_agent
model = load_agent_module(opt)
File "/Users/kshuster/ParlAI/parlai/core/agents.py", line 502, in load_agent_module
new_opt = model_class.upgrade_opt(new_opt)
File "/Users/kshuster/ParlAI/parlai/core/torch_generator_agent.py", line 248, in upgrade_opt
assert 'beam_size' in opt_from_disk
AssertionError
```
| Why has this not been picked up by the gpu nightly test?
No idea, but I can still repro on master
It's because it needs `-m legacy:seq2seq:0`
So the real problem is the version checker isn't occurring early enough.
I'd really like to just get rid of the legacy copies. | 2019-10-02T13:50:57Z | [] | [] |
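As a reading aid only, here is a hedged sketch of the ordering change this discussion points at: running the version gate before `upgrade_opt`, so an old checkpoint produces an actionable error rather than the assertion in the traceback above. The function name and error text are illustrative, not ParlAI's actual code.

```
def load_agent_sketch(model_class, saved_opt):
    # 1. version gate first: a stale checkpoint should fail fast and clearly
    if hasattr(model_class, 'model_version'):
        if saved_opt.get('model_version', 0) != model_class.model_version():
            raise RuntimeError(
                'This model file was built for an older version; '
                'load it with the matching legacy agent instead.'
            )
    # 2. only afterwards try to upgrade old option names to the current ones
    if hasattr(model_class, 'upgrade_opt'):
        saved_opt = model_class.upgrade_opt(saved_opt)
    return saved_opt
```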
facebookresearch/ParlAI | 2,072 | facebookresearch__ParlAI-2072 | [
"2046"
] | 9cfcd60b4f8822cdb38f362fcd17855e839f876e | diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -39,7 +39,6 @@
warn_once,
round_sigfigs,
)
-from parlai.core.distributed_utils import is_primary_worker
try:
import torch
@@ -999,7 +998,7 @@ def report(self):
if hasattr(self, 'scheduler') and self.scheduler is not None:
current_lr = round_sigfigs(self.optimizer.param_groups[0]['lr'], 4)
metrics['lr'] = round_sigfigs(current_lr, 4)
- metrics['num_updates'] = self._number_training_updates
+ metrics['total_train_updates'] = self._number_training_updates
steps = self.metrics['updates']
if steps > 0 and self.opt.get('gradient_clip', -1) > 0:
@@ -1013,7 +1012,7 @@ def report(self):
def _gpu_usage(self):
"""
- Computes GPU memory usage.
+ Compute GPU memory usage.
Includes both allocated and cached memory; this should be close to the
output of nvidia-smi, but not reflect of how much is currently demanded
diff --git a/parlai/zoo/model_list.py b/parlai/zoo/model_list.py
--- a/parlai/zoo/model_list.py
+++ b/parlai/zoo/model_list.py
@@ -248,7 +248,7 @@
"-mf zoo:light/biranker_dialogue/model"
),
"result": "{'exs': 6623, 'accuracy': 0.7586, 'f1': 0.7802, 'hits@1': 0.759, 'hits@5': 0.965," # noqa: E501
- "'hits@10': 0.994, 'hits@100': 1.0, 'bleu': 0.7255, 'lr': 5e-05, 'num_updates': 15050," # noqa: E501
+ "'hits@10': 0.994, 'hits@100': 1.0, 'bleu': 0.7255, 'lr': 5e-05, 'total_train_updates': 15050," # noqa: E501
"'examples': 6623, 'loss': 5307.0, 'mean_loss': 0.8013, 'mean_rank': 1.599, 'train_accuracy': 0}", # noqa: E501
},
{
@@ -318,7 +318,7 @@
),
"result2": (
"[ Finished evaluating tasks ['convai2'] using datatype valid ]\n"
- "{'exs': 7801, 'accuracy': 0.8942, 'f1': 0.9065, 'hits@1': 0.894, 'hits@5': 0.99, 'hits@10': 0.997, 'hits@100': 1.0, 'bleu': 0.8941, 'lr': 5e-09, 'num_updates': 0, 'examples': 7801, 'loss': 3004.0, 'mean_loss': 0.385, 'mean_rank': 1.234, 'mrr': 0.9359}"
+ "{'exs': 7801, 'accuracy': 0.8942, 'f1': 0.9065, 'hits@1': 0.894, 'hits@5': 0.99, 'hits@10': 0.997, 'hits@100': 1.0, 'bleu': 0.8941, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 7801, 'loss': 3004.0, 'mean_loss': 0.385, 'mean_rank': 1.234, 'mrr': 0.9359}"
),
},
{
@@ -348,7 +348,7 @@
),
"result2": (
"[ Finished evaluating tasks ['convai2'] using datatype valid ]\n"
- "{'exs': 7801, 'accuracy': 0.8686, 'f1': 0.8833, 'hits@1': 0.869, 'hits@5': 0.987, 'hits@10': 0.996, 'hits@100': 1.0, 'bleu': 0.8685, 'lr': 5e-09, 'num_updates': 0, 'examples': 7801, 'loss': 28.77, 'mean_loss': 0.003688, 'mean_rank': 1.301, 'mrr': 0.9197}"
+ "{'exs': 7801, 'accuracy': 0.8686, 'f1': 0.8833, 'hits@1': 0.869, 'hits@5': 0.987, 'hits@10': 0.996, 'hits@100': 1.0, 'bleu': 0.8685, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 7801, 'loss': 28.77, 'mean_loss': 0.003688, 'mean_rank': 1.301, 'mrr': 0.9197}"
),
},
{
@@ -423,7 +423,7 @@
"--round 3 -dt test -mf zoo:dialogue_safety/single_turn/model -bs 40"
),
"result": (
- "{'exs': 3000, 'accuracy': 0.9627, 'f1': 0.9627, 'bleu': 9.627e-10, 'lr': 5e-09, 'num_updates': 0, 'examples': 3000, 'mean_loss': 0.005441, 'class___notok___recall': 0.7833, 'class___notok___prec': 0.8333, 'class___notok___f1': 0.8076, 'class___ok___recall': 0.9826, 'class___ok___prec': 0.9761, 'class___ok___f1': 0.9793, 'weighted_f1': 0.9621}"
+ "{'exs': 3000, 'accuracy': 0.9627, 'f1': 0.9627, 'bleu': 9.627e-10, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 3000, 'mean_loss': 0.005441, 'class___notok___recall': 0.7833, 'class___notok___prec': 0.8333, 'class___notok___f1': 0.8076, 'class___ok___recall': 0.9826, 'class___ok___prec': 0.9761, 'class___ok___f1': 0.9793, 'weighted_f1': 0.9621}"
),
},
{
@@ -440,7 +440,7 @@
"python examples/eval_model.py -t dialogue_safety:multiturn -dt test -mf zoo:dialogue_safety/multi_turn/model --split-lines True -bs 40"
),
"result": (
- "{'exs': 3000, 'accuracy': 0.9317, 'f1': 0.9317, 'bleu': 9.317e-10, 'lr': 5e-09, 'num_updates': 0, 'examples': 3000, 'mean_loss': 0.008921, 'class___notok___recall': 0.7067, 'class___notok___prec': 0.6444, 'class___notok___f1': 0.6741, 'class___ok___recall': 0.9567, 'class___ok___prec': 0.9671, 'class___ok___f1': 0.9618, 'weighted_f1': 0.9331}"
+ "{'exs': 3000, 'accuracy': 0.9317, 'f1': 0.9317, 'bleu': 9.317e-10, 'lr': 5e-09, 'total_train_updates': 0, 'examples': 3000, 'mean_loss': 0.008921, 'class___notok___recall': 0.7067, 'class___notok___prec': 0.6444, 'class___notok___f1': 0.6741, 'class___ok___recall': 0.9567, 'class___ok___prec': 0.9671, 'class___ok___f1': 0.9618, 'weighted_f1': 0.9331}"
),
},
]
| diff --git a/tests/test_transformers.py b/tests/test_transformers.py
--- a/tests/test_transformers.py
+++ b/tests/test_transformers.py
@@ -80,8 +80,8 @@ def test_resuming(self):
)
# make sure the number of updates is being tracked correctly
self.assertGreater(
- valid2['num_updates'],
- valid1['num_updates'],
+ valid2['total_train_updates'],
+ valid1['total_train_updates'],
'Number of updates is not increasing',
)
# make sure the learning rate is decreasing
@@ -476,8 +476,8 @@ def test_learning_rate_resuming(self, args):
)
# make sure the number of updates is being tracked correctly
self.assertGreater(
- valid2['num_updates'],
- valid1['num_updates'],
+ valid2['total_train_updates'],
+ valid1['total_train_updates'],
'({}) Number of updates is not increasing'.format(mdl),
)
# make sure the learning rate is decreasing
@@ -497,9 +497,10 @@ def test_learning_rate_resuming(self, args):
)
)
self.assertEqual(
- valid3['num_updates'],
- valid1['num_updates'],
- '({}) Finetuning LR scheduler reset failed ' '(num_updates).'.format(mdl),
+ valid3['total_train_updates'],
+ valid1['total_train_updates'],
+ '({}) Finetuning LR scheduler reset failed '
+ '(total_train_updates).'.format(mdl),
)
self.assertEqual(
valid3['lr'],
@@ -516,9 +517,9 @@ def test_learning_rate_resuming(self, args):
)
)
self.assertEqual(
- valid4['num_updates'],
- valid1['num_updates'],
- '({}) LR scheduler change reset failed (num_updates).'
+ valid4['total_train_updates'],
+ valid1['total_train_updates'],
+ '({}) LR scheduler change reset failed (total_train_updates).'
'\n{}'.format(mdl, stdout4),
)
self.assertEqual(
| num_updates reported at validation time
## Bug description
I apologize in advance: I have not looked deeply into the code. This is a quick bug report based on what I saw.
Bug: the logs show a non-zero `num_updates` value during testing. This makes me very hesitant to report the accuracy I am getting in my paper. For example, this is a line from the testing log:
"222s elapsed: {'exs': 7300, '%done': '98.01%', 'time_left': '4s', 'accuracy': 0.4058, 'f1': 0.4307, 'hits@1': 0.406, 'hits@5': 0.719, 'hits@10': 0.88, 'hits@100': 1.0, 'bleu': 0.2901, 'lr': 5e-10, **'num_updates': 92797**, 'examples': 7300, 'loss': 949.2, 'mean_loss': 0.13, 'mean_rank': 4.308, 'mrr': 0.5503}"
According to the definition of testing, the value should be zero; otherwise it would indicate that the model was updated during testing.
See the line [here](https://github.com/facebookresearch/ParlAI/blob/a0fe44646be2e8064b476b347e09cc1b20121e4b/parlai/core/torch_agent.py#L1002) in the report function.
`metrics['num_updates'] = self._number_training_updates`
## Reproduction steps
Performed fine-tuning on my own dataset. The bug is observed during testing.
Notice that the bug also appears, although it should not, in the LIGHT BERT model. See [here](https://github.com/facebookresearch/ParlAI/blob/56513cade9d9a0cffddfc9b731d2e898ba10af7f/parlai/zoo/model_list.py#L251) and [here](https://parl.ai/docs/zoo.html?highlight=num_updates#light-bert-biranker-dialogue-model) in the docs.
## Expected behavior
During testing, `num_updates` should be 0.
## Additional context
Apologies again; I have not looked into the code much more deeply, so sorry I cannot be of much help, you will have to dig a bit.
In my opinion, two mutually exclusive situations may have happened in this case:
* The model is indeed updating during testing, which is very bad, and the log is correctly printing it as a training update. See the line [here](https://github.com/facebookresearch/ParlAI/blob/a0fe44646be2e8064b476b347e09cc1b20121e4b/parlai/core/torch_agent.py#L1002).
* The model is not updating, but somehow logs `num_updates` anyway, in which case it should not log that value at all.
Thanks in advance!
| tl;dr: everything is okay.
I appreciate the filing. The metric is misleading. In this case, `num_updates` is the TOTAL NUMBER of SGD steps we have completed. It is monotonically increasing, but should never increase during validation/test time.
As a point of comparison, run `eval_model.py -t yourtask -mf yourmodelfile -ltim 2`. You'll notice that `num_updates` is printed on every line, but never changes.
I find it useful to log because it gives me a point of comparison when I'm running sweeps to see if two models have had a similar number of updates.
I'll discuss with my colleagues and decide if we want to keep the reporting.
Cool thanks for your reply and the quick update! :laughing:
To remove ambiguity, we'll rename it to `total_train_updates`. | 2019-10-08T21:36:21Z | [] | [] |
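To make the maintainers' explanation concrete, here is a toy sketch (illustrative class and method names, not the ParlAI agent) of a counter that only train steps advance, so the value reported during validation or test is a running total rather than evidence of new updates; the reported key matches the `total_train_updates` rename in the patch above.

```
class UpdateCounterSketch:
    def __init__(self):
        self._number_training_updates = 0

    def train_step(self, batch):
        # forward / backward / optimizer.step() would happen here
        self._number_training_updates += 1

    def eval_step(self, batch):
        # no optimizer step, so the counter is left untouched
        pass

    def report(self):
        # monotonically increasing; constant for the whole of an eval run
        return {'total_train_updates': self._number_training_updates}
```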
facebookresearch/ParlAI | 2,226 | facebookresearch__ParlAI-2226 | [
"2173"
] | 3a5de86667816abbdb0f08049c86d1e2c3aff03e | diff --git a/parlai/agents/fairseq/__init__.py b/parlai/agents/fairseq/__init__.py
deleted file mode 100644
--- a/parlai/agents/fairseq/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/parlai/agents/fairseq/fairseq.py b/parlai/agents/fairseq/fairseq.py
deleted file mode 100644
--- a/parlai/agents/fairseq/fairseq.py
+++ /dev/null
@@ -1,731 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-ParlAI has limited support for using models from `Fairseq.
-
-<https://github.com/pytorch/fairseq>`_. Fairseq often supports more experimental seq2seq
-architectures with fast fp16 training.
-
-Fairseq models can be used for many default tasks by combining a
-``--arch`` flag. For example:
-
-`python -m parlai.scripts.train -t convai2 -m fairseq -a transformer`
-"""
-
-
-from parlai.core.dict import DictionaryAgent
-from parlai.utils.misc import argsort, padded_tensor
-
-try:
- from fairseq import models, optim, criterions
-
- # this is a hack around versioning check because fairseq doesn't
- # announce version numbers yet
- # fairseq 0.5.0 has fp16_trainer, 0.6.0 does not
- try:
- from fairseq import fp16_trainer # noqa: F401
- except ImportError:
- pass
- else:
- raise ImportError
-except ImportError:
- raise ImportError(
- "Please run \"pip install -U 'git+https://github.com/pytorch/"
- "[email protected]#egg=fairseq'\""
- )
-from fairseq import trainer
-from fairseq.sequence_generator import SequenceGenerator
-from fairseq.sequence_scorer import SequenceScorer
-from fairseq import options
-from fairseq.tasks.fairseq_task import FairseqTask
-from fairseq.utils import convert_padding_direction, load_model_state
-from fairseq.meters import AverageMeter
-
-from parlai.core.torch_agent import TorchAgent, Output
-from parlai.core.build_data import modelzoo_path
-from parlai.utils.misc import round_sigfigs
-
-import argparse
-import torch
-import os
-import numpy as np
-import json
-from collections import defaultdict
-
-
-# If a model file is loaded, these arguments may NOT be overridden in the
-# command line:
-NON_OVERRIDABLE_ARGS = {
- 'arch',
- 'encoder_embed_dim',
- 'encoder_layers',
- 'decoder_embed_dim',
- 'decoder_layers',
- 'decoder_out_embed_dim',
- 'decoder_attention',
-}
-
-
-def _fairseq_opt_wrapper(opt, skip_pretrained_embedding_loading=False):
- """
- Marshalls from a dict to a argparse.Namespace object for API compatibility.
-
- Also does some necessary post-processing needed for fairseq. Optionally can
- override pretrained embedding options, which is useful if we're just loading
- a model from a checkpoint.
-
- :param opt: dict. ParlAI options passed around from everywhere.
- :param skip_pretrained_embedding_loading: bool. Don't preload word embeddings.
- :return: an argparse.Namespace object for use in fairseq-py.
- """
- args = argparse.Namespace()
-
- # first set args according to ParlAI options
- for key in opt:
- if opt[key] is not None:
- setattr(args, key, opt[key])
-
- # at this point the user *must* have specified an arch
- if not hasattr(args, "arch"):
- raise ValueError("--arch/-a must be specified")
- # fill in default options from the model
- models.ARCH_CONFIG_REGISTRY[args.arch](args)
-
- # post processing of args. See
- # https://github.com/pytorch/fairseq/blob/v0.5.0/fairseq/options.py#L95
- if hasattr(args, "lr"):
- args.lr = options.eval_str_list(args.lr, type=float)
- if hasattr(args, "update_freq"):
- args.update_freq = options.eval_str_list(args.update_freq, int)
- if hasattr(args, "max_sentences_valid"):
- args.max_sentences_valid = args.max_sentences
- if args.truncate == -1:
- # some torch agents use positional embeddings, which must have a max length
- args.truncate = 1024
- if not hasattr(args, "max_source_positions"):
- # fairseq uses a different name for this CLI parameter
- # Sometimes it's set in model defaults, but not for all models
- args.max_source_positions = args.truncate
- # if we don't have source lengths, we don't have target lengths
- args.max_target_positions = args.truncate
-
- # handle modelzoo if possible
- for k in ("encoder_embed_path", "decoder_embed_path"):
- if getattr(args, k, None) is None:
- # not an argument for this model, pretrained embeddings don't matter
- continue
- elif skip_pretrained_embedding_loading:
- # if we want to skip pretrained, then hide the option from fairseq
- setattr(args, k, None)
- else:
- # otherwise we may need to modelzoo adjust the path for fairseq
- import warnings
-
- warnings.warn("We recommend using --embedding-type instead")
- setattr(args, k, modelzoo_path(opt.get("datapath"), getattr(args, k)))
-
- # Here we hardcode a few options that we currently do not support
- # turn off distributed training
- args.distributed_world_size = 1
- args.distributed_rank = 0
-
- return args, vars(args)
-
-
-class _FairseqDictionary(DictionaryAgent):
- """
- Skeleton dictionary class needed for interaction with fairseq-py.
-
- This class mostly just adds some basic API behavior that Fairseq internally
- expects from dictionaries.
-
- It also inserts a fake token at the 0th index of the dictionary, as
- fairseq-py maintains backwards compatibility with fairseq-lua, which uses
- 1 indexing.
- """
-
- # Name of our fake lua compatibility token
- _LUA = '__LUACOMPAT__'
-
- def __init__(self, *args, **kwargs):
- super().__init__(*args, **kwargs)
- # insert the fairseq-lua compatibility token to emulate 1-indexing.
- # This 1-indexing assumption is baked into a couple of places in fairseq-py,
- # and is unavoidable at the moment.
- #
- # Because of the structure of DictionaryAgent, it's difficult to force
- # a token in the 0th position without breaking load()ing. I've found
- # this to be the best way.
-
- # add the token to the dictionary
- self.add_token(_FairseqDictionary._LUA)
- # force it to be the "most frequent" token
- self.freq[_FairseqDictionary._LUA] = self.freq[self.null_token] + 1
- # sort the list to ensure the lua token is placed first. trim=False to
- # ensure shuffle is non-destructive.
- self.sort(trim=False)
-
- def pad(self):
- return self.pad_index
-
- def eos(self):
- return self[self.end_token]
-
- def unk(self):
- return self[self.unk_token]
-
- @property
- def pad_index(self):
- return self[self.null_token]
-
- @property
- def eos_index(self):
- return self[self.end_token]
-
- @property
- def bos_index(self):
- return self[self.start_token]
-
- @property
- def unk_index(self):
- return self[self.unk_token]
-
- def add_symbol(self):
- raise NotImplementedError("This is a fake class")
-
- @property
- def symbols(self):
- return self.tok2ind.keys()
-
-
-class _ParlaiTask(FairseqTask):
- """
- Skeleton task class needed for interaction with fairseq-py.
- """
-
- def __init__(self, dictionary):
- self.dict = dictionary
-
- @property
- def target_dictionary(self):
- return self.dict
-
- @property
- def source_dictionary(self):
- return self.dict
-
-
-class FairseqAgent(TorchAgent):
- """
- Generic wrapper around fairseq for use in ParlAI.
- """
-
- metrics = {}
-
- @classmethod
- def add_cmdline_args(cls, argparser):
- """
- Add command-line arguments specifically for this agent.
- """
- # first we need to add the general torch agent operations
- super(FairseqAgent, cls).add_cmdline_args(argparser)
-
- # let's store any defaults that were overridden
- old_defaults = argparser._defaults
- if 'clip_norm' not in old_defaults:
- # fairseq has a few awful defaults
- old_defaults['clip_norm'] = 1.0
- if 'optimizer' not in old_defaults:
- old_defaults['optimizer'] = 'adam'
- old_defaults['adam_betas'] = '(0.9,0.98)'
-
- agent = argparser.add_argument_group('Fairseq Arguments')
- agent.add_argument(
- '--fp16', default=False, type='bool', help='Use fp16 training'
- )
- agent.add_argument(
- '--fp16-init-scale',
- default=2 ** 7,
- type=int,
- help='default FP16 loss scale',
- )
- agent.add_argument(
- '--seed',
- default=1,
- type=int,
- metavar='N',
- help='pseudo random number generator seed',
- )
- agent.add_argument(
- '--skip-generation',
- default=False,
- type='bool',
- metavar='BOOL',
- help='Skips test time beam search. Much faster if you only need PPL',
- )
-
- # Check subargs for generation, optimizers, criterions, archs, etc
- options.add_generation_args(argparser)
- options.add_optimization_args(argparser)
- options.add_checkpoint_args(argparser)
-
- # restore any user set defaults that fairseq possibly overrode
- argparser.set_defaults(**old_defaults)
- known_args = argparser.parse_known_args(nohelp=True)[0]
-
- if hasattr(known_args, "optimizer"):
- optimizer = known_args.optimizer
- opt_group = argparser.add_argument_group(
- '{} optimizer arguments'.format(optimizer)
- )
- optim.OPTIMIZER_REGISTRY[optimizer].add_args(opt_group)
- if hasattr(known_args, "lr_scheduler"):
- lr_scheduler = known_args.lr_scheduler
- lr_group = argparser.add_argument_group(
- '{} scheduler arguments'.format(lr_scheduler)
- )
- optim.lr_scheduler.LR_SCHEDULER_REGISTRY[lr_scheduler].add_args(lr_group)
- # We need to find out the fairseq model-specific options, so grab the
- # architecture stuff and look up its options
- arch_group = options.add_model_args(argparser)
- # Fairseq marks the arch flag as required, but it may be specified
- # by a saved model cache, so we do some weird stuff to undo that
- for a in arch_group._actions:
- if a.dest == "arch":
- a.required = False
- a.default = None
- break
-
- # once again restore any user-set defaults
- argparser.set_defaults(**old_defaults)
- known_args = argparser.parse_known_args(nohelp=True)[0]
-
- if hasattr(known_args, "arch") and known_args.arch is not None:
- arch = known_args.arch
- arch_group = argparser.add_argument_group(
- "{} architecture arguments".format(arch)
- )
- models.ARCH_MODEL_REGISTRY[arch].add_args(arch_group)
-
- if hasattr(known_args, "criterion"):
- crit_group = argparser.add_argument_group(
- '{} criterion arguments'.format(known_args.criterion)
- )
- criterions.CRITERION_REGISTRY[known_args.criterion].add_args(crit_group)
-
- # one last time, restore any user set defaults
- argparser.set_defaults(**old_defaults)
- # default weight decay in fairseq is zero not None
- argparser.set_defaults(weight_decay=0.0)
-
- @staticmethod
- def dictionary_class():
- # Force use of the Fairseq Dictionary
- return _FairseqDictionary
-
- def __init__(self, opt, shared=None):
- # In general use a basic TorchAgent wherever possible
- super().__init__(opt, shared)
- if not shared:
- # this is not a shared instance of this class, so do full initialization
-
- # check early if we're going to be loading the model from a checkpoint
- model_file_exists = self.opt.get('model_file') and os.path.isfile(
- self.opt['model_file']
- )
-
- # fairseq expects options to be in argparse format, instead of a dict
- # We also need to do some argument postprocessing and whatnot
- # We'll skip pretrained embeddings if we're going to override them with
- # a model checkpoint anyway
- self.args, self.opt = _fairseq_opt_wrapper(opt, model_file_exists)
-
- # seed the RNG
- torch.manual_seed(self.args.seed)
-
- # Just some identifying info
- self.id = "fairseq:{}".format(self.args.arch)
-
- # We need a placeholder task for fairseq
- self.task = _ParlaiTask(self.dict)
-
- # meters for keeping track of loss, ppl, etc.
- self.meters = defaultdict(AverageMeter)
-
- # actually construct the criterion, model and generator
- self.criterion = self.build_criterion()
- self.model = self.build_model()
-
- # Construct the generator and scorer
- self.generator = SequenceGenerator(
- [self.model],
- tgt_dict=self.dict,
- beam_size=self.args.beam,
- stop_early=(not self.args.no_early_stop),
- normalize_scores=(not self.args.unnormalized),
- len_penalty=self.args.lenpen,
- unk_penalty=self.args.unkpen,
- sampling=self.args.sampling,
- sampling_topk=self.args.sampling_topk,
- sampling_temperature=self.args.sampling_temperature,
- )
- self.scorer = SequenceScorer([self.model], self.dict)
-
- # TODO: we might choose to add a --no-fp16 opt in the future to
- # explicitly disable fp16 instead
- if not self.args.fp16 and torch.cuda.get_device_capability(0)[0] >= 7:
- print("Heads up: using --fp16 could be a lot faster!")
- if self.use_cuda:
- self.trainer = trainer.Trainer(
- self.args, self.task, self.model, self.criterion, None
- )
- self.trainer._build_optimizer()
- else:
- self.trainer = None
-
- # if the model already existed, let's preload it and the trainer
- if model_file_exists:
- print('Loading existing model params from ' + self.opt['model_file'])
- self.load(self.opt.get('model_file'))
-
- # move things to the GPU if possible
- if self.use_cuda:
- self.model = self.model.cuda()
- self.generator = self.generator.cuda()
- else:
- self.model = shared['model']
- self.trainer = shared['trainer']
- self.generator = shared['generator']
- self.dict = shared['dict']
- self.args = shared['args']
- self.meters = shared['meters']
-
- # Start things off clean
- self.reset()
-
- def _check_opts_unchanged(self, saved_opts, current_opts):
- """
- Verify that critical options do not differ in command line vs saved model.
- """
- for k in NON_OVERRIDABLE_ARGS:
- if k not in saved_opts or k not in current_opts:
- # if it's not an option needed by this fairseq model, don't stress
- continue
- if saved_opts[k] != current_opts[k]:
- raise ValueError(
- '{} cannot be overridden when --model-file is specified'.format(k)
- )
-
- def build_model(self):
- """
- Construct the actual Fairseq model.
-
- Default implementation is to use Fairseq's arch builder, but this method may be
- overridden to build custom models.
- """
- model_class = models.ARCH_MODEL_REGISTRY[self.args.arch]
- model = model_class.build_model(self.args, self.task)
- if self.args.embedding_type != 'random':
- self._copy_embeddings(
- model.encoder.embed_tokens.weight, self.args.embedding_type
- )
- return model
-
- def build_criterion(self):
- """
- Set up the grader.
- """
- # TorchAgent will call this without ready=True before self.args is ready
- return criterions.build_criterion(self.args, self.task)
-
- def share(self):
- shared = super().share()
- shared['model'] = self.model
- shared['trainer'] = self.trainer
- shared['generator'] = self.generator
- shared['dict'] = self.dict
- shared['args'] = self.args
- shared['meters'] = self.meters
- return shared
-
- def save(self, path):
- """
- Save using fairseq's checkpointing.
- """
- if not path:
- return
- self.trainer.save_checkpoint(path, {'opt': self.opt, 'epoch': 0})
- # Parlai expects options to also be saved
- with open(path + '.opt', 'w') as handle:
- # overridden options shouldn't be stored, only the main ones
- if 'override' in self.opt:
- del self.opt['override']
- json.dump(self.opt, handle)
-
- # force save the dict
- self.dict.save(path + '.dict', sort=False)
-
- def load(self, path):
- """
- Load using fairseq's checkpointing.
- """
- if self.trainer:
- old_options = self.trainer.load_checkpoint(path, self.args.reset_optimizer)
- self._check_opts_unchanged(old_options, self.opt)
- else:
- load_model_state(path, self.model)
-
- def shutdown(self):
- if not hasattr(self, 'trainer'):
- # looks like this is a "fake" model that isn't actually used for batch_act.
- # we don't need to save this one.
- return
- super().shutdown()
-
- def reset(self):
- """
- Reset observation and episode_done.
- """
- super().reset()
- self.reset_metrics()
-
- def is_valid(self, obs):
- """
- Override from TorchAgent.
-
- Check if an observation has no tokens in it.
- """
- return len(obs.get('text_vec', [])) > 0
-
- def batchify(self, obs_batch):
- """
- Override parent batchify to set requirements for fairseq.
-
- Fairseq depends on sorted batch inputs for a call to rnn.pad_packed_sequence.
- Fairseq models cannot handle zero length sentences
- """
- return super().batchify(obs_batch, sort=True)
-
- def _update_metrics(self, metrics, sample):
- if metrics is None:
- # probably got an overflow in fp16 mode. don't count this sample
- return
-
- bsz = len(sample['target'])
- ntok = sample['ntokens']
- ssize = metrics['sample_size']
-
- for k, v in metrics.items():
- if k in {'ntokens', 'nsentences', 'sample_size'}:
- # don't need these
- continue
- elif k == "nll_loss":
- # nll loss is always normalized by ntokens
- self.meters[k].update(v, ntok)
- elif k == "loss":
- # loss is explicitly normalized by passed up sample size
- self.meters[k].update(v, ssize)
- else:
- # assume everything else it's averaged over bsz
- self.meters[k].update(v, bsz)
-
- def train_step(self, batch):
- """
- Process batch of inputs and targets and train on them.
-
- :param batch: parlai.core.torch_agent.Batch, contains tensorized
- version of observations.
- """
- if batch.text_vec is None:
- return
- self.is_training = True
- sample = self._make_sample(batch)
- self.model.train()
- metrics = self.trainer.train_step([sample])
- self._update_metrics(metrics, sample)
-
- def eval_step(self, batch):
- """
- Process batch of inputs.
-
- If the batch includes labels, calculate validation metrics as well.
- If --skip-generation is not set, return a prediction for each input.
-
- :param batch: parlai.core.torch_agent.Batch, contains tensorized
- version of observations.
- """
- if batch.text_vec is None:
- return
- self.is_training = False
- samples = self._make_sample(batch)
- self.model.eval()
- if batch.label_vec is not None and self.trainer is not None:
- # Interactive mode won't have a gold label
- metrics = self.trainer.valid_step(samples)
- self._update_metrics(metrics, samples)
-
- # Output placeholders
- reranked_cands = None
- generated_output = None
-
- # Grade each of the candidate sequences
- if batch.candidate_vecs is not None:
- bsz = len(batch.text_vec)
- reranked_cands = []
- # score the candidates for each item in the batch separately, so that
- # we can support variable number of candidates
- for i in range(bsz):
- cands = batch.candidate_vecs[i]
- if not cands:
- reranked_cands.append(None)
- continue
- ncand = len(cands)
- # repeat the input many times
- xs = batch.text_vec[i].unsqueeze(0).expand(ncand, -1)
- # some models crash if there's leading padding on every example
- xs = xs[:, : batch.text_lengths[i]]
- # and appropriately pack the outputs
- ys, _ = padded_tensor(cands, self.NULL_IDX, self.use_cuda)
- s = self._make_sample(xs=xs, ys=ys)
- # perform the actual grading, extract the scores
- scored = list(self.scorer.score_batched_itr([s], cuda=self.use_cuda))
- scores = [s[3][0]['score'].item() for s in scored]
- # intentional hanging comma here; argsort returns a list
- (ranked,) = argsort(scores, batch.candidates[i], descending=True)
- reranked_cands.append(ranked)
-
- # Next generate freely to create our response
- if not self.args.skip_generation:
- generated_output = self._generate(samples)
- elif reranked_cands:
- # we're skiping generation, but we're also grading candidates
- # so output the highest ranked candidate
- # In the case of zero candidates, we don't have something to rank,
- # so we may need to pass on that None
- generated_output = [
- ranked and ranked[0] or None for ranked in reranked_cands
- ]
- else:
- # no output at all
- pass
-
- return Output(generated_output, reranked_cands)
-
- def _generate(self, samples):
- no_prev_token = {
- k: v for k, v in samples['net_input'].items() if k != 'prev_output_tokens'
- }
- gens = self.generator.generate(no_prev_token, maxlen=64)
- bsz = samples['net_input']['src_tokens'].size(0)
- responses = []
- for i in range(bsz):
- beams = gens[i]
- selected = max(beams, key=lambda x: x["score"])
- tokens = selected["tokens"]
- start = 0
- end = -1
- for i, t in enumerate(tokens):
- t = t.item()
- if t == self.dict.bos_index:
- # don't include <s> token
- start = i + 1
- continue
- if t == self.dict.eos_index:
- # stop (and don't include) </s> token
- end = i
- break
- responses.append(self.dict.vec2txt(tokens[start:end]))
- return responses
-
- def report(self):
- """
- Return metrics calculated by the model.
- """
- # if we haven't initialized yet, just return a dummy object
- if not hasattr(self, "trainer"):
- return {}
-
- output = {k: v.avg for k, v in self.meters.items()}
-
- if "nll_loss" in self.meters:
- # special case, we used sentence averaging so ppl comes from nll_loss
- output["ppl"] = np.exp2(self.meters["nll_loss"].avg)
- else:
- # normal case, just use loss
- output["ppl"] = np.exp2(self.meters["loss"].avg)
-
- # Fairseq trainer metrics we'll pass up the way
- trainer_metrics = {"ups", "wps", "gnorm", "clip"}
- if self.is_training:
- for k in trainer_metrics:
- output[k] = self.trainer.meters[k].avg
-
- # for display purposes
- output = {k: round_sigfigs(v, 4) for k, v in output.items()}
- return output
-
- def reset_metrics(self):
- """
- Reset metrics calculated by the model back to zero.
- """
- if not hasattr(self, "trainer"):
- # We haven't set up the trainer yet, so we don't have any metrics
- return
- # We need to reset everything
- self.meters.clear()
- if self.trainer:
- for k in self.trainer.meters:
- self.trainer.meters[k].reset()
-
- def receive_metrics(self, metrics_dict):
- """
- Update lr scheduler with validation loss.
- """
- # TODO: this should be smarter
- self.trainer.lr_step(-1, metrics_dict["loss"])
-
- # Helper functions
- def _seq_length(self, xs):
- """
- Compute length of the sequence (non-padded size).
- """
- return xs.ne(self.dict.pad_index).long().sum(dim=-1)
-
- def _right_shifted_ys(self, ys):
- """
- Replace first token with EOS and shift remaining tokens right 1.
- """
- result = torch.LongTensor(ys.size())
- result[:, 0] = self.dict.eos_index
- result[:, 1:] = ys[:, :-1]
- return result
-
- def _make_sample(self, batch=None, xs=None, ys=None):
- """
- Generate a sample object that Fairseq expects.
- """
- # add extra info to samples
- if batch is None and xs is None:
- raise ValueError("Must supply either batch or xs")
- if batch is None and ys is None:
- raise ValueError("Must supply either batch or ys")
- if xs is None:
- xs = batch.text_vec
- if ys is None:
- ys = batch.label_vec
- repadded = convert_padding_direction(xs, self.dict.pad(), right_to_left=True)
- sample = {}
- sample["id"] = torch.arange(len(xs) - 1)
- sample["net_input"] = {
- "src_tokens": repadded,
- "src_lengths": self._seq_length(xs),
- }
- if ys is not None:
- sample["target"] = ys
- sample["ntokens"] = sum(self._seq_length(ys)).item()
- sample["net_input"]["prev_output_tokens"] = self._right_shifted_ys(ys)
- return sample
| diff --git a/tests/test_fairseq.py b/tests/test_fairseq.py
deleted file mode 100644
--- a/tests/test_fairseq.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-import parlai.utils.testing as testing_utils
-
-SKIP_TESTS = False
-try:
- import fairseq # noqa: F401
-except ImportError:
- SKIP_TESTS = True
-
-
-BATCH_SIZE = 64
-NUM_EPOCHS = 5
-LR = 1e-2
-
-
-class TestFairseq(unittest.TestCase):
- """
- Checks that fairseq can learn some very basic tasks.
- """
-
- @testing_utils.skipUnlessGPU
- @unittest.skipIf(SKIP_TESTS, "Fairseq not installed")
- def test_labelcands(self):
- stdout, valid, test = testing_utils.train_model(
- dict(
- task='integration_tests:candidate',
- model='fairseq',
- arch='lstm_wiseman_iwslt_de_en',
- lr=LR,
- batchsize=BATCH_SIZE,
- num_epochs=NUM_EPOCHS,
- rank_candidates=True,
- skip_generation=True,
- )
- )
-
- self.assertTrue(
- valid['hits@1'] > 0.95,
- "valid hits@1 = {}\nLOG:\n{}".format(valid['hits@1'], stdout),
- )
- self.assertTrue(
- test['hits@1'] > 0.95,
- "test hits@1 = {}\nLOG:\n{}".format(test['hits@1'], stdout),
- )
-
- @testing_utils.skipUnlessGPU
- @unittest.skipIf(SKIP_TESTS, "Fairseq not installed")
- def test_generation(self):
- stdout, valid, test = testing_utils.train_model(
- dict(
- task='integration_tests:nocandidate',
- model='fairseq',
- arch='lstm_wiseman_iwslt_de_en',
- lr=LR,
- batchsize=BATCH_SIZE,
- num_epochs=NUM_EPOCHS,
- rank_candidates=False,
- skip_generation=False,
- )
- )
-
- self.assertTrue(
- valid['ppl'] < 1.2, "valid ppl = {}\nLOG:\n{}".format(valid['ppl'], stdout)
- )
- self.assertTrue(
- test['ppl'] < 1.2, "test ppl = {}\nLOG:\n{}".format(test['ppl'], stdout)
- )
-
-
-if __name__ == '__main__':
- unittest.main()
| Delete fairseq agent
Delete the fairseq agent and references to it. It hasn't worked in ages, and we can stop pretending that. That also means finding my PR that tried to fix it.
| 2019-11-26T14:33:23Z | [] | [] |
|
facebookresearch/ParlAI | 2,272 | facebookresearch__ParlAI-2272 | [
"2192"
] | 0678996c59aef1c3a850acd2193b5c83ffa28579 | diff --git a/parlai/core/dict.py b/parlai/core/dict.py
--- a/parlai/core/dict.py
+++ b/parlai/core/dict.py
@@ -288,6 +288,9 @@ def __init__(self, opt: Opt, shared=None):
# don't check isfile first, should fail if file not found
self.load(opt['dict_initpath'])
+ # cache unk token for later
+ self._unk_token_idx = self.tok2ind.get(self.unk_token)
+
# initialize tokenizers
if self.tokenizer == 'nltk':
try:
@@ -374,6 +377,14 @@ def __contains__(self, key):
elif type(key) == str:
return key in self.tok2ind
+ def _word_lookup(self, key):
+ # return index from token, or unk_token's index, or None
+ return self.tok2ind.get(key, self._unk_token_idx)
+
+ def _index_lookup(self, key):
+ # return token from index, or unk_token
+ return self.ind2tok.get(key, self.unk_token)
+
def __getitem__(self, key):
"""
Lookup the word or ID.
@@ -383,12 +394,10 @@ def __getitem__(self, key):
not in the dictionary, return the index of the unknown token. If there is no
unknown token, return ``None``.
"""
+ if type(key) == str:
+ return self._word_lookup(key)
if type(key) == int:
- # return token from index, or unk_token
- return self.ind2tok.get(key, self.unk_token)
- elif type(key) == str:
- # return index from token, or unk_token's index, or None
- return self.tok2ind.get(key, self.tok2ind.get(self.unk_token, None))
+ return self._index_lookup(key)
def __len__(self):
return len(self.tok2ind)
@@ -737,10 +746,11 @@ def txt2vec(self, text, vec_type=list):
The type of the returned vector if the input is a string. Suggested
``list``, ``tuple``, ``set``, or ``np.ndarray``.
"""
+ itr = (self._word_lookup(token) for token in self.tokenize(str(text)))
if vec_type == list or vec_type == tuple or vec_type == set:
- res = vec_type((self[token] for token in self.tokenize(str(text))))
+ res = vec_type(itr)
elif vec_type == np.ndarray:
- res = np.fromiter((self[token] for token in self.tokenize(text)), np.int)
+ res = np.fromiter(itr, np.int)
else:
raise RuntimeError('Type {} not supported by dict'.format(vec_type))
return res
diff --git a/parlai/core/params.py b/parlai/core/params.py
--- a/parlai/core/params.py
+++ b/parlai/core/params.py
@@ -127,6 +127,18 @@ def get_model_name(opt):
return model
+def str2none(value: str):
+ """
+ If the value is a variant of `none`, return None.
+
+ Otherwise, return the original value.
+ """
+ if value.lower() == 'none':
+ return None
+ else:
+ return value
+
+
def str2bool(value):
"""
Convert 'yes', 'false', '1', etc.
@@ -259,6 +271,7 @@ def __init__(
formatter_class=CustomHelpFormatter,
add_help=add_parlai_args,
)
+ self.register('type', 'nonestr', str2none)
self.register('type', 'bool', str2bool)
self.register('type', 'floats', str2floats)
self.register('type', 'class', str2class)
@@ -660,6 +673,14 @@ def add_parlai_args(self, args=None):
type=int,
help='batch size for minibatch training schemes',
)
+ parlai.add_argument(
+ '-dynb',
+ '--dynamic-batching',
+ default=None,
+ type='nonestr',
+ choices={'none', 'full', 'batchsort'},
+ help='Use dynamic batching',
+ )
self.add_parlai_data_path(parlai)
def add_distributed_training_args(self):
diff --git a/parlai/core/teachers.py b/parlai/core/teachers.py
--- a/parlai/core/teachers.py
+++ b/parlai/core/teachers.py
@@ -546,12 +546,19 @@ def __init__(self, opt, shared=None):
self.training = (
self.datatype.startswith('train') and 'evalmode' not in self.datatype
)
- self.stream = 'stream' in self.datatype.split(':')
+ self.stream = 'stream' in self.datatype
if not self.use_batch_act:
# first initialize any shared objects
data_class = StreamDialogData if self.stream else DialogData
- kwargs = {'cycle': self.training} if self.stream else {}
+ kwargs = (
+ # never cycle if "ordered" is in the datatype. this is used by
+ # build_dict to enumerate through the data exactly once while still
+ # marking examples as training examples.
+ {'cycle': self.training and 'ordered' not in self.datatype}
+ if self.stream
+ else {}
+ )
if shared and shared.get('data'):
self.data = data_class(opt, shared=shared['data'], **kwargs)
else:
@@ -893,6 +900,11 @@ class StreamDialogData(DialogData):
reached without reset being called.
"""
+ # represents that we haven't read in any data at all
+ _FIRST_PASS = None
+ # represents that we are out of data.
+ _END_OF_EPOCH = -1
+
def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
# super() call initiates stream in self.data by calling _load()
super().__init__(opt, data_loader, cands, shared, **kwargs)
@@ -918,7 +930,7 @@ def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
)
self.lock = Lock()
self.entry_idx = 0
- self.next_episode = None
+ self.cur_episode = self._FIRST_PASS
self.num_eps = None
self.num_exs = None
@@ -951,9 +963,8 @@ def _data_generator(self, data_loader, datafile):
while True:
for episode in self._read_episode(data_loader(datafile)):
yield episode
- yield -1
while not self.cycle:
- yield -1
+ yield self._END_OF_EPOCH
def load_length(self):
"""
@@ -1005,35 +1016,24 @@ def get(self):
When episode is done returns first entry of next episode.
"""
- # first look up data
- if self.next_episode != -1 or self.entry_idx != 0:
- with self._lock():
- if self.next_episode is None:
- self.next_episode = next(self.data)
- if self.entry_idx == 0:
- self.cur_episode = self.next_episode
- self.next_episode = next(self.data)
- entry = self.cur_episode[self.entry_idx]
-
- # now pack it in a action-observation dictionary
- table = self.build_table(entry)
-
- episode_done = self.entry_idx == len(self.cur_episode) - 1
- if episode_done:
- self.entry_idx = 0
- else:
- self.entry_idx += 1
- end_of_data = episode_done and self.next_episode is -1
- if end_of_data and self.cycle:
- self.next_episode = next(self.data)
-
- # last entry in this episode
- table['episode_done'] = episode_done
+ if self.cur_episode is self._FIRST_PASS:
+ # first go around, always read off the episode
+ # maybe lock this line
+ self.cur_episode = next(self.data)
+ if self.cur_episode == self._END_OF_EPOCH:
+ # we're done here
+ return {'episode_done': True}, True
+ entry = self.cur_episode[self.entry_idx]
+ table = self.build_table(entry)
+ episode_done = self.entry_idx == len(self.cur_episode) - 1
+ table['episode_done'] = episode_done
+ if episode_done:
+ # maybe lock this line
+ self.cur_episode = next(self.data)
+ self.entry_idx = 0
else:
- table = {'episode_done': True}
- end_of_data = True
-
- return table, end_of_data
+ self.entry_idx += 1
+ return table, self.cur_episode == self._END_OF_EPOCH
def reset(self):
"""
@@ -1042,13 +1042,12 @@ def reset(self):
if self.reset_data is not None:
# auxiliary instance, reset main datastream
self.data = self.reset_data()
- self.next_episode = None
elif not self.is_reset:
# if main instance is not reset, reset datastream
self._load(self.data_loader, self.datafile)
self.is_reset = True
- self.next_episode = None
self.entry_idx = 0
+ self.cur_episode = self._FIRST_PASS
return self.data
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -629,8 +629,10 @@ def __init__(self, opt: Opt, shared=None):
# of 8 in all dimensions. This INCLUDES the embeddings layer! As
# such, we need some extra magic to ensure the dictionary is padded
# with extra tokens to make it a multiple of 8.
- if len(self.dict) % 8 != 0:
- for i in range(8 - len(self.dict) % 8):
+ from parlai.utils.torch import FP16_PAD_SIZE
+
+ if len(self.dict) % FP16_PAD_SIZE != 0:
+ for i in range(FP16_PAD_SIZE - len(self.dict) % FP16_PAD_SIZE):
self.dict['__FP16_PAD_{}__'.format(i)] = 1
self.metrics: Dict[str, Any] = {}
diff --git a/parlai/core/torch_generator_agent.py b/parlai/core/torch_generator_agent.py
--- a/parlai/core/torch_generator_agent.py
+++ b/parlai/core/torch_generator_agent.py
@@ -641,12 +641,10 @@ def train_step(self, batch):
"""
Train on a single batch of examples.
"""
- if batch.text_vec is not None:
- batchsize = batch.text_vec.size(0)
- elif batch.image is not None:
- batchsize = len(batch.image)
# helps with memory usage
- self._init_cuda_buffer(batchsize, self.truncate or 256)
+ # note we want to use the opt's batchsize instead of the observed batch size
+ # in case dynamic batching is in use
+ self._init_cuda_buffer(self.opt['batchsize'], self.label_truncate or 256)
self.model.train()
self.zero_grad()
diff --git a/parlai/core/worlds.py b/parlai/core/worlds.py
--- a/parlai/core/worlds.py
+++ b/parlai/core/worlds.py
@@ -46,7 +46,7 @@
import time
from functools import lru_cache
-from typing import List, Dict, Any
+from typing import List, Dict, Any, Union
try:
from torch.multiprocessing import Process, Value, Condition, Semaphore
@@ -174,6 +174,12 @@ def get_task_agent(self):
"""
raise NotImplementedError('Implement in subworld')
+ def get_model_agent(self):
+ """
+ Return model agent, if applicable.
+ """
+ raise NotImplementedError('Implement in subworld')
+
def get_acts(self):
"""
Return the last act of each agent.
@@ -294,7 +300,9 @@ class DialogPartnerWorld(World):
chance to speak per turn and passing that back to the other one.
"""
- def __init__(self, opt: Opt, agents, shared=None):
+ def __init__(self, opt: Opt, agents=None, shared=None):
+ if not ((agents is not None) ^ (shared is not None)):
+ raise ValueError('You must supply either agents or shared, but not both.')
super().__init__(opt)
if shared:
# Create agents based on shared data.
@@ -317,6 +325,12 @@ def get_task_agent(self):
"""
return self.get_agents()[0]
+ def get_model_agent(self):
+ """
+ Return model agent, if applicable.
+ """
+ return self.get_agents()[1]
+
def parley(self):
"""
Agent 0 goes first.
@@ -428,6 +442,12 @@ def get_task_agent(self):
"""
return self.get_agents()[0]
+ def get_model_agent(self):
+ """
+ Return model agent.
+ """
+ return self.get_agents()[1]
+
def epoch_done(self):
"""
Return if the epoch is done for any subagent.
@@ -615,7 +635,13 @@ def get_task_agent(self):
"""
Not possible/well-defined in this setting.
"""
- raise RuntimeError('get_task_agent not defined for Multiworld')
+ return self.worlds[self.world_idx].get_task_agent()
+
+ def get_model_agent(self):
+ """
+ Not implemented.
+ """
+ return self.worlds[self.world_idx].get_model_agent()
def get_acts(self):
"""
@@ -916,6 +942,12 @@ def get_task_agent(self):
"""
return self.world.get_task_agent()
+ def get_model_agent(self):
+ """
+ Return model agent of the root world.
+ """
+ return self.world.get_model_agent()
+
def episode_done(self):
"""
Return whether the episode is done.
@@ -974,6 +1006,222 @@ def shutdown(self):
self.world.shutdown()
+class DynamicBatchWorld(World):
+ def __init__(self, opt: Opt, world: Union[DialogPartnerWorld, MultiWorld]):
+ super().__init__(opt)
+ self.opt = opt
+
+ # agents is a placeholder just for super.reset
+ self.agents = []
+
+ # check some assumptions
+ if isinstance(
+ world, (ExecutableWorld, BatchWorld, HogwildWorld, MultiAgentDialogWorld)
+ ):
+ raise TypeError(
+ 'World must be a DialogPartnerWorld or a '
+ 'MultiWorld of DialogPartnerWorld'
+ )
+
+ if len(world.get_agents()) != 2:
+ raise AssertionError(
+ "Dynamic batch only works in a fixed dialog world with two agents."
+ )
+
+ if not hasattr(world.get_model_agent(), 'batch_act'):
+ raise TypeError("Model agent doesn't have batch_act.")
+
+ self.truncate = opt.get('text_truncate', None) or opt.get('truncate', None)
+ self.l_truncate = opt.get('label_truncate', None) or opt.get('truncate', None)
+ if self.truncate is None or self.truncate < 0:
+ raise ValueError(
+ 'You must use --text-truncate or --truncate in order to use '
+ 'dynamic batching.'
+ )
+
+ # size of the buffer we will use to find worlds
+ self._BUFFER_SIZE = 1021 # chosen as a prime number
+
+ if opt['dynamic_batching'] == 'full':
+ # full dynamic batching, we can grow our batchsize
+ self.max_batch_size = self._BUFFER_SIZE
+ else:
+ # simple batchsort
+ self.max_batch_size = opt['batchsize']
+
+ # TODO: check to ensure the agent has self_observe
+ shared = world.share()
+ self.world = world
+ # TODO: maybe generalize this
+ self.max_words = (self.l_truncate + self.truncate) * opt['batchsize']
+
+ # buffer worlds
+ self.worlds = [
+ shared['world_class'](opt, shared=shared) for _ in range(self._BUFFER_SIZE)
+ ]
+
+ self.reset()
+
+ def reset(self):
+ super().reset()
+ self._obs = [None for _ in range(self._BUFFER_SIZE)]
+ self._scores = [None for _ in range(self._BUFFER_SIZE)]
+
+ self.number_parleys = 0
+ self.total_exs = 0
+ self.world.reset()
+ self.rng = random.Random(4)
+ for w in self.worlds:
+ w.reset()
+
+ def reset_metrics(self):
+ super().reset_metrics()
+ self.world.reset_metrics()
+ for w in self.worlds:
+ w.reset_metrics()
+
+ def epoch_done(self):
+ return (
+ self.world.epoch_done()
+ or all(w.epoch_done() for w in self.worlds)
+ and all(s is None for s in self._scores)
+ )
+
+ def num_examples(self):
+ return self.world.num_examples()
+
+ def num_episodes(self):
+ return self.world.num_episodes()
+
+ def _ceil(self, n):
+ """
+ Round to the nearest multiple of 8.
+
+ TensorCores only work when a tensor is a multiple of 8 in almost all
+ dimensions. This means all examples cost is related to their nearest
+ multiple of 8.
+
+ See https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/ for
+ more information.
+ """
+ # round up to r, all things are equal
+ from parlai.utils.torch import FP16_PAD_SIZE
+
+ return ((n + FP16_PAD_SIZE - 1) // FP16_PAD_SIZE) * FP16_PAD_SIZE
+
+ def _score(self, obs):
+ if 'text_vec' in obs:
+ # note that all examples have a cost that is based on their
+ # nearest multiple of 4. We can therefore mix-and-match
+ # anything with the same cost for increased stochasticity,
+ # while not really wasting much padding.
+ return tuple(
+ self._ceil(len(obs[key]))
+ for key in ['text_vec', 'labels_vec', 'eval_labels_vec']
+ if key in obs
+ )
+ else:
+ return None
+
+ def parley(self):
+ # first make sure that all the worlds are processed in the queue
+ indices = []
+ for i in range(self._BUFFER_SIZE):
+ if self._scores[i] is not None:
+ indices.append(i)
+ continue
+ if self.worlds[i].epoch_done():
+ continue
+
+ if hasattr(self.world, 'parley_init'):
+ self.worlds[i].parley_init()
+
+ act = self.worlds[i].get_task_agent().act()
+ obs = self.worlds[i].get_model_agent().observe(act)
+ self._obs[i] = obs
+
+ self._scores[i] = self._score(obs)
+ if self._scores[i] is not None:
+ indices.append(i)
+
+ # quick invariant checks
+ assert len(indices) != 0, "DynamicBatchWorld ran out of data!"
+ assert not any(self._scores[i] is None for i in indices)
+
+ # sort all the indices by their score, so that we can find similarly lengthed
+ # items in O(1)
+ indices = sorted(indices, key=lambda i: self._scores[i] + (self.rng.random(),))
+
+ # now let's build the batch
+ batch = []
+
+ # start with a random item. indices_idx is the lookup into indices, but
+ # index is the actual world.
+ width = 0
+ indices_idx = random.randint(0, len(indices) - 1)
+
+ # we picked a random spot, but we can get better packing if we start at
+ # the last example with the same score, since we always move down to
+ # smaller examples.
+ while indices_idx < len(indices) - 1 and (
+ sum(self._scores[indices[indices_idx]])
+ == sum(self._scores[indices[indices_idx + 1]])
+ ):
+ indices_idx += 1
+
+ # quit early if we eat our full buffer
+ while indices:
+ index = indices[indices_idx]
+ this_width = self._ceil(sum(self._scores[index]))
+ new_width = max(width, this_width)
+ # compute the cost of the new batch
+ new_bsz = self._ceil(len(batch) + 1)
+ new_words = new_width * new_bsz
+ if new_words <= self.max_words and new_bsz <= self.max_batch_size:
+ # cool, this one fits, let's add it
+ width = new_width
+ batch.append(index)
+ indices.pop(indices_idx)
+ indices_idx = max(indices_idx - 1, 0)
+ else:
+ # we'd overfill our buffer, give up
+ break
+
+ # Always have a batch size that's a multiple of 4, for fp16's sake.
+ while len(batch) > 4 and len(batch) % 4 != 0:
+ # pop off the shortest one. it's easiest to pack in later
+ batch.pop(-1)
+
+ # double check our assumed invariant
+ assert self._ceil(width) * self._ceil(len(batch)) <= self.max_words
+ assert self._ceil(len(batch)) <= self.max_batch_size
+
+ # great, this batch is good to go! let's run it!
+ acts = self.world.get_model_agent().batch_act([self._obs[i] for i in batch])
+ # broadcast the results back to all the models
+ for i, act in zip(batch, acts):
+ # we need to make sure that the teachers saw the result
+ self.worlds[i].get_task_agent().observe(act)
+ # and that the agent copies saw their own voice
+ self.worlds[i].get_model_agent().self_observe(act)
+
+ # move these worlds forward
+ act = self.worlds[i].get_task_agent().act()
+ obs = self.worlds[i].get_model_agent().observe(act)
+ self._scores[i] = self._score(obs)
+ self._obs[i] = obs
+
+ # update metrics
+ self.total_parleys += 1
+ self.total_exs += len(batch)
+
+ def get_total_epochs(self):
+ return self.total_exs / self.num_examples()
+
+ def report(self):
+ return self.world.report()
+
+
class HogwildProcess(Process):
"""
Process child used for ``HogwildWorld``.
@@ -1164,6 +1412,12 @@ def get_task_agent(self):
"""
return self.inner_world.get_task_agent()
+ def get_model_agent(self):
+ """
+ Return model agent of inner world.
+ """
+ return self.inner_world.get_model_agent()
+
def get_total_exs(self):
"""
Return the number of processed examples.
@@ -1322,6 +1576,8 @@ def create_task(opt: Opt, user_agents, default_world=None):
# use hogwild world if more than one thread requested
# hogwild world will create sub batch worlds as well if bsz > 1
world = HogwildWorld(opt, world)
+ elif opt.get('batchsize', 1) > 1 and opt.get('dynamic_batching'):
+ world = DynamicBatchWorld(opt, world)
elif opt.get('batchsize', 1) > 1:
# otherwise check if should use batchworld
world = BatchWorld(opt, world)
diff --git a/parlai/scripts/train_model.py b/parlai/scripts/train_model.py
--- a/parlai/scripts/train_model.py
+++ b/parlai/scripts/train_model.py
@@ -255,7 +255,7 @@ def _run_single_eval(opt, valid_world, max_exs):
if cnt == 0 and opt['display_examples']:
print(valid_world.display() + '\n~~')
print(valid_world.report())
- cnt += valid_world.opt['batchsize']
+ cnt = valid_world.report().get('exs') or 0
valid_report = valid_world.report()
valid_world.reset() # make sure world doesn't remember valid data
diff --git a/parlai/utils/torch.py b/parlai/utils/torch.py
--- a/parlai/utils/torch.py
+++ b/parlai/utils/torch.py
@@ -10,6 +10,7 @@
from typing import Union, Optional, Tuple, Any, List, Sized
from parlai.utils.misc import warn_once
+
try:
import torch
except ImportError:
@@ -22,6 +23,13 @@
NEAR_INF = 1e20
NEAR_INF_FP16 = 65504
+# according to the tensor cores documentation from nvidia, the matmuls in fp16
+# must all be multiples of 8 in order to get the speedup from fp16. We set this
+# as a constant here for clarity and convenience. See
+# https://devblogs.nvidia.com/programming-tensor-cores-cuda-9/ for more
+# information.
+FP16_PAD_SIZE = 8
+
def neginf(dtype: torch.dtype) -> float:
"""
@@ -58,7 +66,7 @@ def padded_tensor(
:param bool use_cuda: if true, places `padded` on GPU
:param bool left_padded:
:param int max_len: if None, the max length is the maximum item length
- :param bool fp16friendly: if True, pads the time dimension to be a multiple of 8.
+ :param bool fp16friendly: if True, pads the time dimension to be a multiple of 4.
:returns: (padded, lengths) tuple
:rtype: (Tensor[int64], list[int])
@@ -74,9 +82,9 @@ def padded_tensor(
# if input tensors are empty, we should expand to nulls
t = max(t, 1)
- if fp16friendly and (t % 8 != 0):
- # pad to be a multiple of 8 to ensure we use the tensor cores
- t += 8 - (t % 8)
+ if fp16friendly and (t % FP16_PAD_SIZE != 0):
+ # pad to be fp16 friendly
+ t += FP16_PAD_SIZE - (t % FP16_PAD_SIZE)
if isinstance(items[0], torch.Tensor):
# keep type of input tensors, they may already be cuda ones
@@ -131,8 +139,8 @@ def padded_3d(
c = max(len(item) for row in tensors for item in row) # type: ignore
# pad empty tensors
- if fp16friendly and c % 8 != 0:
- c += 8 - (c % 8)
+ if fp16friendly and c % FP16_PAD_SIZE != 0:
+ c += FP16_PAD_SIZE - (c % FP16_PAD_SIZE)
c = max(c, 1)
output = torch.full((a, b, c), pad_idx, dtype=dtype)
diff --git a/projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py b/projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py
--- a/projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py
+++ b/projects/wizard_of_wikipedia/wizard_transformer_ranker/wizard_dict.py
@@ -59,6 +59,9 @@ def __init__(self, opt, shared=None):
if opt.get('dict_file'):
self.save_path = opt['dict_file']
+ # cache unk token for later
+ self._unk_token_idx = self.tok2ind.get(self.unk_token)
+
def tokenize(self, text, building=False):
"""
Returns a sequence of tokens from the iterable.
| diff --git a/parlai/tasks/integration_tests/agents.py b/parlai/tasks/integration_tests/agents.py
--- a/parlai/tasks/integration_tests/agents.py
+++ b/parlai/tasks/integration_tests/agents.py
@@ -184,6 +184,15 @@ def setup_data(self, fold):
yield (text, [text], 0, cands), True
+class VariableLengthTeacher(CandidateTeacher):
+ def build_corpus(self):
+ corpus = super().build_corpus()
+ for i in range(len(corpus)):
+ length = len(corpus[i]) - i % 3
+ corpus[i] = corpus[i][:length]
+ return corpus
+
+
class CandidateTeacherDataset(Dataset):
"""
Candidate Teacher, in Pytorch Dataset form.
@@ -316,6 +325,14 @@ def num_examples(self):
return self.example_size * self.num_episodes()
+class MultiturnTeacher(MultiturnCandidateTeacher):
+ """
+ Simple alias.
+ """
+
+ pass
+
+
class NocandidateTeacher(CandidateTeacher):
"""
Strips the candidates so the model can't see any options.
diff --git a/tests/test_dynamicbatching.py b/tests/test_dynamicbatching.py
new file mode 100644
--- /dev/null
+++ b/tests/test_dynamicbatching.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+from typing import Dict, Any
+
+import unittest
+from parlai.core.opt import Opt
+import parlai.utils.testing as testing_utils
+from parlai.tasks.integration_tests.agents import NUM_TEST, EXAMPLE_SIZE
+
+_TASK = 'integration_tests:variable_length'
+_DEFAULT_OPTIONS = {
+ 'batchsize': 8,
+ 'dynamic_batching': 'full',
+ 'optimizer': 'adamax',
+ 'learningrate': 7e-3,
+ 'num_epochs': 1,
+ 'n_layers': 1,
+ 'n_heads': 1,
+ 'ffn_size': 32,
+ 'embedding_size': 32,
+ 'truncate': 8,
+ 'model': 'transformer/generator',
+ 'task': _TASK,
+}
+
+
+# TODO tests to write:
+# - multiple validation runs, streaming/not streaming
+# - ranking model
+
+
+class TestDynamicBatching(unittest.TestCase):
+ def _test_correct_processed(self, num_goal: int, **kwargs: Dict[str, Any]):
+ opt = Opt({**_DEFAULT_OPTIONS, **kwargs})
+ valid_report, test_report = testing_utils.train_model(opt)
+ self.assertEqual(valid_report['exs'], num_goal)
+ self.assertEqual(test_report['exs'], num_goal)
+
+ def test_no_truncate(self):
+ with self.assertRaises(ValueError):
+ testing_utils.train_model(Opt({**_DEFAULT_OPTIONS, **{'truncate': -1}}))
+
+ def test_no_batch_act(self):
+ """
+ Fail when the agent doesn't support dynamic batching.
+ """
+ with self.assertRaises(TypeError):
+ testing_utils.train_model(model='repeat_label', task=_TASK)
+
+ with self.assertRaises(TypeError):
+ testing_utils.eval_model(model='repeat_label', task=_TASK)
+
+ def test_ranking(self):
+ self._test_correct_processed(
+ NUM_TEST, model='transformer/ranker', datatype='train'
+ )
+
+ def test_ranking_streaming(self):
+ self._test_correct_processed(
+ NUM_TEST, model='transformer/ranker', datatype='train:stream'
+ )
+
+ def test_training(self):
+ self._test_correct_processed(NUM_TEST, datatype='train')
+
+ def test_streaming(self):
+ self._test_correct_processed(NUM_TEST, datatype='train:stream')
+
+ def test_multiworld(self):
+ self._test_correct_processed(
+ NUM_TEST + NUM_TEST * EXAMPLE_SIZE,
+ task='integration_tests:variable_length,integration_tests:multiturn',
+ )
+
+ def test_multiworld_stream(self):
+ self._test_correct_processed(
+ NUM_TEST + NUM_TEST * EXAMPLE_SIZE,
+ task='integration_tests:variable_length,integration_tests:multiturn',
+ datatype='train:stream',
+ )
+
+
+class TestBatchSort(unittest.TestCase):
+ def _test_correct_processed(self, num_goal: int, **kwargs: Dict[str, Any]):
+ opt = Opt({**_DEFAULT_OPTIONS, **kwargs})
+ opt['dynamic_batching'] = 'batchsort'
+ valid_report, test_report = testing_utils.train_model(opt)
+ self.assertEqual(valid_report['exs'], num_goal)
+ self.assertEqual(test_report['exs'], num_goal)
+
+ def test_no_batch_act(self):
+ """
+ Fail when the agent doesn't support dynamic batching.
+ """
+ with self.assertRaises(TypeError):
+ testing_utils.train_model(model='repeat_label', task=_TASK)
+
+ with self.assertRaises(TypeError):
+ testing_utils.eval_model(model='repeat_label', task=_TASK)
+
+ def test_ranking(self):
+ self._test_correct_processed(
+ NUM_TEST, model='transformer/ranker', datatype='train'
+ )
+
+ def test_ranking_streaming(self):
+ self._test_correct_processed(
+ NUM_TEST, model='transformer/ranker', datatype='train:stream'
+ )
+
+ def test_training(self):
+ self._test_correct_processed(NUM_TEST, datatype='train')
+
+ def test_streaming(self):
+ self._test_correct_processed(NUM_TEST, datatype='train:stream')
+
+ def test_multiworld(self):
+ self._test_correct_processed(
+ NUM_TEST + NUM_TEST * EXAMPLE_SIZE,
+ task='integration_tests:variable_length,integration_tests:multiturn',
+ )
+
+ def test_multiworld_stream(self):
+ self._test_correct_processed(
+ NUM_TEST + NUM_TEST * EXAMPLE_SIZE,
+ task='integration_tests:variable_length,integration_tests:multiturn',
+ datatype='train:stream',
+ )
+
+
+if __name__ == '__main__':
+ unittest.main()
| Dynamic Batching
We'd like to support dynamic batching, which is an advancement over batchsort and similar. The idea is that if we pack shorter conversations together, we can fit MORE of them in.
Dependencies:
#2043, #2191
The basic idea is that we'll have something like 1024 or so copies of our batch worlds, and we'll dynamically decide which of them to advance at a time. This is why the `self_observe` work is so important: we won't have any guarantee about which "index" a subworld occupies at any given time.
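To make the packing step concrete, here is a minimal sketch of the idea only (not the eventual implementation): the token budget `max_words`, the round-to-8 padding, and the shortest-first greedy policy are all illustrative assumptions.
```python
import random
from typing import List


def ceil8(n: int) -> int:
    # round lengths up to a multiple of 8, mirroring fp16/TensorCore-friendly padding
    return ((n + 7) // 8) * 8


def pack_batch(lengths: List[int], max_words: int, max_bsz: int) -> List[int]:
    """Greedily pick indices of short, similarly sized examples under a token budget."""
    order = sorted(range(len(lengths)), key=lambda i: (lengths[i], random.random()))
    batch: List[int] = []
    width = 0
    for i in order:
        new_width = max(width, ceil8(lengths[i]))
        # the cost of a batch is its padded width times its size
        if new_width * (len(batch) + 1) > max_words or len(batch) + 1 > max_bsz:
            break
        batch.append(i)
        width = new_width
    return batch


if __name__ == '__main__':
    lens = [random.randint(5, 120) for _ in range(1024)]
    print(pack_batch(lens, max_words=2048, max_bsz=64))
```
In the real version the buffer of worlds would be refilled as examples finish, which is exactly where the `self_observe` guarantees come in.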
A few requirements:
- [ ] Should be an option (`-dynbs`?).
- [ ] Should essentially do batchsort "for free"
- [ ] Good testing is important
- [ ] Prototype can just hardcode some stuff or use a `--words-per-batch` option or similar.
Optional features:
- [ ] Doing batchify off the main thread would be incredible.
- [ ] Should use `--truncate` and/or a magic iteration to try to identify the max batch size for free, by catching OOMs. OOM handling is extremely finicky, so the truncate magic might be the way to go.
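For the OOM-catching idea in the last item, a hypothetical probe could look like the following; `find_max_batchsize` and its doubling policy are assumptions, not an existing utility.
```python
import torch


def find_max_batchsize(try_batch, start=1, limit=4096):
    """Double the batch size until a CUDA OOM; return the last size that worked (0 if none did)."""
    bsz, best = start, 0
    while bsz <= limit:
        try:
            try_batch(bsz)  # caller-supplied callable running one forward/backward at this size
            best = bsz
            bsz *= 2
        except RuntimeError as e:
            if 'out of memory' not in str(e):
                raise
            torch.cuda.empty_cache()
            break
    return best
```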
| 2019-12-15T19:14:20Z | [] | [] |
|
facebookresearch/ParlAI | 2,346 | facebookresearch__ParlAI-2346 | [
"1876"
] | 916b0e496f1ccabf5e590fb31711a777c739afaa | diff --git a/docs/source/generate_agent_list.py b/docs/source/generate_agent_list.py
--- a/docs/source/generate_agent_list.py
+++ b/docs/source/generate_agent_list.py
@@ -3,10 +3,83 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.abs
+import parlai.core.agents
import parlai.agents
+from parlai.core.params import ParlaiParser
import os
import pkgutil
+import importlib
+import inspect
+import io
+import csv
+
+
+"""
+Extract the readme and CLI args for all of the standard agents.
+"""
+
+
+def _make_argparse_table(class_):
+ """
+ Build the reStructuredText table containing the args and descriptions.
+ """
+ readme = []
+ parser = ParlaiParser(False, False)
+ class_.add_cmdline_args(parser)
+ # group by whatever ArgumentGroups there are
+ for ag in parser._action_groups:
+ actions = []
+ # get options defined within only this group
+ for action in ag._group_actions:
+ if hasattr(action, 'hidden') and action.hidden:
+ # some options are marked hidden
+ continue
+ action_strings = ", ".join(f'``{a}``' for a in action.option_strings)
+ description = []
+ # start with the help message
+ if action.help:
+ h = action.help
+ if not h[0].isupper():
+ h = h[0].upper() + h[1:]
+ description += [h]
+ # list choices if there are any
+ if action.choices:
+ description += [
+ "Choices: " + ", ".join(f'``{c}``' for c in action.choices) + "."
+ ]
+ # list default and recommended values.
+ default_value = ""
+ if action.default is not None:
+ default_value += f"Default: ``{action.default}``. "
+ if hasattr(action, 'recommended') and action.recommended:
+ default_value += f"Recommended: ``{action.recommended}``. "
+
+ # special escape for a few args which use a literal newline as their default
+ if default_value:
+ default_value = default_value.replace("\n", "\\n")
+ description.append(default_value)
+
+ description = "\n".join(description)
+ # escape for the fact that we're inserting this inside a table
+ description = description.replace("\n", "\n \n ")
+ actions.append((action_strings, description))
+
+ if not actions:
+ continue
+
+ # render the table
+ readme.append(f"```eval_rst\n")
+ readme.append(f".. csv-table:: {ag.title}\n")
+ readme.append(f' :widths: 35, 65\n\n')
+ cout = io.StringIO()
+ csvw = csv.writer(cout, csv.unix_dialect, delimiter=",")
+ for row in actions:
+ cout.write(" ")
+ csvw.writerow(row)
+ readme.append(cout.getvalue())
+ readme.append("```\n\n")
+ return readme
def prepare_agent_readme(agent):
@@ -28,7 +101,35 @@ def prepare_agent_readme(agent):
if '# ' not in readme[0]:
readme[0] = f'# {agent}'
- return ''.join(readme)
+ # try to import all of the agents and look for their classes
+ root = os.path.join(parlai.agents.__path__[0], agent)
+ submodules = pkgutil.iter_modules([root])
+ for sm in submodules:
+ # look in the main folder
+ if not (sm.name == agent or sm.name == 'agents'):
+ continue
+ module_name = f'parlai.agents.{agent}.{sm.name}'
+ module = importlib.import_module(module_name)
+ for itemname in dir(module):
+ # skip all private items
+ if itemname.startswith('_'):
+ continue
+ item = getattr(module, itemname)
+ # avoid catching TorchAgent/TorchRankerAgent/...
+ if (
+ inspect.isclass(item)
+ and issubclass(item, parlai.core.agents.Agent)
+ and hasattr(item, 'add_cmdline_args')
+ and not inspect.isabstract(item)
+ ):
+ # gather all the options
+ options = _make_argparse_table(item)
+ if options:
+ # if there were no options, don't mention it
+ readme.append(f"## {itemname} Options\n\n")
+ readme += options
+
+ return readme
def write_all_agents():
@@ -39,15 +140,10 @@ def write_all_agents():
file object to write to
"""
os.makedirs('agent_refs', exist_ok=True)
- agents = [
- name
- for _, name, _ in pkgutil.iter_modules(
- [os.path.dirname(parlai.agents.__file__)]
- )
- ]
+ agents = [name for _, name, _ in pkgutil.iter_modules(parlai.agents.__path__)]
for agent in agents:
with open(f'agent_refs/{agent}.md', 'w') as fout:
- fout.write(prepare_agent_readme(agent))
+ fout.write(''.join(prepare_agent_readme(agent)))
if __name__ == '__main__':
diff --git a/parlai/agents/bert_classifier/bert_classifier.py b/parlai/agents/bert_classifier/bert_classifier.py
--- a/parlai/agents/bert_classifier/bert_classifier.py
+++ b/parlai/agents/bert_classifier/bert_classifier.py
@@ -26,11 +26,9 @@
try:
from pytorch_pretrained_bert import BertModel
except ImportError:
- raise Exception(
- (
- "BERT rankers needs pytorch-pretrained-BERT installed. \n "
- "pip install pytorch-pretrained-bert"
- )
+ raise ImportError(
+ "BERT rankers needs pytorch-pretrained-BERT installed. \n "
+ "pip install pytorch-pretrained-bert"
)
diff --git a/parlai/agents/bert_ranker/bert_dictionary.py b/parlai/agents/bert_ranker/bert_dictionary.py
--- a/parlai/agents/bert_ranker/bert_dictionary.py
+++ b/parlai/agents/bert_ranker/bert_dictionary.py
@@ -13,7 +13,6 @@
'BERT rankers needs pytorch-pretrained-BERT installed. \n '
'pip install pytorch-pretrained-bert'
)
-
from .helpers import VOCAB_PATH
import os
diff --git a/parlai/agents/bert_ranker/bert_ranker.py b/parlai/agents/bert_ranker/bert_ranker.py
--- a/parlai/agents/bert_ranker/bert_ranker.py
+++ b/parlai/agents/bert_ranker/bert_ranker.py
@@ -3,9 +3,9 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-from .biencoder_ranker import BiEncoderRankerAgent # NOQA
-from .crossencoder_ranker import CrossEncoderRankerAgent # NOQA
-from .bothencoder_ranker import BothEncoderRankerAgent # NOQA
+from .bi_encoder_ranker import BiEncoderRankerAgent # NOQA
+from .cross_encoder_ranker import CrossEncoderRankerAgent # NOQA
+from .both_encoder_ranker import BothEncoderRankerAgent # NOQA
from parlai.core.torch_agent import TorchAgent
diff --git a/parlai/agents/transformer/transformer.py b/parlai/agents/transformer/transformer.py
--- a/parlai/agents/transformer/transformer.py
+++ b/parlai/agents/transformer/transformer.py
@@ -153,7 +153,7 @@ def add_cmdline_args(cls, argparser):
type=str,
default='sqrt',
choices=['cosine', 'dot', 'sqrt'],
- help='similarity for basic attention mechanism'
+ help='similarity for basic attention mechanism '
'when using transformer to encode memories',
)
# model specific arguments
diff --git a/parlai/core/params.py b/parlai/core/params.py
--- a/parlai/core/params.py
+++ b/parlai/core/params.py
@@ -191,7 +191,7 @@ def fix_underscores(args):
return args
-class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+class CustomHelpFormatter(argparse.HelpFormatter):
"""
Produce a custom-formatted `--help` option.
@@ -199,8 +199,8 @@ class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
"""
def __init__(self, *args, **kwargs):
- kwargs['max_help_position'] = 8
- kwargs['width'] = 130
+ kwargs['max_help_position'] = 6
+ kwargs['width'] = 80
super().__init__(*args, **kwargs)
def _format_action_invocation(self, action):
@@ -210,6 +210,22 @@ def _format_action_invocation(self, action):
args_string = self._format_args(action, default)
return ', '.join(action.option_strings) + ' ' + args_string
+ def _get_help_string(self, action):
+ help = action.help
+ if '%(default)' not in action.help:
+ if action.default is not argparse.SUPPRESS:
+ defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE]
+ if action.option_strings or action.nargs in defaulting_nargs:
+ help += ' (default: %(default)s)'
+ if (
+ hasattr(action, 'recommended')
+ and action.recommended
+ and action.recommended != action.default
+ ):
+ help += '(recommended: %(recommended)s)'
+ help = help.replace(')(recommended', ', recommended')
+ return help
+
class ParlaiParser(argparse.ArgumentParser):
"""
@@ -241,6 +257,7 @@ def __init__(
allow_abbrev=False,
conflict_handler='resolve',
formatter_class=CustomHelpFormatter,
+ add_help=add_parlai_args,
)
self.register('type', 'bool', str2bool)
self.register('type', 'floats', str2floats)
@@ -1135,35 +1152,32 @@ def show_advanced_args(self):
self._show_advanced_args = True
return self._show_advanced_args
- def _handle_hidden_args(self, kwargs):
+ def _handle_custom_options(self, kwargs):
"""
- Hide help messages for arguments marked as hidden.
+ Handle custom parlai options.
+
+ Includes hidden, recommended. Future may include no_save and no_override.
"""
+ action_attr = {}
+ if 'recommended' in kwargs:
+ rec = kwargs.pop('recommended')
+ action_attr['recommended'] = rec
+ action_attr['hidden'] = kwargs.get('hidden', False)
if 'hidden' in kwargs:
- flag = kwargs['hidden']
- del kwargs['hidden']
- if flag and not self.show_advanced_args:
+ hidden = kwargs.pop('hidden')
+ if hidden:
kwargs['help'] = argparse.SUPPRESS
- return kwargs
-
- def _augment_help_msg(self, kwargs):
- """
- Add recommended value to help string if recommended exists.
- """
- if 'help' in kwargs:
- if 'recommended' in kwargs:
- kwargs['help'] += " (recommended: " + str(kwargs['recommended']) + ")"
- del kwargs['recommended']
- return kwargs
+ return kwargs, action_attr
def add_argument(self, *args, **kwargs):
"""
Override to convert underscores to hyphens for consistency.
"""
- kwargs = self._augment_help_msg(kwargs)
- return super().add_argument(
- *fix_underscores(args), **self._handle_hidden_args(kwargs)
- )
+ kwargs, newattr = self._handle_custom_options(kwargs)
+ action = super().add_argument(*fix_underscores(args), **kwargs)
+ for k, v in newattr.items():
+ setattr(action, k, v)
+ return action
def add_argument_group(self, *args, **kwargs):
"""
@@ -1173,10 +1187,11 @@ def add_argument_group(self, *args, **kwargs):
original_add_arg = arg_group.add_argument
def ag_add_argument(*args, **kwargs):
- kwargs = self._augment_help_msg(kwargs)
- return original_add_arg(
- *fix_underscores(args), **self._handle_hidden_args(kwargs)
- )
+ kwargs, newattr = self._handle_custom_options(kwargs)
+ action = original_add_arg(*fix_underscores(args), **kwargs)
+ for k, v in newattr.items():
+ setattr(action, k, v)
+ return action
arg_group.add_argument = ag_add_argument # override _ => -
arg_group.add_argument_group = self.add_argument_group
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -741,6 +741,7 @@ def _get_init_model(self, opt: Opt, shared):
return init_model, is_finetune
+ @abstractmethod
def build_model(self):
"""
Construct the model and return it.
diff --git a/parlai/core/torch_generator_agent.py b/parlai/core/torch_generator_agent.py
--- a/parlai/core/torch_generator_agent.py
+++ b/parlai/core/torch_generator_agent.py
@@ -241,7 +241,7 @@ def forward(self, *xs, ys=None, prev_enc=None, maxlen=None, bsz=None):
return scores, preds, encoder_states
-class TorchGeneratorAgent(TorchAgent):
+class TorchGeneratorAgent(TorchAgent, ABC):
"""
Abstract Generator agent; only meant to be extended.
diff --git a/parlai/nn/lr_scheduler.py b/parlai/nn/lr_scheduler.py
--- a/parlai/nn/lr_scheduler.py
+++ b/parlai/nn/lr_scheduler.py
@@ -139,14 +139,14 @@ def add_cmdline_args(cls, argparser):
default=-1,
help='Number of train steps the scheduler should take after warmup. '
'Training is terminated after this many steps. This should only be '
- 'set for --lr_scheduler cosine or linear',
+ 'set for --lr-scheduler cosine or linear',
)
lr_group.add_argument(
'--invsqrt-lr-decay-gamma',
type=int,
default=-1,
help='Constant used only to find the lr multiplier for the invsqrt '
- 'scheduler. Must be set for --lr_scheduler invsqrt',
+ 'scheduler. Must be set for --lr-scheduler invsqrt',
)
lr_group.add_argument(
'--warmup-updates',
@@ -431,7 +431,7 @@ def __init__(
"""
super().__init__(hard_reset, warmup_updates, warmup_rate)
if max_lr_steps <= 0:
- raise ValueError('--lr-scheduler cosine requires setting --max_lr_steps')
+ raise ValueError('--lr-scheduler cosine requires setting --max-lr-steps')
self.max_lr_steps = max_lr_steps
self.scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, max_lr_steps)
@@ -466,7 +466,7 @@ def __init__(
"""
super().__init__(hard_reset, warmup_updates, warmup_rate)
if max_lr_steps <= 0:
- raise ValueError('--lr-scheduler linear requires setting --max_lr_steps')
+ raise ValueError('--lr-scheduler linear requires setting --max-lr-steps')
self.max_lr_steps = max_lr_steps
self.scheduler = optim.lr_scheduler.LambdaLR(optimizer, self._linear_lr)
| diff --git a/tests/test_params.py b/tests/test_params.py
--- a/tests/test_params.py
+++ b/tests/test_params.py
@@ -9,7 +9,6 @@
"""
import os
-import re
import json
import unittest
from parlai.core.params import ParlaiParser
@@ -61,25 +60,25 @@ def test_recommendations_single(self):
"""
Test whether recommended args work for non-group.
"""
- parser = ParlaiParser()
+ parser = ParlaiParser(False, False)
parser.add_argument(
'-bs',
'--batchsize',
default=1,
type=int,
help='batch size for minibatch training schemes',
- recommended="10",
+ recommended=1337,
)
-
parser.parse_args([])
help_str = parser.format_help()
- assert re.search(r'--batchsize[^\n]*\n[^\n]*\(recommended: 10\)', help_str)
+ assert 'recommended:' in help_str
+ assert '1337' in help_str
def test_recommendations_group(self):
"""
Test whether recommended args work for a group.
"""
- parser = ParlaiParser()
+ parser = ParlaiParser(False, False)
parser_grp = parser.add_argument_group('Test Group')
parser_grp.add_argument(
'-bs',
@@ -87,15 +86,15 @@ def test_recommendations_group(self):
default=1,
type=int,
help='batch size for minibatch training schemes',
- recommended=[5, 10, 15],
+ recommended=1337,
)
parser.parse_args([])
help_str = parser.format_help()
- assert re.search(r'Test Group:\n', help_str)
- assert re.search(
- r'--batchsize[^\n]*\n[^\n]*\(recommended: \[5, 10, 15\]\)', help_str
- )
+ assert 'Test Group' in help_str
+ _, latter = help_str.split('Test Group')
+ assert 'recommended:' in latter
+ assert '1337' in latter
if __name__ == '__main__':
| ParlAI auto-generated docs script for all standard agents
We should have a new section of the [docs](https://parl.ai/docs/) called Standard Agents. It should be a revision of the current "reference models" section (see attached).
It should use the docstrings for the agent module in order to populate this information.
It should additionally use `add_cmdline_args` to get a list of all the arguments this agent takes and display them. You may use [`generate_cli.py`](https://github.com/facebookresearch/ParlAI/blob/master/docs/source/generate_cli.py) as a template. Bonus points if you format it as a pretty table.
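A rough sketch of how the option table could be gathered, assuming `load_agent_module` resolves an agent class from its `-m` style name and using `transformer/generator` as an arbitrary example; the rendering below is illustrative, not the final script.
```python
import argparse

from parlai.core.loader import load_agent_module
from parlai.core.params import ParlaiParser

agent_class = load_agent_module('transformer/generator')  # example agent; any standard agent works

parser = ParlaiParser(False, False)
agent_class.add_cmdline_args(parser)

for group in parser._action_groups:
    rows = [
        (', '.join(a.option_strings), (a.help or '').strip())
        for a in group._group_actions
        if a.option_strings and a.help != argparse.SUPPRESS
    ]
    if not rows:
        continue
    print(f'## {group.title}')
    for flags, desc in rows:
        print(f'| `{flags}` | {desc} |')
```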
| 2020-01-18T05:12:03Z | [] | [] |
|
facebookresearch/ParlAI | 2,649 | facebookresearch__ParlAI-2649 | [
"2168"
] | f7f78ad068d8a719f404ddc909ca32a5a9fa4ef4 | diff --git a/parlai/core/params.py b/parlai/core/params.py
--- a/parlai/core/params.py
+++ b/parlai/core/params.py
@@ -1162,6 +1162,9 @@ def _handle_custom_options(self, kwargs):
hidden = kwargs.pop('hidden')
if hidden:
kwargs['help'] = argparse.SUPPRESS
+ if 'type' in kwargs and kwargs['type'] is bool:
+ # common error, we really want simple form
+ kwargs['type'] = 'bool'
return kwargs, action_attr
def add_argument(self, *args, **kwargs):
| diff --git a/tests/test_params.py b/tests/test_params.py
--- a/tests/test_params.py
+++ b/tests/test_params.py
@@ -120,6 +120,46 @@ def test_parse_kwargs(self):
parser = ParlaiParser(True, True)
parser.parse_kwargs(model='transformer/generator', fake_arg='foo')
+ def test_bool(self):
+ """
+ test add_argument(type=bool)
+ """
+ parser = ParlaiParser(True, True)
+ parser.add_argument('--foo', type=bool)
+ opt = parser.parse_args(['--foo', 'true'])
+ assert opt['foo'] is True
+ opt = parser.parse_args(['--foo', 'False'])
+ assert opt['foo'] is False
+ opt = parser.parse_args(['--foo', '0'])
+ assert opt['foo'] is False
+
+ group = parser.add_argument_group('foo container')
+ group.add_argument('--bar', type=bool)
+ opt = parser.parse_args(['--bar', 'true'])
+ assert opt['bar'] is True
+ opt = parser.parse_args(['--bar', 'False'])
+ assert opt['bar'] is False
+ opt = parser.parse_args(['--bar', '0'])
+ assert opt['bar'] is False
+
+ parser = ParlaiParser(True, True)
+ parser.add_argument('--foo', type='bool')
+ opt = parser.parse_args(['--foo', 'true'])
+ assert opt['foo'] is True
+ opt = parser.parse_args(['--foo', 'False'])
+ assert opt['foo'] is False
+ opt = parser.parse_args(['--foo', '0'])
+ assert opt['foo'] is False
+
+ group = parser.add_argument_group('foo container')
+ group.add_argument('--bar', type='bool')
+ opt = parser.parse_args(['--bar', 'true'])
+ assert opt['bar'] is True
+ opt = parser.parse_args(['--bar', 'False'])
+ assert opt['bar'] is False
+ opt = parser.parse_args(['--bar', '0'])
+ assert opt['bar'] is False
+
if __name__ == '__main__':
unittest.main()
| Throw error if bool is used in add_argument?
In our `ParlaiParser.add_argument`, if the `type` argument is a literal `bool`, we should either throw an error or smartly switch to `'bool'` with a warning.
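For context, plain argparse calls `bool()` on the raw string, so any non-empty value (including `'False'`) parses as `True`; the `coerce_bool_type` helper below is only a hypothetical sketch of the silent-switch option.
```python
import argparse

p = argparse.ArgumentParser()
p.add_argument('--foo', type=bool)
print(p.parse_args(['--foo', 'False']).foo)  # True, because bool('False') is True


def coerce_bool_type(kwargs):
    # hypothetical: swap a literal bool for the registered 'bool' (str2bool) type
    if kwargs.get('type') is bool:
        kwargs['type'] = 'bool'
    return kwargs
```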
| 2020-05-09T20:21:51Z | [] | [] |
|
facebookresearch/ParlAI | 2,651 | facebookresearch__ParlAI-2651 | [
"2133"
] | 18f738f139768f99e59d5395a8b086a218d3f495 | diff --git a/parlai/core/build_data.py b/parlai/core/build_data.py
--- a/parlai/core/build_data.py
+++ b/parlai/core/build_data.py
@@ -145,7 +145,7 @@ def mark_done(path, version_string=None):
write.write('\n' + version_string)
-def download(url, path, fname, redownload=False):
+def download(url, path, fname, redownload=False, num_retries=5):
"""
Download file using `requests`.
@@ -155,12 +155,12 @@ def download(url, path, fname, redownload=False):
outfile = os.path.join(path, fname)
download = not os.path.isfile(outfile) or redownload
print("[ downloading: " + url + " to " + outfile + " ]")
- retry = 5
+ retry = num_retries
exp_backoff = [2 ** r for r in reversed(range(retry))]
pbar = tqdm.tqdm(unit='B', unit_scale=True, desc='Downloading {}'.format(fname))
- while download and retry >= 0:
+ while download and retry > 0:
resume_file = outfile + '.part'
resume = os.path.isfile(resume_file)
if resume:
@@ -210,26 +210,24 @@ def download(url, path, fname, redownload=False):
):
retry -= 1
pbar.clear()
- if retry >= 0:
- print('Connection error, retrying. (%d retries left)' % retry)
+ if retry > 0:
+ pl = 'y' if retry == 1 else 'ies'
+ print(f'Connection error, retrying. ({retry} retr{pl} left)')
time.sleep(exp_backoff[retry])
else:
print('Retried too many times, stopped retrying.')
finally:
if response:
response.close()
- if retry < 0:
- raise RuntimeWarning('Connection broken too many times. Stopped retrying.')
+ if retry <= 0:
+ raise RuntimeError('Connection broken too many times. Stopped retrying.')
if download and retry > 0:
pbar.update(done - pbar.n)
if done < total_size:
- raise RuntimeWarning(
- 'Received less data than specified in '
- + 'Content-Length header for '
- + url
- + '.'
- + ' There may be a download problem.'
+ raise RuntimeError(
+ f'Received less data than specified in Content-Length header for '
+ f'{url}. There may be a download problem.'
)
move(resume_file, outfile)
| diff --git a/tests/test_build_data.py b/tests/test_build_data.py
--- a/tests/test_build_data.py
+++ b/tests/test_build_data.py
@@ -6,6 +6,8 @@
import os
from parlai.core import build_data
import unittest
+import unittest.mock
+import requests
import parlai.utils.testing as testing_utils
import multiprocessing
from parlai.core.params import ParlaiParser
@@ -70,6 +72,16 @@ def test_download_multiprocess_chunks(self):
self.assertIn(200, output_statuses, 'unexpected error code')
self.assertIn(403, output_statuses, 'unexpected error code')
+ def test_connectionerror_download(self):
+ with unittest.mock.patch('requests.Session.get') as Session:
+ Session.side_effect = requests.exceptions.ConnectTimeout
+ with testing_utils.tempdir() as tmpdir:
+ with self.assertRaises(RuntimeError):
+ build_data.download(
+ 'http://test.com/bad', tmpdir, 'foo', num_retries=3
+ )
+ assert Session.call_count == 3
+
if __name__ == '__main__':
multiprocessing.set_start_method('spawn')
| Unimportant off-by-one in `build_data.download`
https://github.com/facebookresearch/ParlAI/blob/f261852f688dc1ae9e2382518c757da14a603cac/parlai/core/build_data.py#L75-L80
This reads like there are 5 retries, but there are really 6. Should change this code to be more readable.
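Reduced to just the counting (download, backoff, and exception handling stripped out), the pattern behaves like this:
```python
attempts = 0
retry = 5
while retry >= 0:  # runs for retry = 5, 4, 3, 2, 1, 0
    attempts += 1
    retry -= 1
print(attempts)  # 6, one more than the initial value suggests
```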
| 2020-05-09T20:39:30Z | [] | [] |
|
facebookresearch/ParlAI | 2,676 | facebookresearch__ParlAI-2676 | [
"2172"
] | 6c2e66a9f696571fd336ea58365841fd32d15319 | diff --git a/parlai/core/agents.py b/parlai/core/agents.py
--- a/parlai/core/agents.py
+++ b/parlai/core/agents.py
@@ -43,7 +43,7 @@
from parlai.core.build_data import modelzoo_path
from parlai.core.loader import load_agent_module
from parlai.core.loader import register_agent # noqa: F401
-from parlai.core.opt import Opt, load_opt_file
+from parlai.core.opt import Opt
from parlai.utils.misc import warn_once
import copy
import os
@@ -202,7 +202,7 @@ def compare_init_model_opts(opt: Opt, curr_opt: Opt):
optfile = opt['init_model'] + '.opt'
if not os.path.isfile(optfile):
return
- init_model_opt = load_opt_file(optfile)
+ init_model_opt = Opt.load(optfile)
extra_opts = {}
different_opts = {}
@@ -291,7 +291,7 @@ def create_agent_from_opt_file(opt: Opt):
model_file = opt['model_file']
optfile = model_file + '.opt'
if os.path.isfile(optfile):
- new_opt = load_opt_file(optfile)
+ new_opt = Opt.load(optfile)
# TODO we need a better way to say these options are never copied...
if 'datapath' in new_opt:
# never use the datapath from an opt dump
diff --git a/parlai/core/opt.py b/parlai/core/opt.py
--- a/parlai/core/opt.py
+++ b/parlai/core/opt.py
@@ -13,6 +13,20 @@
import pickle
import traceback
+from typing import List
+
+# these keys are automatically removed upon save. This is a rather blunt hammer.
+# It's preferred you indicate this at option definiton time.
+__AUTOCLEAN_KEYS__: List[str] = [
+ "override",
+ "batchindex",
+ "download_path",
+ "datapath",
+ "batchindex",
+ # we don't save interactive mode, it's only decided by scripts or CLI
+ "interactive_mode",
+]
+
class Opt(dict):
"""
@@ -81,17 +95,39 @@ def display_history(self, key):
else:
return f'No history for {key}'
+ def save(self, filename: str) -> None:
+ """
+ Save the opt to disk.
-def load_opt_file(optfile: str) -> Opt:
- """
- Load an Opt from disk.
- """
- try:
- # try json first
- with open(optfile, 'r') as t_handle:
- opt = json.load(t_handle)
- except UnicodeDecodeError:
- # oops it's pickled
- with open(optfile, 'rb') as b_handle:
- opt = pickle.load(b_handle)
- return Opt(opt)
+ Attempts to 'clean up' any residual values automatically.
+ """
+ # start with a shallow copy
+ dct = dict(self)
+
+ # clean up some things we probably don't want to save
+ for key in __AUTOCLEAN_KEYS__:
+ if key in dct:
+ del dct[key]
+
+ with open(filename, 'w', encoding='utf-8') as f:
+ json.dump(dct, fp=f, indent=4)
+ # extra newline for convenience of working with jq
+ f.write('\n')
+
+ @classmethod
+ def load(cls, optfile: str) -> 'Opt':
+ """
+ Load an Opt from disk.
+ """
+ try:
+ # try json first
+ with open(optfile, 'r') as t_handle:
+ dct = json.load(t_handle)
+ except UnicodeDecodeError:
+ # oops it's pickled
+ with open(optfile, 'rb') as b_handle:
+ dct = pickle.load(b_handle)
+ for key in __AUTOCLEAN_KEYS__:
+ if key in dct:
+ del dct[key]
+ return cls(dct)
diff --git a/parlai/core/params.py b/parlai/core/params.py
--- a/parlai/core/params.py
+++ b/parlai/core/params.py
@@ -26,7 +26,7 @@
from parlai.core.build_data import modelzoo_path
from parlai.core.loader import load_teacher_module, load_agent_module, load_world_module
from parlai.tasks.tasks import ids_to_tasks
-from parlai.core.opt import Opt, load_opt_file
+from parlai.core.opt import Opt
from typing import List, Optional
@@ -123,7 +123,7 @@ def get_model_name(opt):
model_file = modelzoo_path(opt.get('datapath'), model_file)
optfile = model_file + '.opt'
if os.path.isfile(optfile):
- new_opt = load_opt_file(optfile)
+ new_opt = Opt.load(optfile)
model = new_opt.get('model', None)
return model
@@ -868,7 +868,7 @@ def _load_known_opts(self, optfile, parsed):
Called before args are parsed; ``_load_opts`` is used for actually overriding
opts after they are parsed.
"""
- new_opt = load_opt_file(optfile)
+ new_opt = Opt.load(optfile)
for key, value in new_opt.items():
# existing command line parameters take priority.
if key not in parsed or parsed[key] is None:
@@ -876,7 +876,7 @@ def _load_known_opts(self, optfile, parsed):
def _load_opts(self, opt):
optfile = opt.get('init_opt')
- new_opt = load_opt_file(optfile)
+ new_opt = Opt.load(optfile)
for key, value in new_opt.items():
# existing command line parameters take priority.
if key not in opt:
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -21,9 +21,7 @@
from typing import Dict, Any, Union, List, Tuple, Optional
from abc import ABC, abstractmethod
-from copy import deepcopy
from collections import deque
-import json
import random
import os
import torch
@@ -1788,19 +1786,10 @@ def save(self, path=None):
if states: # anything found to save?
with open(path, 'wb') as write:
torch.save(states, write)
-
# save opt file
- with open(path + '.opt', 'w', encoding='utf-8') as handle:
- if hasattr(self, 'model_version'):
- self.opt['model_version'] = self.model_version()
- saved_opts = deepcopy(self.opt)
- if 'interactive_mode' in saved_opts:
- # We do not save the state of interactive mode, it is only decided
- # by scripts or command line.
- del saved_opts['interactive_mode']
- json.dump(saved_opts, handle, indent=4)
- # for convenience of working with jq, make sure there's a newline
- handle.write('\n')
+ if hasattr(self, 'model_version'):
+ self.opt['model_version'] = self.model_version()
+ self.opt.save(path + '.opt')
def load_state_dict(self, state_dict):
"""
| diff --git a/tests/test_opt.py b/tests/test_opt.py
new file mode 100644
--- /dev/null
+++ b/tests/test_opt.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python3
+
+# Copyright (c) Facebook, Inc. and its affiliates.
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import os
+import unittest
+import parlai.utils.testing as testing_utils
+from parlai.core.opt import Opt
+
+"""
+Test Opt and related mechanisms.
+"""
+
+
+class TestOpt(unittest.TestCase):
+ def test_save_load(self):
+ o = Opt({'a': 3, 'b': 'foo'})
+ with testing_utils.tempdir() as tmpdir:
+ fn = os.path.join(tmpdir, "opt")
+ o.save(fn)
+ o2 = Opt.load(fn)
+ assert o == o2
+
+ def test_save_withignore(self):
+ o = Opt({'a': 3, 'b': 'foo', 'override': {'a': 3}})
+ with testing_utils.tempdir() as tmpdir:
+ fn = os.path.join(tmpdir, "opt")
+ o.save(fn)
+ o2 = Opt.load(fn)
+ assert o != o2
+ assert 'override' not in o2
| Opt save/load
Build save and load functions into `Opt`, and use them. This should help refactor out some nastiness in params.py.
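A minimal sketch of the shape this could take; `Opt` here is just a stand-in dict subclass, and the keys scrubbed on save are illustrative.
```python
import json


class Opt(dict):
    def save(self, filename: str) -> None:
        # drop runtime-only keys such as 'override' before writing (illustrative list)
        dct = {k: v for k, v in self.items() if k not in ('override', 'batchindex')}
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(dct, f, indent=4)
            f.write('\n')  # trailing newline, convenient for jq

    @classmethod
    def load(cls, filename: str) -> 'Opt':
        with open(filename, 'r') as f:
            return cls(json.load(f))
```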
| 2020-05-20T04:46:19Z | [] | [] |
|
facebookresearch/ParlAI | 2,692 | facebookresearch__ParlAI-2692 | [
"2338"
] | a969b64aecab004b9d53d7cb8a9f81a027fdfc50 | diff --git a/conftest.py b/conftest.py
--- a/conftest.py
+++ b/conftest.py
@@ -13,51 +13,102 @@
import os
import pathlib
-import random
-from pytest import ExitCode
+import collections
+import pytest
+import subprocess
# TODO: rename the folders nicer so they make more sense, maybe even have
# a 1:1 correspondance with the circleci name
-def pytest_collection_modifyitems(config, items):
- # handle circleci parallelism
- if 'CIRCLE_NODE_TOTAL' in os.environ:
- total = int(os.environ['CIRCLE_NODE_TOTAL'])
- index = int(os.environ['CIRCLE_NODE_INDEX'])
+# -----------------------------------------------------------------------
+# From https://github.com/ryanwilsonperkin/pytest-circleci-parallelized.
+# MIT licensed, Copyright Ryan Wilson-Perkin.
+# -----------------------------------------------------------------------
+def get_class_name(item):
+ class_name, module_name = None, None
+ for parent in reversed(item.listchain()):
+ if isinstance(parent, pytest.Class):
+ class_name = parent.name
+ elif isinstance(parent, pytest.Module):
+ module_name = parent.module.__name__
+ break
+
+ if class_name:
+ return "{}.{}".format(module_name, class_name)
else:
- total = 1
- index = 0
+ return module_name
+
+
+def filter_tests_with_circleci(test_list):
+ circleci_input = "\n".join(test_list).encode("utf-8")
+ p = subprocess.Popen(
+ [
+ "circleci",
+ "tests",
+ "split",
+ "--split-by=timings",
+ "--timings-type=classname",
+ ],
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ )
+ circleci_output, _ = p.communicate(circleci_input)
+ return [
+ line.strip() for line in circleci_output.decode("utf-8").strip().split("\n")
+ ]
+
+
+# -----------------------------------------------------------------------
+MARKER_RULES = [
+ ('parlai_internal', 'test'),
+ ('nightly/gpu', 'nightly_gpu'),
+ ('nightly/cpu/', 'nightly_cpu'),
+ ('datatests/', 'data'),
+ ('tasks/', 'tasks'),
+ ('parlai/mturk/core/test/', 'mturk'),
+]
+
+
+def pytest_collection_modifyitems(config, items):
+ marker_expr = config.getoption('markexpr')
+ deselected = []
+
+ # first add all the markers, possibly filtering
# python 3.4/3.5 compat: rootdir = pathlib.Path(str(config.rootdir))
rootdir = pathlib.Path(config.rootdir)
- parallels = [i % total == index for i in range(len(items))]
- random.Random(42).shuffle(parallels)
- deselected = []
- for parallel, item in zip(parallels, items):
+ for item in items:
rel_path = str(pathlib.Path(item.fspath).relative_to(rootdir))
- if not parallel:
- deselected.append(item)
- elif "parlai_internal" in rel_path:
- item.add_marker("internal")
- elif "nightly/gpu/" in rel_path:
- item.add_marker("nightly_gpu")
- elif "nightly/cpu/" in rel_path:
- item.add_marker("nightly_cpu")
- elif "datatests/" in rel_path:
- item.add_marker("data")
- elif "tasks/" in rel_path:
- item.add_marker("tasks")
- elif "parlai/mturk/core/test/" in rel_path:
- item.add_marker("mturk")
- elif "/" not in rel_path[6:]:
- item.add_marker("unit")
+ for file_pattern, marker in MARKER_RULES:
+ if file_pattern in rel_path:
+ item.add_marker(marker)
+ if marker_expr and marker != marker_expr:
+ deselected.append(item)
+ break
else:
- raise ValueError(f"Couldn't categorize '{rel_path}'")
- config.hook.pytest_deselected(items=deselected)
- for d in deselected:
- items.remove(d)
+ assert "/" not in rel_path[6:], f"Couldn't categorize '{rel_path}'"
+ item.add_marker("unit")
+ if marker_expr != 'unit' and marker_expr != '':
+ deselected.append(item)
+
+ # kill everything that wasn't grabbed
+ for item in deselected:
+ items.remove(item)
+
+ if 'CIRCLE_NODE_TOTAL' in os.environ:
+ # circleci, split up the parallelism by classes
+ class_mapping = collections.defaultdict(list)
+ for item in items:
+ class_name = get_class_name(item)
+ class_mapping[class_name].append(item)
+
+ filtered_tests = filter_tests_with_circleci(class_mapping.keys())
+ new_items = []
+ for name in filtered_tests:
+ new_items.extend(class_mapping[name])
+ items[:] = new_items
def pytest_sessionfinish(session, exitstatus):
@@ -67,5 +118,5 @@ def pytest_sessionfinish(session, exitstatus):
This can sometimes happen due to the way we distribute tests across multiple circle
nodes.
"""
- if exitstatus == ExitCode.NO_TESTS_COLLECTED:
- session.exitstatus = ExitCode.OK
+ if exitstatus == pytest.ExitCode.NO_TESTS_COLLECTED:
+ session.exitstatus = pytest.ExitCode.OK
diff --git a/parlai/zoo/unittest/build.py b/parlai/zoo/unittest/build.py
--- a/parlai/zoo/unittest/build.py
+++ b/parlai/zoo/unittest/build.py
@@ -8,11 +8,15 @@
Pretrained models used by unit tests.
"""
-from parlai.core.build_data import download_models
+import os
+from parlai.core.build_data import download_models, built, get_model_dir
def download(datapath):
opt = {'datapath': datapath}
+ model_name = 'unittest'
+ mdir = os.path.join(get_model_dir(datapath), model_name)
+ version = 'v6.1'
model_filenames = [
'seq2seq.tar.gz',
'transformer_ranker.tar.gz',
@@ -20,5 +24,8 @@ def download(datapath):
'memnn.tar.gz',
'apex_v1.tar.gz',
'test_bytelevel_bpe_v2.tar.gz',
+ 'beam_blocking1.tar.gz',
+ 'context_blocking1.tar.gz',
]
- download_models(opt, model_filenames, 'unittest', version='v5.0')
+ if not built(mdir, version):
+ download_models(opt, model_filenames, model_name, version=version)
| diff --git a/tests/test_tra.py b/tests/test_tra.py
--- a/tests/test_tra.py
+++ b/tests/test_tra.py
@@ -31,10 +31,12 @@ def _get_args(self):
return dict(
task='integration_tests:candidate',
optimizer='adamax',
+ candidates='batch',
learningrate=7e-3,
batchsize=16,
- embedding_size=32,
+ embedding_size=16,
num_epochs=4,
+ gradient_clip=0.0,
)
def _get_threshold(self):
@@ -149,13 +151,7 @@ def test_eval_vocab(self):
class TestTransformerRanker(_AbstractTRATest):
def _get_args(self):
args = super()._get_args()
- new_args = dict(
- model='transformer/ranker',
- n_layers=1,
- n_heads=4,
- ffn_size=64,
- gradient_clip=0.5,
- )
+ new_args = dict(model='transformer/ranker', n_layers=1, n_heads=4, ffn_size=32,)
for k, v in new_args.items():
args[k] = v
return args
@@ -176,11 +172,7 @@ class TestPolyRanker(_AbstractTRATest):
def _get_args(self):
args = super()._get_args()
new_args = dict(
- model='transformer/polyencoder',
- n_layers=1,
- n_heads=4,
- ffn_size=64,
- gradient_clip=0.5,
+ model='transformer/polyencoder', n_layers=1, n_heads=4, ffn_size=32,
)
for k, v in new_args.items():
args[k] = v
@@ -228,7 +220,7 @@ def test_eval_fixed_label_not_in_cands(self):
testing_utils.eval_model(args, skip_valid=True)
args['add_label_to_fixed_cands'] = True
- valid, test = testing_utils.eval_model(args, skip_valid=True)
+ _, test = testing_utils.eval_model(args, skip_valid=True)
self.assertGreaterEqual(test['hits@100'], 0.0)
diff --git a/tests/test_transformers.py b/tests/test_transformers.py
--- a/tests/test_transformers.py
+++ b/tests/test_transformers.py
@@ -302,55 +302,48 @@ def test_beamsearch(self):
self.assertLessEqual(test['ppl'], 1.50)
self.assertGreaterEqual(test['bleu-4'], 0.90)
- @testing_utils.retry(ntries=3)
def test_beamsearch_blocking(self):
"""
Test beamsearch blocking.
"""
with testing_utils.tempdir() as tmpdir:
- mf = os.path.join(tmpdir, 'model')
- df = os.path.join(tmpdir, 'model.dict')
- valid, test = testing_utils.train_model(
+ valid, _ = testing_utils.eval_model(
dict(
task='integration_tests:repeat_words',
- model='transformer/generator',
- model_file=mf,
- dict_file=df,
- optimizer='adamax',
- learningrate=7e-3,
- batchsize=32,
- num_epochs=20,
- n_layers=1,
- n_heads=1,
- ffn_size=32,
- embedding_size=32,
+ model_file='zoo:unittest/beam_blocking/model',
+ dict_file='zoo:unittest/beam_blocking/model.dict',
+ batch_size=1,
inference='beam',
- beam_size=2,
- )
+ beam_size=5,
+ skip_generation=False,
+ ),
+ skip_test=True,
)
- valid_beam_block, test_beam_block = testing_utils.eval_model(
+ valid_beam_block, _ = testing_utils.eval_model(
dict(
task='integration_tests:repeat_words',
- model_file=mf,
- dict_file=df,
+ model_file='zoo:unittest/beam_blocking/model',
+ dict_file='zoo:unittest/beam_blocking/model.dict',
batch_size=1,
inference='beam',
beam_size=5,
beam_block_ngram=1,
skip_generation=False,
- )
+ ),
+ skip_test=True,
)
- valid_beam_block2, test_beam_block2 = testing_utils.eval_model(
+ valid_beam_block2, _ = testing_utils.eval_model(
dict(
task='integration_tests:repeat_words',
- model_file=mf,
- dict_file=df,
+ model_file='zoo:unittest/beam_blocking/model',
+ dict_file='zoo:unittest/beam_blocking/model.dict',
batch_size=1,
inference='beam',
beam_size=5,
beam_block_ngram=2,
skip_generation=False,
- )
+ ),
+ skip_test=True,
)
with open(os.path.join(tmpdir, 'blacklist.txt'), 'w') as f:
@@ -359,8 +352,8 @@ def test_beamsearch_blocking(self):
valid_beam_block3, _ = testing_utils.eval_model(
dict(
task='integration_tests:repeat_words',
- model_file=mf,
- dict_file=df,
+ model_file='zoo:unittest/beam_blocking/model',
+ dict_file='zoo:unittest/beam_blocking/model.dict',
batch_size=1,
inference='beam',
beam_size=5,
@@ -373,27 +366,19 @@ def test_beamsearch_blocking(self):
self.assertLessEqual(valid['ppl'], 1.30)
self.assertGreaterEqual(valid['f1'], 0.80)
self.assertGreaterEqual(valid['bleu-4'], 0.5)
- self.assertLessEqual(test['ppl'], 1.30)
- self.assertGreaterEqual(test['f1'], 0.80)
- self.assertGreaterEqual(test['bleu-4'], 0.5)
# Beam Block 1
self.assertLessEqual(valid_beam_block['f1'], 0.4)
self.assertLessEqual(valid_beam_block['bleu-4'], 1e-9)
- self.assertLessEqual(test_beam_block['f1'], 0.4)
- self.assertLessEqual(test_beam_block['bleu-4'], 1e-9)
# Beam Block 2
self.assertLessEqual(valid_beam_block2['f1'], 0.6)
self.assertLessEqual(valid_beam_block2['bleu-4'], 1e-6)
- self.assertLessEqual(test_beam_block2['f1'], 0.6)
- self.assertLessEqual(test_beam_block2['bleu-4'], 1e-6)
# Beam Block blacklist
self.assertLess(valid_beam_block3['bleu-4'], valid['bleu-4'])
self.assertLess(valid_beam_block3['f1'], valid['f1'])
- @testing_utils.retry(ntries=3)
def test_beamsearch_contextblocking(self):
"""
Test beamsearch context blocking.
@@ -402,69 +387,54 @@ def test_beamsearch_contextblocking(self):
well. Measure how much context blocking affects performance.
"""
- with testing_utils.tempdir() as tmpdir:
- mf = os.path.join(tmpdir, 'model')
- df = os.path.join(tmpdir, 'model.dict')
- # we'll reuse these
- args = dict(
- task='integration_tests', model_file=mf, dict_file=df, metrics='all'
- )
- noblock_valid, _ = testing_utils.train_model(
- dict(
- model='transformer/generator',
- optimizer='adamax',
- learningrate=7e-3,
- batchsize=32,
- num_epochs=20,
- n_layers=1,
- n_heads=1,
- ffn_size=32,
- embedding_size=32,
- inference='beam',
- beam_size=5,
- **args,
- )
- )
- self.assertGreaterEqual(noblock_valid['f1'], 0.95)
-
- # first confirm all is good without blocking
- valid, test = testing_utils.eval_model(
- dict(beam_context_block_ngram=-1, **args)
- )
- self.assertGreaterEqual(valid['f1'], 0.95)
- self.assertGreaterEqual(valid['bleu-4'], 0.95)
-
- # there's a special case for block == 1
- valid, test = testing_utils.eval_model(
- dict(beam_context_block_ngram=1, **args)
- )
- # bleu and f1 should be totally wrecked.
- self.assertLess(valid['f1'], 0.01)
- self.assertLess(valid['bleu-4'], 0.01)
-
- # a couple general cases
- valid, test = testing_utils.eval_model(
- dict(beam_context_block_ngram=2, **args)
- )
- # should take a big hit here
- self.assertLessEqual(valid['f1'], noblock_valid['f1'])
- # bleu-1 should be relatively okay
- self.assertLessEqual(valid['bleu-1'], noblock_valid['bleu-1'])
- self.assertGreaterEqual(valid['bleu-1'], 0.45)
- # and bleu-2 should be 0 at this point
- self.assertLessEqual(valid['bleu-2'], 0.01)
-
- # larger blocking, we can do better now
- valid, test = testing_utils.eval_model(
- dict(beam_context_block_ngram=3, **args)
- )
- # not as hard a hit from the larger hit
- self.assertLessEqual(valid['f1'], 0.95)
- # bleu-1 and bleu-2 should be relatively okay
- self.assertGreaterEqual(valid['bleu-1'], 0.60)
- self.assertGreaterEqual(valid['bleu-2'], 0.25)
- # bleu-3 should be totally screwed
- self.assertLessEqual(valid['bleu-3'], 0.01)
+ # we'll reuse these
+ args = dict(
+ task='integration_tests',
+ model_file='zoo:unittest/context_blocking/model',
+ dict_file='zoo:unittest/context_blocking/model.dict',
+ metrics='all',
+ )
+ noblock_valid, _ = testing_utils.eval_model(args,)
+ self.assertGreaterEqual(noblock_valid['f1'], 0.95)
+
+ # first confirm all is good without blocking
+ valid, _ = testing_utils.eval_model(
+ dict(beam_context_block_ngram=-1, **args), skip_test=True
+ )
+ self.assertGreaterEqual(valid['f1'], 0.95)
+ self.assertGreaterEqual(valid['bleu-4'], 0.95)
+
+ # there's a special case for block == 1
+ valid, _ = testing_utils.eval_model(
+ dict(beam_context_block_ngram=1, **args), skip_test=True,
+ )
+ # bleu and f1 should be totally wrecked.
+ self.assertLess(valid['f1'], 0.01)
+ self.assertLess(valid['bleu-4'], 0.01)
+
+ # a couple general cases
+ valid, _ = testing_utils.eval_model(
+ dict(beam_context_block_ngram=2, **args), skip_test=True
+ )
+ # should take a big hit here
+ self.assertLessEqual(valid['f1'], noblock_valid['f1'])
+ # bleu-1 should be relatively okay
+ self.assertLessEqual(valid['bleu-1'], noblock_valid['bleu-1'])
+ self.assertGreaterEqual(valid['bleu-1'], 0.45)
+ # and bleu-2 should be 0 at this point
+ self.assertLessEqual(valid['bleu-2'], 0.01)
+
+ # larger blocking, we can do better now
+ valid, _ = testing_utils.eval_model(
+ dict(beam_context_block_ngram=3, **args), skip_test=True
+ )
+ # not as hard a hit from the larger hit
+ self.assertLessEqual(valid['f1'], 0.95)
+ # bleu-1 and bleu-2 should be relatively okay
+ self.assertGreaterEqual(valid['bleu-1'], 0.60)
+ self.assertGreaterEqual(valid['bleu-2'], 0.25)
+ # bleu-3 should be totally screwed
+ self.assertLessEqual(valid['bleu-3'], 0.01)
def test_nucleus(self):
"""
| Remaining slow unit tests
Following up from #2327, here are a few possible improvements that could be made to the unit tests to bring them under 5 minutes:
- There are `test_context_blocking` and `test_beam_blocking` tests. Both train a model to get 100% performance on a custom task, and then use blocking to ensure performance degrades in an expected manner. These are also somewhat flaky. By moving them to pretrained models (distributed with the other unit test models), we could remove the flakiness and speed up their inference from ~50s to well under 10s (see the sketch after this list).
- Deleting the pytorch data teacher tests
- ?
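For the first item, a hedged sketch of what the pretrained-model version of such a test could look like; the zoo path and the threshold are placeholders in the style of the existing unittest models.
```python
import parlai.utils.testing as testing_utils

# evaluate a small pretrained unittest model instead of training one inside the test
valid, _ = testing_utils.eval_model(
    dict(
        task='integration_tests:repeat_words',
        model_file='zoo:unittest/beam_blocking/model',
        dict_file='zoo:unittest/beam_blocking/model.dict',
        batch_size=1,
        inference='beam',
        beam_size=5,
        beam_block_ngram=1,
        skip_generation=False,
    ),
    skip_test=True,
)
assert valid['f1'] <= 0.4  # blocking should wreck overlap with the repeated target
```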
| Hi @stephenroller, I am interested in solving this problem. Can I work on the improvements mentioned above?
Absolutely! Thanks! If you find that you need to distribute some model files, we’ll have to upload them for you, but it won’t be a problem.
@stephenroller Okay, I have started the work and will raise a PR soon. Thanks.
Hi @stephenroller, while looking for the `test_context_blocking` and `test_beam_blocking` tests, I couldn't find their definitions in `tests`; can you tell me the path to these functions? I can then move them onto pre-trained models for faster test performance.
Thanks, and I look forward to your comment.
https://github.com/facebookresearch/ParlAI/blob/b4a442018d1a46f9374547ef0a9b8134b7bb4944/tests/test_transformers.py#L305
https://github.com/facebookresearch/ParlAI/blob/b4a442018d1a46f9374547ef0a9b8134b7bb4944/tests/test_transformers.py#L396 | 2020-05-26T01:11:27Z | [] | [] |
facebookresearch/ParlAI | 2,919 | facebookresearch__ParlAI-2919 | [
"2875"
] | c3cb2621f0b0b58fe6ba5a5d34030d57980e20eb | diff --git a/parlai/agents/drqa/drqa.py b/parlai/agents/drqa/drqa.py
--- a/parlai/agents/drqa/drqa.py
+++ b/parlai/agents/drqa/drqa.py
@@ -113,8 +113,6 @@ def dictionary_class():
return SimpleDictionaryAgent
def __init__(self, opt, shared=None):
- if opt.get('numthreads', 1) > 1:
- raise RuntimeError("numthreads > 1 not supported for this model.")
super().__init__(opt, shared)
# All agents keep track of the episode (for multiple questions)
diff --git a/parlai/agents/starspace/starspace.py b/parlai/agents/starspace/starspace.py
--- a/parlai/agents/starspace/starspace.py
+++ b/parlai/agents/starspace/starspace.py
@@ -191,7 +191,6 @@ def __init__(self, opt, shared=None):
self.history = {}
self.debugMode = False
if shared:
- torch.set_num_threads(1)
# set up shared properties
self.dict = shared['dict']
self.model = shared['model']
diff --git a/parlai/agents/tfidf_retriever/build_db.py b/parlai/agents/tfidf_retriever/build_db.py
--- a/parlai/agents/tfidf_retriever/build_db.py
+++ b/parlai/agents/tfidf_retriever/build_db.py
@@ -52,7 +52,6 @@ def store_contents(opt, task, save_path, context_length=-1, include_labels=True)
dt = opt.get('datatype', '').split(':')
ordered_opt['datatype'] = ':'.join([dt[0], 'ordered'] + dt[1:])
ordered_opt['batchsize'] = 1
- ordered_opt['numthreads'] = 1
ordered_opt['task'] = task
teacher = create_task_agent_from_taskname(ordered_opt)[0]
diff --git a/parlai/agents/tfidf_retriever/tfidf_retriever.py b/parlai/agents/tfidf_retriever/tfidf_retriever.py
--- a/parlai/agents/tfidf_retriever/tfidf_retriever.py
+++ b/parlai/agents/tfidf_retriever/tfidf_retriever.py
@@ -227,12 +227,10 @@ def train_act(self):
if (
'ordered' not in self.opt.get('datatype', 'train:ordered')
or self.opt.get('batchsize', 1) != 1
- or self.opt.get('numthreads', 1) != 1
or self.opt.get('num_epochs', 1) != 1
):
raise RuntimeError(
- 'Need to set --batchsize 1, --numthreads 1, \
- --datatype train:ordered, --num_epochs 1'
+ 'Need to set --batchsize 1, --datatype train:ordered, --num_epochs 1'
)
obs = self.observation
self.current.append(obs)
diff --git a/parlai/core/metrics.py b/parlai/core/metrics.py
--- a/parlai/core/metrics.py
+++ b/parlai/core/metrics.py
@@ -4,16 +4,12 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
-Provides standard metric evaluations for dialog.
-
-Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics between
-processes.
+Provides standard metric evaluations for dialog, as well as an aggregator.
"""
import re
from abc import ABC, abstractmethod
from collections import Counter
-import queue
import functools
import datetime
from typing import Union, List, Optional, Tuple, Set, Any, Dict
@@ -24,12 +20,6 @@
from parlai.utils.misc import warn_once
from parlai.utils.typing import TScalar, TVector
-try:
- import torch.multiprocessing as multiprocessing
-except ImportError:
- import multiprocessing # type: ignore
-
-
DEFAULT_METRICS = {'bleu-4', 'accuracy', 'f1'}
ROUGE_METRICS = {'rouge-1', 'rouge-2', 'rouge-L'}
BLEU_METRICS = {'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4'}
@@ -604,34 +594,18 @@ def aggregate_unnamed_reports(reports: List[Dict[str, Metric]]) -> Dict[str, Met
class Metrics(object):
"""
- Threadsafe metrics container focused on aggregation.
+ Metrics aggregator.
"""
def __init__(self, threadsafe=False, shared=None):
- self._threadsafe = threadsafe
- if self._threadsafe and shared is None:
- # Threadsafe metrics tracking works by keeping a queue that workers can
- # push updates to. the main worker works through the queue at report
- # time. We could add some buffering to improve performance, but we
- # are deprioritizing hogwild performance at this time.
- self._buffer = None
- self._queue = multiprocessing.SimpleQueue()
- self._worker = False
- self._data = {}
- elif shared and 'queue' in shared:
- # This is a clone, in threadsafe mode
- self._buffer = {}
- self._queue = shared['queue']
- self._worker = True
- self._data = None
- elif shared and 'data' in shared:
- # This is a clone, in non-threadsafe mode
+ if shared and 'data' in shared:
+ # This is a clone
self._buffer = None
self._queue = None
self._worker = False
self._data = shared['data']
else:
- # The original in non-threadsafe mode
+ # The original
self._buffer = None
self._queue = None
self._worker = False
@@ -647,64 +621,22 @@ def add(self, key: str, value: Optional[Metric]) -> None:
"""
Record an accumulation to a metric.
"""
- if self._threadsafe and self._worker:
- self._buffer[key] = self._buffer.get(key) + value
- else:
- self._data[key] = self._data.get(key) + value
-
- def flush(self):
- """
- Clear the local buffer and push it on.
- """
- if self._threadsafe and self._buffer:
- self._queue.put(self._buffer)
- self._buffer.clear()
+ self._data[key] = self._data.get(key) + value
def report(self):
"""
Report the metrics over all data seen so far.
"""
- self.sync()
return {k: v for k, v in self._data.items()}
- def sync(self):
- """
- Process all items on the queue to ensure it is up to date.
- """
- if self._worker:
- self.flush()
- elif self._threadsafe and not self._worker:
- for buffer_ in self._drain_queue():
- for key, value in buffer_.items():
- self._data[key] = self._data.get(key) + value
-
- def _drain_queue(self):
- """
- Drain the queue, yielding all items in it.
- """
- while not self._queue.empty():
- try:
- yield self._queue.get()
- except queue.Empty:
- break
-
def clear(self):
"""
Clear all the metrics.
"""
- if self._worker:
- self._buffer.clear()
- elif self._threadsafe and not self._worker:
- for _ in self._drain_queue():
- pass
- if self._data:
- self._data.clear()
+ self._data.clear()
def share(self):
- if self._threadsafe:
- return {'queue': self._queue}
- else:
- return {'data': self._data}
+ return {'data': self._data}
class TeacherMetrics(Metrics):
@@ -713,12 +645,9 @@ class TeacherMetrics(Metrics):
"""
def __init__(
- self,
- threadsafe: bool = False,
- metrics_list: str = "default",
- shared: Dict[str, Any] = None,
+ self, metrics_list: str = "default", shared: Dict[str, Any] = None,
) -> None:
- super().__init__(threadsafe=threadsafe, shared=shared)
+ super().__init__(shared=shared)
self._metrics_list = self._infer_metrics(metrics_list)
self.eval_pr = [1, 5, 10, 100]
@@ -805,6 +734,3 @@ def evaluate_response(self, observation: Message, labels: List[str]) -> None:
v = AverageMetric(v)
assert isinstance(v, Metric)
self.add(uk, v)
-
- # always flush at the end of processing this response
- self.flush()
diff --git a/parlai/core/params.py b/parlai/core/params.py
--- a/parlai/core/params.py
+++ b/parlai/core/params.py
@@ -695,14 +695,6 @@ def add_parlai_args(self, args=None):
'to skip image loading.',
hidden=True,
)
- parlai.add_argument(
- '-nt',
- '--numthreads',
- default=1,
- type=int,
- help='number of threads. Used for hogwild if batchsize is 1, else '
- 'for number of threads in threadpool loading,',
- )
parlai.add_argument(
'--hide-labels',
default=False,
diff --git a/parlai/core/teachers.py b/parlai/core/teachers.py
--- a/parlai/core/teachers.py
+++ b/parlai/core/teachers.py
@@ -51,8 +51,6 @@
from abc import ABC, abstractmethod
import concurrent.futures
-import multiprocessing
-from multiprocessing import Value, Lock
from threading import Thread
import queue
import random
@@ -124,7 +122,6 @@ def __init__(self, opt: Opt, shared=None):
self.id = opt.get('task', 'teacher')
if not hasattr(self, 'metrics'):
self.metrics = TeacherMetrics(
- threadsafe=(opt.get('numthreads', 1) > 1),
metrics_list=opt.get('metrics', 'default'),
shared=shared['metrics'] if shared is not None else None,
)
@@ -190,12 +187,6 @@ def share(self):
shared['metrics'] = self.metrics.share()
return shared
- def update_counters(self):
- """
- Ensure counters are synchronized.
- """
- self.metrics.sync()
-
class FixedDialogTeacher(Teacher):
"""
@@ -333,11 +324,6 @@ def share(self):
if hasattr(self, 'data_loader'):
shared['data_loader'] = self.data_loader
- if self.opt.get('numthreads', 1) > 1:
- if type(self.index) is not multiprocessing.sharedctypes.Synchronized:
- # for multithreading need to move index into threadsafe memory
- self.index = Value('l', -1)
-
shared['index'] = self.index
return shared
@@ -517,9 +503,6 @@ class DialogTeacher(FixedDialogTeacher):
- uses data class to store and query text data
- generates action tables to send to the student agent from the data
- If you have ``opt.numthreads > 1``, this also activates a shared memory
- array for the data and lock-protected shared-memory metrics.
-
In order to subclass this class, you must implement ``setup_data()`` in
your class (or subclass another class which does, like
``FbDialogTeacher``), which reads your data file as an iterator.
@@ -911,11 +894,6 @@ def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
self.datafile = opt['datafile']
self.reset_data = None
self.is_reset = True
- if opt.get('numthreads', 1) > 1:
- logging.warn(
- 'multithreaded streaming will process every example numthreads times.'
- )
- self.lock = Lock()
self.entry_idx = 0
self.cur_episode = self._FIRST_PASS
self.num_eps = None
@@ -2089,13 +2067,6 @@ def reset_metrics(self):
for t in self.tasks:
t.reset_metrics()
- def save(self):
- """
- Save each subtask.
- """
- for t in self.tasks:
- t.save()
-
def share(self):
"""
Shares this teacher by sharing each subtask.
@@ -2113,10 +2084,6 @@ def shutdown(self):
for t in self.tasks:
t.shutdown()
- def update_counters(self):
- for t in self.tasks:
- t.update_counters()
-
class ChunkTeacher(FixedDialogTeacher, ABC):
"""
@@ -2132,8 +2099,6 @@ def __init__(self, opt, shared=None):
if 'stream' not in opt['datatype']:
raise ValueError('Chunk teacher should be used with streaming. ')
- if opt['numthreads'] > 1:
- raise ValueError('Chunk teacher is not compatible with Hogwild.')
self.set_datasettings(opt)
diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -29,7 +29,6 @@
from parlai.core.opt import Opt
from parlai.core.agents import Agent
-from parlai.utils.thread import SharedTable
from parlai.core.dict import DictionaryAgent
from parlai.nn.lr_scheduler import ParlAILRScheduler
from parlai.core.message import Message
@@ -734,7 +733,7 @@ def __init__(self, opt: Opt, shared=None):
self.dict['__FP16_PAD_{}__'.format(i)] = 1
# global_metrics keeps track of batch-level or global-level metrics
- self.global_metrics = Metrics(opt.get('numthreads', 1) > 1, shared=None)
+ self.global_metrics = Metrics(shared=None)
# self.metrics is there for legacy reasons
self.metrics: Dict[str, Any] = {}
else:
@@ -744,12 +743,7 @@ def __init__(self, opt: Opt, shared=None):
self.model = shared['model']
self.criterion = shared['criterion']
self.metrics = shared['metrics']
- self.global_metrics = Metrics(
- opt.get('numthreads', 1) > 1, shared=shared['global_metrics']
- )
-
- if opt.get('numthreads', 1) > 1:
- torch.set_num_threads(1)
+ self.global_metrics = Metrics(shared=shared['global_metrics'])
# Default to the class name, sans "Agent". child can override
self.id = type(self).__name__.replace("Agent", "")
@@ -893,7 +887,7 @@ def _should_initialize_optimizer(self) -> bool:
return False
datatype = self.opt.get('datatype', '')
is_train = 'train' in datatype and 'evalmode' not in datatype
- return is_train or self.opt.get('numthreads', 1) > 1
+ return is_train
def init_optim(self, params, optim_states=None, saved_optim_type=None):
"""
@@ -1255,14 +1249,8 @@ def share(self):
Subclasses will likely want to share their model as well.
"""
shared = super().share()
-
- if self.opt.get('numthreads', 1) > 1 and isinstance(self.metrics, dict):
- # move metrics and model to shared memory
- self.metrics = SharedTable(self.metrics)
- self.model.share_memory()
shared['metrics'] = self.metrics
shared['global_metrics'] = self.global_metrics.share()
-
shared['dict'] = self.dict
shared['model'] = self.model
shared['criterion'] = self.criterion
@@ -2013,9 +2001,6 @@ def batch_act(self, observations):
self.global_metrics.add('ctps', GlobalTimerMetric(0))
self.global_metrics.add('tps', GlobalTimerMetric(0))
- # Make sure we push all the metrics to main thread in hogwild/workers
- self.global_metrics.flush()
-
return batch_reply
@abstractmethod
diff --git a/parlai/core/torch_generator_agent.py b/parlai/core/torch_generator_agent.py
--- a/parlai/core/torch_generator_agent.py
+++ b/parlai/core/torch_generator_agent.py
@@ -653,10 +653,6 @@ def share(self):
shared['beam_block_list'] = self.beam_block_list
if hasattr(self, 'optimizer'):
shared['optimizer'] = self.optimizer
- if self.opt.get('numthreads', 1) > 1:
- shared['states'] = { # don't share optimizer states
- 'optimizer_type': self.opt['optimizer']
- }
return shared
def vectorize(self, *args, **kwargs):
diff --git a/parlai/core/worlds.py b/parlai/core/worlds.py
--- a/parlai/core/worlds.py
+++ b/parlai/core/worlds.py
@@ -17,11 +17,6 @@
``MultiWorld(World)`` creates a set of environments (worlds) for the same agent
to multitask over, a different environment will be chosen per episode.
- ``HogwildWorld(World)`` is a container that creates another world within itself for
- every thread, in order to have separate simulated environments for each one.
- Each world gets its own agents initialized using the ``share()`` parameters
- from the original agents.
-
``BatchWorld(World)`` is a container for doing minibatch training over a world by
collecting batches of N copies of the environment (each with different state).
@@ -43,14 +38,8 @@
import copy
import random
-import time
-
-from typing import List, Dict, Any, Union
-try:
- from torch.multiprocessing import Process, Value, Condition, Semaphore
-except ImportError:
- from multiprocessing import Process, Value, Semaphore, Condition # noqa: F401
+from typing import List, Dict, Union
from parlai.core.agents import create_agents_from_shared
from parlai.core.loader import load_task_module, load_world_module
@@ -424,10 +413,6 @@ def update_counters(self):
for a in self.agents:
if hasattr(a, 'update_counters'):
a.update_counters()
- if hasattr(a, 'global_metrics'):
- # torch agent has "global metrics" that might get backed up on OS X,
- # see the SystemError exception in Metrics.
- a.global_metrics.sync()
class MultiAgentDialogWorld(World):
@@ -519,69 +504,6 @@ def shutdown(self):
a.shutdown()
-class ExecutableWorld(MultiAgentDialogWorld):
- """
- World where messages from agents can be interpreted as _actions_.
-
- Actions result in changes in the environment (are executed). Hence a grounded
- simulation can be implemented rather than just dialogue between agents.
- """
-
- def __init__(self, opt: Opt, agents=None, shared=None):
- super().__init__(opt, agents, shared)
- self.init_world()
-
- def init_world(self):
- """
- Initialize the world.
-
- An executable world class should implement this function, otherwise the actions
- do not do anything (and it is the same as MultiAgentDialogWorld).
- """
- # TODO: mark as abstract
- pass
-
- def execute(self, agent, act):
- """
- Execute an action.
-
- An executable world class should implement this function, otherwise the actions
- do not do anything (and it is the same as MultiAgentDialogWorld).
- """
- pass
-
- def observe(self, agent, act):
- """
- Observe an action.
-
- An executable world class should implement this function, otherwise the
- observations for each agent are just the messages from other agents and not
- confitioned on the world at all (and it is thus the same as
- MultiAgentDialogWorld).
- """
- if agent.id == act['id']:
- return None
- else:
- return act
-
- def parley(self):
- """
- For each agent: act, execute and observe actions in world.
- """
- acts = self.acts
- for index, agent in enumerate(self.agents):
- # The agent acts.
- acts[index] = agent.act()
- # We execute this action in the world.
- self.execute(agent, acts[index])
- # All agents (might) observe the results.
- for other_agent in self.agents:
- obs = self.observe(other_agent, acts[index])
- if obs is not None:
- other_agent.observe(obs)
- self.update_counters()
-
-
class MultiWorld(World):
"""
Container for multiple worlds.
@@ -786,13 +708,6 @@ def reset_metrics(self):
for w in self.worlds:
w.reset_metrics()
- def save_agents(self):
- """
- Save agents in all subworlds.
- """
- # Assumes all worlds have same agents, picks first to save.
- self.worlds[0].save_agents()
-
def update_counters(self):
super().update_counters()
for w in self.worlds:
@@ -833,8 +748,7 @@ class BatchWorld(World):
the parameters for each.
The underlying world(s) it is batching can be either
- ``DialogPartnerWorld``, ``MultiAgentWorld``, ``ExecutableWorld`` or
- ``MultiWorld``.
+ ``DialogPartnerWorld``, ``MultiAgentWorld``, or ``MultiWorld``.
"""
def __init__(self, opt: Opt, world):
@@ -1038,14 +952,6 @@ def reset_metrics(self):
"""
self.world.reset_metrics()
- def save_agents(self):
- """
- Save the agents in the root world.
- """
- # Because all worlds share the same parameters through sharing, saving
- # one copy would suffice
- self.world.save_agents()
-
def shutdown(self):
"""
Shutdown each world.
@@ -1064,9 +970,7 @@ def __init__(self, opt: Opt, world: Union[DialogPartnerWorld, MultiWorld]):
self.agents = []
# check some assumptions
- if isinstance(
- world, (ExecutableWorld, BatchWorld, HogwildWorld, MultiAgentDialogWorld)
- ):
+ if isinstance(world, (BatchWorld, MultiAgentDialogWorld)):
raise TypeError(
'World must be a DialogPartnerWorld or a '
'MultiWorld of DialogPartnerWorld'
@@ -1272,293 +1176,6 @@ def report(self):
return self.world.report()
-class HogwildProcess(Process):
- """
- Process child used for ``HogwildWorld``.
-
- Each ``HogwildProcess`` contain its own unique ``World``.
- """
-
- def __init__(self, tid, opt: Opt, shared, sync):
- self.numthreads = opt['numthreads']
- opt = copy.deepcopy(opt)
- opt['numthreads'] = 1 # don't let threads create more threads!
- self.opt = opt
- self.shared = shared
- self.shared['threadindex'] = tid
- if 'agents' in self.shared:
- for a in self.shared['agents']:
- a['threadindex'] = tid
- self.sync = sync
- super().__init__(daemon=True)
-
- def run(self):
- """
- Run a parley loop.
-
- Runs normal parley loop for as many examples as this thread can get ahold of via
- the semaphore ``queued_sem``.
- """
- world = self.shared['world_class'](self.opt, None, self.shared)
- if self.opt.get('batchsize', 1) > 1:
- world = BatchWorld(self.opt, world)
- self.sync['threads_sem'].release()
- with world:
- while True:
- if self.sync['term_flag'].value:
- break # time to close
- self.sync['queued_sem'].acquire()
- self.sync['threads_sem'].release()
-
- # check if you need to reset before moving on
- if self.sync['epoch_done_ctr'].value < 0:
- with self.sync['epoch_done_ctr'].get_lock():
- # increment the number of finished threads
- self.sync['epoch_done_ctr'].value += 1
- if self.sync['epoch_done_ctr'].value == 0:
- # make sure reset sem is clean
- for _ in range(self.numthreads):
- self.sync['reset_sem'].acquire(block=False)
- world.reset() # keep lock for this!
-
- while self.sync['epoch_done_ctr'].value < 0:
- # only move forward once other threads have finished reset
- time.sleep(0.1)
-
- # process an example or wait for reset
- if not world.epoch_done() or self.opt.get('datatype').startswith(
- 'train', False
- ):
- # do one example if any available
- world.parley()
- with self.sync['total_parleys'].get_lock():
- self.sync['total_parleys'].value += 1
- else:
- # during valid/test, we stop parleying once at end of epoch
- with self.sync['epoch_done_ctr'].get_lock():
- # increment the number of finished threads
- self.sync['epoch_done_ctr'].value += 1
- # send control back to main thread
- self.sync['threads_sem'].release()
- # we didn't process anything
- self.sync['queued_sem'].release()
- # wait for reset signal
- self.sync['reset_sem'].acquire()
-
-
-class HogwildWorld(World):
- """
- Creates a separate world for each thread (process).
-
- Maintains a few shared objects to keep track of state:
-
- - A Semaphore which represents queued examples to be processed. Every call
- of parley increments this counter; every time a Process claims an
- example, it decrements this counter.
-
- - A Condition variable which notifies when there are no more queued
- examples.
-
- - A boolean Value which represents whether the inner worlds should shutdown.
-
- - An integer Value which contains the number of unprocessed examples queued
- (acquiring the semaphore only claims them--this counter is decremented
- once the processing is complete).
- """
-
- def __init__(self, opt: Opt, world):
- super().__init__(opt)
- self.inner_world = world
- self.numthreads = opt['numthreads']
-
- self.sync: Dict[str, Any] = { # syncronization primitives
- # semaphores for counting queued examples
- 'queued_sem': Semaphore(0), # counts num exs to be processed
- 'threads_sem': Semaphore(0), # counts threads
- 'reset_sem': Semaphore(0), # allows threads to reset
- # flags for communicating with threads
- 'reset_flag': Value('b', False), # threads should reset
- 'term_flag': Value('b', False), # threads should terminate
- # counters
- 'epoch_done_ctr': Value('i', 0), # number of done threads
- 'total_parleys': Value('l', 0), # number of parleys in threads
- }
-
- self.threads: List[HogwildProcess] = []
- for i in range(self.numthreads):
- self.threads.append(HogwildProcess(i, opt, world.share(), self.sync))
- time.sleep(0.05) # delay can help prevent deadlock in thread launches
- for t in self.threads:
- t.start()
-
- for _ in self.threads:
- # wait for threads to launch
- # this makes sure that no threads get examples before all are set up
- # otherwise they might reset one another after processing some exs
- self.sync['threads_sem'].acquire() # type: ignore
-
- logging.info(f'{self.numthreads} threads initialized')
-
- def display(self):
- """
- Unsupported operation.
-
- Raises a `NotImplementedError`.
- """
- self.shutdown()
- raise NotImplementedError(
- 'Hogwild does not support displaying in-run'
- ' task data. Use `--numthreads 1`.'
- )
-
- def episode_done(self):
- """
- Unsupported operation.
-
- Raises a `RuntimeError`.
- """
- self.shutdown()
- raise RuntimeError('episode_done() undefined for hogwild')
-
- def epoch_done(self):
- """
- Return whether the epoch is finished.
- """
- return self.sync['epoch_done_ctr'].value == self.numthreads
-
- def parley(self):
- """
- Queue one item to be processed.
- """
- # schedule an example
- self.sync['queued_sem'].release()
- # keep main process from getting too far ahead of the threads
- # this way it can only queue up to numthreads unprocessed examples
- self.sync['threads_sem'].acquire()
- self.update_counters()
-
- def update_counters(self):
- super().update_counters()
- # some unix systems have a system max of how big the kernel can get. OS X
- # has one, and so does CI. It's very easy to hit this max, so we need to
- # flush the hogwild metrics queue every parley, to keep it from filling
- # up and causing a deadlock.
- self.inner_world.update_counters()
-
- def getID(self):
- """
- Return the inner world's ID.
- """
- return self.inner_world.getID()
-
- def num_examples(self):
- """
- Return the number of examples.
- """
- if hasattr(self, '_num_examples'):
- return self._num_examples_cache
- self._num_examples_cache = self.inner_world.num_examples()
- return self._num_examples_cache
-
- def num_episodes(self):
- """
- Return the number of episodes.
- """
- return self.inner_world.num_episodes()
-
- def get_task_agent(self):
- """
- Return task agent of inner world.
- """
- return self.inner_world.get_task_agent()
-
- def get_model_agent(self):
- """
- Return model agent of inner world.
- """
- return self.inner_world.get_model_agent()
-
- def get_total_exs(self):
- """
- Return the number of processed examples.
- """
- return self.inner_world.get_total_exs()
-
- def get_total_epochs(self):
- """
- Return total amount of epochs on which the world has trained.
- """
- if self.max_exs is None:
- if 'num_epochs' in self.opt and self.opt['num_epochs'] > 0:
- if self.num_examples():
- self.max_exs = self.num_examples() * self.opt['num_epochs']
- else:
- self.max_exs = -1
- else:
- self.max_exs = -1
- if self.max_exs > 0:
- return (
- self.sync['total_parleys'].value
- * self.opt.get('batchsize', 1)
- / self.num_examples()
- )
- else:
- return self.total_epochs
-
- def report(self):
- """
- Report the inner world's metrics.
- """
- return self.inner_world.report()
-
- def save_agents(self):
- """
- Save the inner world's agents.
- """
- self.inner_world.save_agents()
-
- def reset(self):
- """
- Reset the inner world.
- """
- # set epoch done counter negative so all threads know to reset
- with self.sync['epoch_done_ctr'].get_lock():
- threads_asleep = self.sync['epoch_done_ctr'].value > 0
- self.sync['epoch_done_ctr'].value = -len(self.threads)
- if threads_asleep:
- # release reset semaphore only if threads had reached epoch_done
- for _ in self.threads:
- self.sync['reset_sem'].release()
-
- def reset_metrics(self):
- """
- Reset metrics for the inner world.
- """
- self.inner_world.reset_metrics()
-
- def shutdown(self):
- """
- Set shutdown flag and wake threads up to close themselves.
- """
- # set shutdown flag
- with self.sync['term_flag'].get_lock():
- self.sync['term_flag'].value = True
-
- # wake up each thread by queueing fake examples or setting reset flag
- for _ in self.threads:
- self.sync['queued_sem'].release()
- self.sync['reset_sem'].release()
-
- # make sure epoch counter is reset so threads aren't waiting for it
- with self.sync['epoch_done_ctr'].get_lock():
- self.sync['epoch_done_ctr'].value = 0
-
- # wait for threads to close
- for t in self.threads:
- t.terminate()
- self.inner_world.shutdown()
-
-
################################################################################
# Functions for creating tasks/worlds given options.
################################################################################
@@ -1627,7 +1244,6 @@ def create_task(opt: Opt, user_agents, default_world=None):
opt['task'] = ids_to_tasks(opt['task'])
logging.info(f"creating task(s): {opt['task']}")
- # check if single or multithreaded, and single-example or batched examples
if ',' not in opt['task']:
# Single task
world = create_task_world(opt, user_agents, default_world=default_world)
@@ -1636,11 +1252,7 @@ def create_task(opt: Opt, user_agents, default_world=None):
# TODO: remove and replace with multiteachers only?
world = MultiWorld(opt, user_agents, default_world=default_world)
- if opt.get('numthreads', 1) > 1:
- # use hogwild world if more than one thread requested
- # hogwild world will create sub batch worlds as well if bsz > 1
- world = HogwildWorld(opt, world)
- elif opt.get('batchsize', 1) > 1 and opt.get('dynamic_batching'):
+ if opt.get('batchsize', 1) > 1 and opt.get('dynamic_batching'):
world = DynamicBatchWorld(opt, world)
elif opt.get('batchsize', 1) > 1:
# otherwise check if should use batchworld
diff --git a/parlai/mturk/tasks/personachat/personachat_chat/extract_and_save_personas.py b/parlai/mturk/tasks/personachat/personachat_chat/extract_and_save_personas.py
--- a/parlai/mturk/tasks/personachat/personachat_chat/extract_and_save_personas.py
+++ b/parlai/mturk/tasks/personachat/personachat_chat/extract_and_save_personas.py
@@ -52,7 +52,6 @@ def main(opt):
opt['task'] = teacher_name
assert 'personas_path' in opt, 'Must specify personas path'
opt['datatype'] = 'train:ordered:stream'
- opt['numthreads'] = 1
opt['batchsize'] = 1
extract_and_save(opt)
diff --git a/parlai/mturk/tasks/wizard_of_wikipedia/extract_and_save_personas.py b/parlai/mturk/tasks/wizard_of_wikipedia/extract_and_save_personas.py
--- a/parlai/mturk/tasks/wizard_of_wikipedia/extract_and_save_personas.py
+++ b/parlai/mturk/tasks/wizard_of_wikipedia/extract_and_save_personas.py
@@ -56,7 +56,6 @@ def main(opt):
)
opt['personas_path'] = personas_path
opt['datatype'] = 'train:ordered:stream'
- opt['numthreads'] = 1
opt['batchsize'] = 1
personas = extract_and_save(opt)
return personas
diff --git a/parlai/mturk/tasks/wizard_of_wikipedia/run.py b/parlai/mturk/tasks/wizard_of_wikipedia/run.py
--- a/parlai/mturk/tasks/wizard_of_wikipedia/run.py
+++ b/parlai/mturk/tasks/wizard_of_wikipedia/run.py
@@ -59,7 +59,6 @@ def setup_title_to_passage(opt):
ordered_opt = opt.copy()
ordered_opt['datatype'] = 'train:ordered:stream'
ordered_opt['batchsize'] = 1
- ordered_opt['numthreads'] = 1
ordered_opt['task'] = 'wikipedia:full:key-value'
teacher = create_task_agent_from_taskname(ordered_opt)[0]
title_to_passage = {}
diff --git a/parlai/scripts/build_candidates.py b/parlai/scripts/build_candidates.py
--- a/parlai/scripts/build_candidates.py
+++ b/parlai/scripts/build_candidates.py
@@ -52,9 +52,6 @@ def setup_args(parser=None) -> ParlaiParser:
def build_cands(opt):
opt.log()
# create repeat label agent and assign it to the specified task
- if opt['numthreads'] > 1:
- # Broken in hogwild mode. Just fall back to single processing mode
- opt['numthreads'] = 1
agent = RepeatLabelAgent(opt)
world = create_task(opt, agent)
if opt['outfile'] is None:
diff --git a/parlai/scripts/build_dict.py b/parlai/scripts/build_dict.py
--- a/parlai/scripts/build_dict.py
+++ b/parlai/scripts/build_dict.py
@@ -96,7 +96,6 @@ def build_dict(opt, skip_if_built=False):
cnt = 0
# we use train set to build dictionary
- ordered_opt['numthreads'] = 1
ordered_opt['batchsize'] = 1
# Set this to none so that image features are not calculated when Teacher is
# instantiated while building the dict
diff --git a/parlai/tasks/fvqa/agents.py b/parlai/tasks/fvqa/agents.py
--- a/parlai/tasks/fvqa/agents.py
+++ b/parlai/tasks/fvqa/agents.py
@@ -56,9 +56,7 @@ def __init__(self, opt, shared=None):
if shared and shared.get('factmetrics'):
self.factmetrics = shared['factmetrics']
else:
- self.factmetrics = TeacherMetrics(
- opt.get('numthreads', 1) > 1, opt.get('metrics', 'default')
- )
+ self.factmetrics = TeacherMetrics(opt.get('metrics', 'default'))
self.datatype = opt['datatype']
questions_path, trainset_path, self.image_path = _path(opt)
@@ -86,7 +84,8 @@ def num_episodes(self):
def report(self):
r = super().report()
- r['factmetrics'] = self.factmetrics.report()
+ for k, v in self.factmetrics.report().items():
+ r[f'factmetrics_{k}'] = v
return r
def reset(self):
diff --git a/parlai/tasks/talkthewalk/__init__.py b/parlai/tasks/talkthewalk/__init__.py
deleted file mode 100644
--- a/parlai/tasks/talkthewalk/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/parlai/tasks/talkthewalk/agents.py b/parlai/tasks/talkthewalk/agents.py
deleted file mode 100644
--- a/parlai/tasks/talkthewalk/agents.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from .base import TTWBase
-from .worlds import is_action
-
-
-class TouristTeacher(TTWBase):
- def _setup_episode(self, episode):
- ep = []
- example = {'episode_done': False}
- for msg in episode['dialog']:
- text = msg['text']
- if msg['id'] == 'Tourist':
- if self.opt.get('train_actions') or not is_action(text):
- example['labels'] = [text]
- ep.append(example)
- example = {'episode_done': False}
- # add movements to text history if not training on them
- if not self.opt.get('train_actions') and is_action(text):
- example['text'] = example.get('text', '') + text + '\n'
- elif msg['id'] == 'Guide':
- example['text'] = example.get('text', '') + text + '\n'
-
- self.sim.execute(text)
- self.sim.add_view_to_text(example, text)
-
- if len(ep):
- ep[-1]['episode_done'] = True
- return ep
-
-
-class GuideTeacher(TTWBase):
- def _setup_episode(self, episode):
- ep = []
- example = {'episode_done': False, 'text': self.sim.get_text_map()}
- for msg in episode['dialog']:
- text = msg['text']
- if msg['id'] == 'Guide':
- if self.opt.get('train_actions') or not text.startswith('EVALUATE'):
- example['labels'] = [text]
- ep.append(example)
- example = {'episode_done': False}
- elif msg['id'] == 'Tourist' and not is_action(text):
- example['text'] = example.get('text', '') + text + '\n'
-
- self.sim.execute(text)
-
- if len(ep):
- ep[-1]['episode_done'] = True
- return ep
-
-
-class GuideLocalizeTeacher(TTWBase):
- def _setup_episode(self, episode):
- ep = []
- example = {'episode_done': False, 'text': self.sim.get_text_map()}
- for msg in episode['dialog']:
- text = msg['text']
- if msg['id'] == 'Guide':
- example['text'] = example.get('text', '') + text + '\n'
- elif msg['id'] == 'Tourist' and not is_action(text):
- example['text'] = example.get('text', '') + text + '\n'
- example['labels'] = [self.sim.get_agent_location()]
- ep.append(example)
- example = {'episode_done': False}
-
- self.sim.execute(text)
-
- if len(ep):
- ep[-1]['episode_done'] = True
- return ep
-
-
-class DefaultTeacher(TouristTeacher):
- pass
diff --git a/parlai/tasks/talkthewalk/base.py b/parlai/tasks/talkthewalk/base.py
deleted file mode 100644
--- a/parlai/tasks/talkthewalk/base.py
+++ /dev/null
@@ -1,101 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from parlai.core.teachers import FixedDialogTeacher
-from .build import build
-from .worlds import Simulator
-import os
-import json
-
-
-def _path(opt):
- # Build the data if it doesn't exist.
- build(opt)
- dt = opt['datatype'].split(':')[0]
- opt['ttw_data'] = os.path.join(opt['datapath'], 'TalkTheWalk')
- return opt['ttw_data'], os.path.join(opt['ttw_data'], 'talkthewalk.' + dt + '.json')
-
-
-class TTWBase(FixedDialogTeacher):
- @staticmethod
- def add_cmdline_args(argparser):
- agent = argparser.add_argument_group('Talk the Walk Teacher Arguments')
- agent.add_argument(
- '--train-actions',
- type='bool',
- default=False,
- help='Train model to \
- take actions',
- )
-
- def __init__(self, opt, shared=None):
- super().__init__(opt, shared)
- self.opt = opt
- data_path, datafile = _path(opt)
- self.label_candidates = set()
-
- if shared:
- self.data = shared['data']
- self.sim = shared['sim']
- self.label_candidates = shared['cands']
- else:
- self.sim = Simulator(opt)
- self._setup_data(datafile)
- self.reset()
-
- def share(self):
- shared = super().share()
- shared['data'] = self.data
- shared['sim'] = self.sim
- shared['cands'] = self.label_candidates
- return shared
-
- def _setup_episode(self, episode):
- """
- Process one episode in an example.
- """
- raise NotImplementedError('Abstract class: user must implement _setup_episode')
-
- def _setup_data(self, datafile):
- self.episodes = json.load(open(datafile))
- self.data = []
- self.examples_count = 0
-
- for episode in self.episodes:
- if episode:
- init = {
- x: y
- for x, y in episode.items()
- if x
- in [
- 'start_location',
- 'neighborhood',
- 'boundaries',
- 'target_location',
- ]
- }
- self.sim.init_sim(**init)
-
- episode = self._setup_episode(episode)
-
- if episode:
- self.label_candidates = self.label_candidates.union(
- [x['labels'][0] for x in episode]
- )
- self.data.append(episode)
- self.examples_count += len(episode)
- self.label_candidates = list(self.label_candidates)
-
- def get(self, episode_idx, entry_idx=0):
- example = self.data[episode_idx][entry_idx]
- example['text'] = example.get('text', '__silence__')
- example['label_candidates'] = self.label_candidates
- return example
-
- def num_episodes(self):
- return len(self.data)
-
- def num_examples(self):
- return self.examples_count
diff --git a/parlai/tasks/talkthewalk/build.py b/parlai/tasks/talkthewalk/build.py
deleted file mode 100644
--- a/parlai/tasks/talkthewalk/build.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-# Download and build the data if it does not exist.
-
-import parlai.core.build_data as build_data
-import os
-from parlai.core.build_data import DownloadableFile
-
-RESOURCES = [
- DownloadableFile(
- 'https://dl.fbaipublicfiles.com/parlai/projects/talkthewalk/talkthewalk.tgz',
- 'talkthewalk.tgz',
- '314c379fa9d03eb879585b543661c27ddbd836c70d3e440cdd7c5f9b9bf32ed0',
- )
-]
-
-
-def build(opt):
- dpath = os.path.join(opt['datapath'], 'TalkTheWalk')
- version = None
-
- if not build_data.built(dpath, version_string=version):
- print('[building data: ' + dpath + ']')
- if build_data.built(dpath):
- # An older version exists, so remove these outdated files.
- build_data.remove_dir(dpath)
- build_data.make_dir(dpath)
-
- # Download the data.
- for downloadable_file in RESOURCES:
- downloadable_file.download_file(dpath)
-
- # Mark the data as built.
- build_data.mark_done(dpath, version_string=version)
diff --git a/parlai/tasks/talkthewalk/run.py b/parlai/tasks/talkthewalk/run.py
deleted file mode 100644
--- a/parlai/tasks/talkthewalk/run.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-"""
-Runs Talk the Walk Simulation using pretrained model files for tourist and agent.
-"""
-
-from parlai.core.params import ParlaiParser
-from parlai.core.agents import create_agent
-from parlai.core.logs import TensorboardLogger
-from parlai.core.worlds import BatchWorld, HogwildWorld
-from parlai.tasks.talkthewalk.worlds import SimulateWorld
-
-import copy
-import os
-
-from build import build
-
-
-def _path(opt):
- build(opt)
- opt['ttw_data'] = os.path.join(opt['datapath'], 'TalkTheWalk')
-
-
-def setup_args(parser=None):
- if parser is None:
- parser = ParlaiParser(True, True, 'Evaluate a model')
- parser.add_parlai_data_path()
- # Get command line arguments
- parser.add_argument('-tmf', '--tourist-model-file', type=str)
- parser.add_argument('-gmf', '--guide-model-file', type=str)
- parser.add_argument('-ne', '--num-examples', type=int, default=-1)
- parser.add_argument('-d', '--display-examples', type='bool', default=False)
- parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
- parser.add_argument(
- '--metrics',
- type=str,
- default="all",
- help="list of metrics to show/compute, e.g. "
- "ppl,f1,accuracy,hits@1."
- "If 'all' is specified [default] all are shown.",
- )
- TensorboardLogger.add_cmdline_args(parser)
- parser.set_defaults(datatype='valid')
- return parser
-
-
-def run(opt):
- opt = copy.deepcopy(opt)
- _path(opt)
-
- opt['model_file'] = opt['tourist_model_file']
- tourist = create_agent(opt)
- opt['model_file'] = opt['guide_model_file']
- guide = create_agent(opt)
-
- world = SimulateWorld(opt, [tourist, guide])
-
- if opt.get('numthreads', 1) > 1:
- # use hogwild world if more than one thread requested
- # hogwild world will create sub batch worlds as well if bsz > 1
- world = HogwildWorld(opt, world)
- elif opt.get('batchsize', 1) > 1:
- # otherwise check if should use batchworld
- world = BatchWorld(opt, world)
-
- # Show some example dialogs:
- cnt = 0
- while not world.epoch_done():
- cnt += opt.get('batchsize', 1)
- world.parley()
- if opt['display_examples']:
- print(world.display() + "\n~~")
- if opt['num_examples'] > 0 and cnt >= opt['num_examples']:
- break
-
- if world.epoch_done():
- print("EPOCH DONE")
- print(
- 'finished evaluating task using datatype {}'.format(opt.get('datatype', 'N/A'))
- )
- report = world.report()
- print(report)
- return report
-
-
-if __name__ == '__main__':
- parser = setup_args()
- run(parser.parse_args())
diff --git a/parlai/tasks/talkthewalk/worlds.py b/parlai/tasks/talkthewalk/worlds.py
deleted file mode 100644
--- a/parlai/tasks/talkthewalk/worlds.py
+++ /dev/null
@@ -1,301 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.core.worlds import ExecutableWorld
-import json
-import os
-import copy
-from random import choice, randint
-
-BOUNDARIES = {
- 'hellskitchen': [3, 3],
- 'williamsburg': [2, 8],
- 'uppereast': [3, 3],
- 'fidi': [2, 3],
- 'eastvillage': [3, 4],
-}
-
-
-def is_action(msg, forward=False):
- if forward:
- return msg and (msg == 'ACTION:FORWARD' or msg == 'ACTION : FORWARD')
- return msg and msg.startswith('ACTION')
-
-
-class Simulator:
-
- boundaries = None
- neighborhood = None
- agent_location = None
- target_location = None
- landmarks = None
-
- def __init__(self, opt):
- self.map = Map(opt['ttw_data'])
- self.feature_loader = GoldstandardFeatures(self.map)
-
- def _get_random_location(self):
- return [
- randint(self.boundaries[0], self.boundaries[2]),
- randint(self.boundaries[1], self.boundaries[3]),
- randint(0, 3),
- ]
-
- def random_init(self):
- self.neighborhood = choice(list(BOUNDARIES.keys()))
- self.boundaries = [
- randint(0, BOUNDARIES[self.neighborhood][x]) * 2 for x in range(2)
- ]
- self.boundaries = self.boundaries + [x + 3 for x in self.boundaries]
- self.agent_location = self._get_random_location()
- self.target_location = self._get_random_location()
-
- def init_sim(
- self,
- neighborhood=None,
- boundaries=None,
- start_location=None,
- target_location=None,
- ):
-
- if not neighborhood:
- self.random_init()
- else:
- self.boundaries = boundaries
- self.neighborhood = neighborhood
- self.agent_location = start_location
- self.target_location = target_location
-
- self.landmarks, self.target_location = self.map.get_landmarks(
- self.neighborhood, self.boundaries, self.target_location
- )
-
- def get_agent_location(self):
- return str(
- (self.agent_location[0] - self.boundaries[0]) * 4
- + (self.agent_location[1] - self.boundaries[1])
- )
-
- def get_text_map(self):
- L = [y for x in zip(*self.landmarks) for y in x]
- txt = '\n'.join([(str(i) + ':' + ' and '.join(x)) for i, x in enumerate(L)])
- txt += (
- '\n'
- + str(self.target_location[0] * 4 + self.target_location[1])
- + ':Target'
- + '\n'
- )
- return txt
-
- def get_current_view(self):
- return "\n".join(
- [
- 'see:' + x
- for x in self.feature_loader.get(self.neighborhood, self.agent_location)
- ]
- )
-
- def add_view_to_text(self, obs, action=None):
- action = action or obs.get('text')
- if action and is_action(action, forward=True):
- obs['text'] = obs.get('text', '') + self.get_current_view() + '\n'
-
- def execute_and_write(self, obs, action):
- self.execute(action)
- self.add_view_to_text(obs, action)
-
- def execute(self, text):
- """
- move the tourist.
- """
- if not is_action(text):
- return
-
- self.agent_location = self.map.step_aware(
- text, self.agent_location, self.boundaries
- )
-
-
-class SimulateWorld(ExecutableWorld):
-
- boundaries = None
- neighborhood = None
- agent_location = None
- target_location = None
- guide = None
- tourist = None
-
- def __init__(self, opt, agents=None, shared=None):
- super().__init__(opt, agents, shared)
-
- if agents:
- self.tourist = agents[0]
- self.guide = agents[1]
-
- if shared:
- self.sim = shared['sim']
- else:
- self.sim = Simulator(opt)
- self.sim.init_sim()
-
- if agents:
- self.send_map(self.guide)
- self.send_view(self.tourist)
-
- def send_map(self, agent):
- agent.observe({'text': self.sim.get_text_map()})
-
- def send_view(self, agent):
- agent.observe({'text': self.sim.get_current_view()})
-
- def share(self):
- shared = super().share()
- shared['sim'] = self.sim
- return shared
-
- def execute(self, agent, act):
- self.sim.execute(act['text'])
-
- def episode_done(self):
- return self.guide_act.startswith('EVALUATE')
-
- def parley(self):
- act = self.tourist.act()
- while is_action(act['text']):
- self.execute(self.tourist, act)
- obs = self.observe(self.tourist, act)
- if obs is not None:
- self.tourist.observe(obs)
- act = self.tourist.act()
- self.guide.observe(act)
- self.guide_act = self.guide.act()
- obs = self.observe(self.guide, act)
-
- if obs is not None:
- self.tourist.observe(obs)
- self.update_counters()
-
- def observe(self, agent, act):
- self.sim.add_view_to_text(act)
- return act
-
-
-class Map(object):
- """
- Map with landmarks.
- """
-
- def __init__(self, data_dir, include_empty_corners=True):
- super().__init__()
- self.coord_to_landmarks = dict()
- self.include_empty_corners = include_empty_corners
- self.data_dir = data_dir
- self.landmarks = dict()
-
- for neighborhood in BOUNDARIES.keys():
- self.coord_to_landmarks[neighborhood] = [
- [[] for _ in range(BOUNDARIES[neighborhood][1] * 2 + 4)]
- for _ in range(BOUNDARIES[neighborhood][0] * 2 + 4)
- ]
- self.landmarks[neighborhood] = json.load(
- open(os.path.join(data_dir, neighborhood, "map.json"))
- )
- for landmark in self.landmarks[neighborhood]:
- coord = self.transform_map_coordinates(landmark)
- self.coord_to_landmarks[neighborhood][coord[0]][coord[1]].append(
- landmark['type']
- )
-
- def transform_map_coordinates(self, landmark):
- x_offset = {"NW": 0, "SW": 0, "NE": 1, "SE": 1}
- y_offset = {"NW": 1, "SW": 0, "NE": 1, "SE": 0}
-
- coord = (
- landmark['x'] * 2 + x_offset[landmark['orientation']],
- landmark['y'] * 2 + y_offset[landmark['orientation']],
- )
- return coord
-
- def get(self, neighborhood, x, y):
- landmarks = self.coord_to_landmarks[neighborhood][x][y]
- if self.include_empty_corners and len(landmarks) == 0:
- return ['Empty']
- return landmarks
-
- def get_landmarks(self, neighborhood, boundaries, target_loc):
- landmarks = [[[] for _ in range(4)] for _ in range(4)]
- label_index = (target_loc[0] - boundaries[0], target_loc[1] - boundaries[1])
- for x in range(4):
- for y in range(4):
- landmarks[x][y] = self.get(
- neighborhood, boundaries[0] + x, boundaries[1] + y
- )
-
- assert 0 <= label_index[0] < 4
- assert 0 <= label_index[1] < 4
-
- return landmarks, label_index
-
- def get_unprocessed_landmarks(self, neighborhood, boundaries):
- landmark_list = []
- for landmark in self.landmarks[neighborhood]:
- coord = self.transform_map_coordinates(landmark)
- if (
- boundaries[0] <= coord[0] <= boundaries[2]
- and boundaries[1] <= coord[1] <= boundaries[3]
- ):
- landmark_list.append(landmark)
- return landmark_list
-
- def step_aware(self, action, loc, boundaries):
- orientations = ['N', 'E', 'S', 'W']
- steps = dict()
- steps['N'] = [0, 1]
- steps['E'] = [1, 0]
- steps['S'] = [0, -1]
- steps['W'] = [-1, 0]
-
- new_loc = copy.deepcopy(loc)
- if action == 'ACTION:TURNLEFT':
- # turn left
- new_loc[2] = (new_loc[2] - 1) % 4
-
- if action == 'ACTION:TURNRIGHT':
- # turn right
- new_loc[2] = (new_loc[2] + 1) % 4
-
- if action == 'ACTION:FORWARD':
- # move forward
- orientation = orientations[loc[2]]
- new_loc[0] = new_loc[0] + steps[orientation][0]
- new_loc[1] = new_loc[1] + steps[orientation][1]
-
- new_loc[0] = min(max(new_loc[0], boundaries[0]), boundaries[2])
- new_loc[1] = min(max(new_loc[1], boundaries[1]), boundaries[3])
- return new_loc
-
-
-class GoldstandardFeatures:
- def __init__(self, map, orientation_aware=False):
- self.map = map
- self.allowed_orientations = {
- 'NW': [3, 0],
- 'SW': [2, 3],
- 'NE': [0, 1],
- 'SE': [1, 2],
- }
- self.mod2orientation = {(0, 0): 'SW', (1, 0): 'SE', (0, 1): 'NW', (1, 1): 'NE'}
- self.orientation_aware = orientation_aware
-
- def get(self, neighborhood, loc):
- if self.orientation_aware:
- mod = (loc[0] % 2, loc[1] % 2)
- orientation = self.mod2orientation[mod]
- if loc[2] in self.allowed_orientations[orientation]:
- return self.map.get(neighborhood, loc[0], loc[1])
- else:
- return ['Empty']
- else:
- return self.map.get(neighborhood, loc[0], loc[1])
diff --git a/parlai/tasks/task_list.py b/parlai/tasks/task_list.py
--- a/parlai/tasks/task_list.py
+++ b/parlai/tasks/task_list.py
@@ -968,16 +968,6 @@
"tags": ["All", "Visual", "ChitChat", "Dodeca"],
"description": ("Image Chat task to train generative model"),
},
- {
- "id": "TalkTheWalk",
- "display_name": "Talk the Walk",
- "task": "talkthewalk",
- "tags": ["All", "Visual"],
- "description": (
- "Talk the walk dataset."
- "See https://arxiv.org/abs/1807.03367 for more information."
- ),
- },
{
"id": "Wizard_of_Wikipedia",
"display_name": "Wizard_of_Wikipedia",
diff --git a/parlai/tasks/wrapper/agents.py b/parlai/tasks/wrapper/agents.py
--- a/parlai/tasks/wrapper/agents.py
+++ b/parlai/tasks/wrapper/agents.py
@@ -123,15 +123,6 @@ def share(self):
shared['task'] = self.task.share()
return shared
- def shutdown(self):
- """
- Shutdown the subtask.
- """
- self.task.shutdown()
-
- def update_counters(self):
- self.task.update_counters()
-
class LabelToTextTeacher(AbstractWrapperTeacher):
"""
diff --git a/parlai/utils/distributed.py b/parlai/utils/distributed.py
--- a/parlai/utils/distributed.py
+++ b/parlai/utils/distributed.py
@@ -31,32 +31,6 @@
TORCH_AVAILABLE = False
-def validate_params(opt):
- """
- Ensure sane combinations of command line parameters for distributed training.
-
- Raises exceptions if anything is wrong, otherwise returns None.
- """
- if torch.version.__version__.startswith('0.'):
- raise ImportError(
- "Please upgrade to PyTorch >=1.0; "
- "visit https://pytorch.org for instructions."
- )
-
- if opt.get('no_cuda', False):
- raise ValueError('Distributed mode only makes sense when using GPUs.')
-
- if opt.get('numthreads', 1) != 1:
- raise ValueError('--numthreads must be 1 for distributed training.')
-
- if 'train:stream' in opt['datatype'] or 'ordered' in opt['datatype']:
- raise ValueError(
- "You should not combine ordered streaming with distributed training "
- "because all workers will have exactly the same minibatches, "
- "defeating the purpose."
- )
-
-
def is_distributed():
"""
Return if we are in distributed mode.
diff --git a/parlai/utils/thread.py b/parlai/utils/thread.py
deleted file mode 100644
--- a/parlai/utils/thread.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-"""
-Provides utilities useful for multiprocessing.
-
-This includes a ``SharedTable``.
-"""
-
-from multiprocessing import Lock
-from multiprocessing import RawArray # type: ignore
-from collections.abc import MutableMapping
-import ctypes
-import sys
-
-
-class SharedTable(MutableMapping):
- """
- Provides a simple shared-memory table of integers, floats, or strings.
-
- Use this class as follows:
-
- .. code-block:: python
-
- tbl = SharedTable({'cnt': 0})
- with tbl.get_lock():
- tbl['startTime'] = time.time()
- for i in range(10):
- with tbl.get_lock():
- tbl['cnt'] += 1
- """
-
- types = {int: ctypes.c_int, float: ctypes.c_float, bool: ctypes.c_bool}
-
- def __init__(self, init_dict=None):
- """
- Create a shared memory version of each element of the initial dictionary.
-
- Creates an empty array otherwise, which will extend
- automatically when keys are added.
-
- Each different type (all supported types listed in the ``types`` array
- above) has its own array. For each key we store an index into the
- appropriate array as well as the type of value stored for that key.
- """
- # idx is dict of {key: (array_idx, value_type)}
- self.idx = {}
- # arrays is dict of {value_type: array_of_ctype}
- self.arrays = {}
- self.tensors = {}
-
- if init_dict:
- sizes = {typ: 0 for typ in self.types.keys()}
- for k, v in init_dict.items():
- if is_tensor(v):
- # add tensor to tensor dict--don't try to put in rawarray
- self.tensors[k] = v
- continue
- elif type(v) not in sizes:
- raise TypeError(
- 'SharedTable does not support values of '
- + 'type '
- + str(type(v))
- )
- sizes[type(v)] += 1
- # pop tensors from init_dict
- for k in self.tensors.keys():
- init_dict.pop(k)
- # create raw arrays for each type
- for typ, sz in sizes.items():
- self.arrays[typ] = RawArray(self.types[typ], sz)
- # track indices for each key, assign them to their typed rawarray
- idxs = {typ: 0 for typ in self.types.keys()}
- for k, v in init_dict.items():
- val_type = type(v)
- self.idx[k] = (idxs[val_type], val_type)
- if val_type == str:
- v = sys.intern(v)
- self.arrays[val_type][idxs[val_type]] = v
- idxs[val_type] += 1
- # initialize any needed empty arrays
- for typ, ctyp in self.types.items():
- if typ not in self.arrays:
- self.arrays[typ] = RawArray(ctyp, 0)
- self.lock = Lock()
-
- def __len__(self):
- return len(self.idx) + len(self.tensors)
-
- def __iter__(self):
- return iter([k for k in self.idx] + [k for k in self.tensors])
-
- def __contains__(self, key):
- return key in self.idx or key in self.tensors
-
- def __getitem__(self, key):
- """
- Return shared value if key is available.
- """
- if key in self.tensors:
- return self.tensors[key]
- elif key in self.idx:
- idx, typ = self.idx[key]
- return self.arrays[typ][idx]
- else:
- raise KeyError('Key "{}" not found in SharedTable'.format(key))
-
- def __setitem__(self, key, value):
- """
- If key is in table, update it. Otherwise, extend the array to make room.
-
- This uses additive resizing not multiplicative, since the number
- of keys is not likely to change frequently during a run, so do not
- abuse it.
-
- Raises an error if you try to change the type of the value stored for
- that key -- if you need to do this, you must delete the key first.
- """
- val_type = type(value)
- if 'Tensor' in str(val_type):
- self.tensors[key] = value
- return
- if val_type not in self.types:
- raise TypeError('SharedTable does not support type ' + str(type(value)))
- if val_type == str:
- value = sys.intern(value)
- if key in self.idx:
- idx, typ = self.idx[key]
- if typ != val_type:
- raise TypeError(
- (
- 'Cannot change stored type for {key} from '
- + '{v1} to {v2}. You need to del the key first'
- + ' if you need to change value types.'
- ).format(key=key, v1=typ, v2=val_type)
- )
- self.arrays[typ][idx] = value
- else:
- raise KeyError(
- 'Cannot add more keys to the shared table as '
- 'they will not be synced across processes.'
- )
-
- def __delitem__(self, key):
- if key in self.tensors:
- del self.tensors[key]
- elif key in self.idx:
- del self.idx[key]
- else:
- raise KeyError('Key "{}" not found in SharedTable'.format(key))
-
- def __str__(self):
- """
- Return simple dict representation of the mapping.
- """
- lhs = [
- '{k}: {v}'.format(k=key, v=self.arrays[typ][idx])
- for key, (idx, typ) in self.idx.items()
- ]
- rhs = ['{k}: {v}'.format(k=k, v=v) for k, v in self.tensors.items()]
- return '{{{}}}'.format(', '.join(lhs + rhs))
-
- def __repr__(self):
- """
- Return the object type and memory location with the mapping.
- """
- representation = super().__repr__()
- return representation.replace('>', ': {}>'.format(str(self)))
-
- def get_lock(self):
- """
- Return the lock.
- """
- return self.lock
-
-
-def is_tensor(v):
- """
- Return if an object is a torch Tensor, without importing torch.
- """
- if type(v).__module__.startswith('torch'):
- import torch
-
- return torch.is_tensor(v)
- return False
diff --git a/projects/babi/__init__.py b/projects/babi/__init__.py
deleted file mode 100644
--- a/projects/babi/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/beat_the_bot/__init__.py b/projects/beat_the_bot/__init__.py
deleted file mode 100644
--- a/projects/beat_the_bot/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py b/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
--- a/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
+++ b/projects/controllable_dialogue/controllable_seq2seq/controllable_seq2seq.py
@@ -21,7 +21,6 @@
from parlai.core.torch_agent import TorchAgent, Output, Batch
from parlai.utils.misc import round_sigfigs
from parlai.utils.torch import padded_tensor, argsort, neginf
-from parlai.utils.thread import SharedTable
from .modules import Seq2seq, opt_to_kwargs
from .util import ConvAI2History, show_beam_cands, reorder_extrep2gram_qn
from .controls import (
@@ -617,16 +616,7 @@ def share(self):
"""
shared = super().share()
shared['model'] = self.model
- if self.opt.get('numthreads', 1) > 1:
- # we're doing hogwild so share the model too
- if isinstance(self.metrics, dict):
- # move metrics and model to shared memory
- self.metrics = SharedTable(self.metrics)
- self.model.share_memory()
- shared['states'] = { # don't share optimizer states
- 'optimizer_type': self.opt['optimizer']
- }
- shared['metrics'] = self.metrics # do after numthreads check
+ shared['metrics'] = self.metrics
if self.beam_dot_log is True:
shared['beam_dot_dir'] = self.beam_dot_dir
return shared
diff --git a/projects/dialogue_safety/__init__.py b/projects/dialogue_safety/__init__.py
deleted file mode 100644
--- a/projects/dialogue_safety/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/light/__init__.py b/projects/light/__init__.py
deleted file mode 100644
--- a/projects/light/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/mastering_the_dungeon/__init__.py b/projects/mastering_the_dungeon/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/agents/__init__.py b/projects/mastering_the_dungeon/agents/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/agents/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/agents/graph_world2/__init__.py b/projects/mastering_the_dungeon/agents/graph_world2/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/agents/graph_world2/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/agents/graph_world2/agents.py b/projects/mastering_the_dungeon/agents/graph_world2/agents.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/agents/graph_world2/agents.py
+++ /dev/null
@@ -1,783 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-
-from parlai.core.agents import Agent
-from collections import defaultdict as dd
-import spacy
-from .models import ObjectChecklistModel, Seq2SeqModel
-import numpy as np
-from torch.autograd import Variable
-import torch
-from copy import deepcopy
-from projects.mastering_the_dungeon.tasks.graph_world2.graph import (
- DEFAULT_ROOMS,
- DEFAULT_CONTAINERS,
- DEFAULT_AGENTS,
- DEDUP_OBJECTS,
- DEDUP_PROPS,
-)
-
-nlp = spacy.load('en')
-
-
-def parse_action_tuple(insts):
- if insts[0] in [
- 'go',
- 'drop',
- 'wear',
- 'wield',
- 'eat',
- 'drink',
- 'remove',
- 'unwield',
- 'hit',
- ]:
- return insts[0], ' '.join(insts[1:])
- if insts[0] == 'get':
- args = ' '.join(insts[1:]).split(' from ')
- if len(args) == 1:
- return 'get', args[0]
- else:
- return 'get', args[0], args[1]
- if insts[0] == 'give':
- args = ' '.join(insts[1:]).split(' to ')
- return 'give', args[0], args[1]
- if insts[0] == 'take':
- args = ' '.join(insts[1:]).split(' from ')
- return 'take', args[0], args[1]
- if insts[0] == 'put':
- args = ' '.join(insts[1:]).split(' in ')
- return 'put', args[0], args[1]
- assert False, insts
-
-
-def reverse_parse_action(action_tuple):
- if action_tuple[0] == 'stop':
- return 'STOP'
- if action_tuple[0] in [
- 'go',
- 'drop',
- 'wear',
- 'wield',
- 'eat',
- 'drink',
- 'remove',
- 'unwield',
- 'hit',
- ]:
- return '{} {}'.format(action_tuple[0], action_tuple[1])
- if action_tuple[0] == 'get':
- if len(action_tuple) == 2:
- return 'get {}'.format(action_tuple[1])
- else:
- return 'get {} from {}'.format(action_tuple[1], action_tuple[2])
- if action_tuple[0] == 'give':
- return 'give {} to {}'.format(action_tuple[1], action_tuple[2])
- if action_tuple[0] == 'take':
- return 'take {} from {}'.format(action_tuple[1], action_tuple[2])
- if action_tuple[0] == 'put':
- return 'put {} in {}'.format(action_tuple[1], action_tuple[2])
- assert False, action_tuple
-
-
-class DataAgentBase(Agent):
- def __init__(self, opt, shared=None):
- super().__init__(opt, shared)
-
- if not shared:
- self.word2cnt = dd(int)
- else:
- self.word2cnt = shared['word2cnt']
-
- def _tokenize(self, text, lower=True):
- return list(map(lambda x: x.lower_ if lower else x.orth_, list(nlp(text))))
-
- def act(self):
- observation = self.observation
-
- tokens = self._tokenize(observation['text'])
- for token in tokens:
- self.word2cnt[token] += 1
- return {}
-
- def build(self):
- opt = self.opt
- word2cnt = [(k, v) for k, v in self.word2cnt.items()]
- word2cnt.sort(key=lambda x: x[1], reverse=True)
- word_offset, word2index = 2, {}
- word2index['PAD'] = 0
- word2index['UNK'] = 1
- for i in range(opt['vocab_size'] - word_offset):
- if i >= len(word2cnt):
- break
- word = word2cnt[i][0]
- word2index[word] = i + word_offset
- self.word2index = word2index
- self.wordcnt = len(word2index)
-
- def _get_word_index(self, token):
- if token in self.word2index:
- return self.word2index[token]
- return self.word2index['UNK']
-
- def share(self):
- shared = super().share()
- shared['word2cnt'] = self.word2cnt
- return shared
-
- def build_action_id(self):
- action2id = {}
- offset = 0
- for ent in DEFAULT_ROOMS:
- action2id[('go', ent)] = offset
- offset += 1
-
- for ent in DEDUP_OBJECTS + DEFAULT_CONTAINERS:
- action2id[('get', ent)] = offset
- offset += 1
- action2id[('drop', ent)] = offset
- offset += 1
-
- for i, ent in enumerate(DEDUP_OBJECTS):
- if DEDUP_PROPS[i] == 'food':
- action2id[('eat', ent)] = offset
- offset += 1
- elif DEDUP_PROPS[i] == 'drink':
- action2id[('drink', ent)] = offset
- offset += 1
- elif DEDUP_PROPS[i] == 'wearable':
- action2id[('wear', ent)] = offset
- offset += 1
- action2id[('remove', ent)] = offset
- offset += 1
- elif DEDUP_PROPS[i] == 'wieldable':
- action2id[('wield', ent)] = offset
- offset += 1
- action2id[('unwield', ent)] = offset
- offset += 1
-
- for ent_i in DEDUP_OBJECTS + DEFAULT_CONTAINERS:
- for ent_j in DEFAULT_CONTAINERS:
- if ent_i == ent_j:
- continue
- action2id[('put', ent_i, ent_j)] = offset
- offset += 1
- action2id[('get', ent_i, ent_j)] = offset
- offset += 1
-
- for ent_i in DEDUP_OBJECTS + DEFAULT_CONTAINERS:
- for ent_j in DEFAULT_AGENTS:
- if ent_j == 'dragon':
- continue
- action2id[('give', ent_i, ent_j)] = offset
- offset += 1
- action2id[('take', ent_i, ent_j)] = offset
- offset += 1
-
- for ent in DEFAULT_AGENTS:
- if ent != 'dragon':
- action2id[('hit', ent)] = offset
- offset += 1
-
- action2id[('stop',)] = offset
- offset += 1
-
- self.y_dim = offset
- print('y_dim = {}'.format(self.y_dim))
-
- self.action2id = action2id
- self.id2action = [None for _ in range(self.y_dim)]
- for k, v in self.action2id.items():
- self.id2action[v] = k
-
- def build_action_key(self):
- action_key = np.zeros((self.y_dim,), dtype=np.int64)
- for i in range(self.y_dim):
- action_tuple = self.get_action_tuple(i)
- if len(action_tuple) <= 1:
- continue
- my_key = action_tuple[1]
- action_key[i] = self._get_word_index(my_key.replace(' ', '_'))
- self.action_key = action_key
-
- def build_second_action_key(self):
- second_action_key = np.zeros((self.y_dim,), dtype=np.int64)
- for i in range(self.y_dim):
- action_tuple = self.get_action_tuple(i)
- if len(action_tuple) <= 2:
- continue
- my_key = action_tuple[2]
- second_action_key[i] = self._get_word_index(my_key.replace(' ', '_'))
- self.second_action_key = second_action_key
-
- def build_action_type(self):
- action_types = deepcopy(self.ACTION_TYPES)
- action_type = np.zeros((self.y_dim,), dtype=np.int64)
- for i in range(self.y_dim):
- action_tuple = self.get_action_tuple(i)
- my_type = action_tuple[0]
- action_type[i] = action_types.index(my_type)
- self.action_type = action_type
- self.num_actions = len(action_types)
-
- def get_num_actions(self):
- return self.num_actions
-
- def build_check_mapping(self):
- check_to_key = {}
- for i in range(self.y_dim):
- action_tuple = self.get_action_tuple(i)
- if len(action_tuple) == 1:
- check_to_key[action_tuple] = action_tuple[0]
- else:
- check_to_key[action_tuple] = action_tuple[1]
- key_to_check = dd(set)
- for k, v in check_to_key.items():
- key_to_check[v].add(k)
- self.check_to_key, self.key_to_check = check_to_key, key_to_check
-
- check_mapping = np.zeros((self.y_dim, self.y_dim), dtype=np.float32)
- for i in range(self.y_dim):
- for j in range(self.y_dim):
- if (
- self.get_action_tuple(j)
- in key_to_check[check_to_key[self.get_action_tuple(i)]]
- ):
- check_mapping[i, j] = 1.0
- self.check_mapping = check_mapping
-
- def get_check_mapping(self):
- return self.check_mapping
-
- def get_action_tuple(self, id):
- return self.id2action[id]
-
- def get_action_id(self, action):
- return self.action2id[action]
-
- def reverse_parse_action(self, action_tuple):
- return reverse_parse_action(action_tuple)
-
- def get_mask(self, g, mask):
- possible_actions = g.get_possible_actions()
- for action in possible_actions:
- action_tuple = parse_action_tuple(action.split())
- action_id = self.get_action_id(action_tuple)
- mask[action_id] = 1.0
- mask[self.get_action_id(('stop',))] = 1.0
-
-
-class ObjectChecklistDataAgent(DataAgentBase):
- def __init__(self, opt, shared=None):
- super().__init__(opt, shared)
- self.num_rooms = len(DEFAULT_ROOMS)
- self.num_objects = len(DEDUP_OBJECTS)
- self.num_containers = len(DEFAULT_CONTAINERS)
- self.num_npcs = len(DEFAULT_AGENTS) - 1
-
- def build(self):
- self.ACTION_TYPES = [
- 'go',
- 'get',
- 'drop',
- 'eat',
- 'drink',
- 'wear',
- 'wield',
- 'remove',
- 'unwield',
- 'give',
- 'take',
- 'put',
- 'hit',
- 'stop',
- ]
- super().build()
- self.build_action_id()
- self.build_action_key()
- self.build_second_action_key()
- self.build_action_type()
- self.build_check_mapping()
-
- def get_room(self, g):
- return self._get_word_index(
- g.node_to_desc_raw(g.node_contained_in('dragon')).replace(' ', '_')
- )
-
- def _tokenize(self, text, lower=True):
- tokenized = ' '.join(
- list(map(lambda x: x.lower_ if lower else x.orth_, list(nlp(text))))
- )
- for ent in DEFAULT_ROOMS + DEFAULT_CONTAINERS + DEFAULT_AGENTS + DEDUP_OBJECTS:
- tokenized = tokenized.replace(ent, ent.replace(' ', '_'))
- return tokenized.split()
-
- def get_data(self, observations, datatype='train', assert_=True):
- opt = self.opt
- batch_size = len(observations)
- seq_in, seq_out = 0, 0
- tokens_list, inst_list, symb_points_list = [], [], []
- for observation in observations:
- graph, text, actions = (
- observation['graph'],
- observation['text'],
- observation['actions'],
- )
- tokens_list.append(self._tokenize(text))
- seq_in = max(seq_in, len(tokens_list[-1]))
-
- graph = observation['graph']
- inst, symb_points = graph.parse(actions)
- seq_out = max(seq_out, len(symb_points) - 1 + 1) # +1 for stop
- inst_list.append(inst)
- symb_points_list.append(symb_points)
-
- if datatype == 'valid':
- seq_out = opt['max_seq_out']
-
- seq_in = min(seq_in, opt['max_seq_in'])
- y_dim = self.y_dim
- x = np.zeros((batch_size, seq_in), dtype=np.int64)
- current_room = np.zeros((batch_size, seq_out), dtype=np.int64)
- checked = np.zeros((batch_size, seq_out + 1, y_dim), dtype=np.float32)
- y = np.zeros((batch_size, seq_out, y_dim), dtype=np.float32)
- y_mask = np.zeros((batch_size, seq_out, y_dim), dtype=np.float32)
- counter_feat = np.zeros((batch_size, seq_out, y_dim), dtype=np.int64)
-
- graph = observations[0]['graph']
-
- action_key = self.action_key
- action_type = self.action_type
- second_action_key = self.second_action_key
-
- for i in range(batch_size):
- for j, token in enumerate(tokens_list[i]):
- if j >= seq_in:
- break
- x[i, j] = self._get_word_index(token)
-
- inst = inst_list[i]
- g = observations[i]['graph'].copy()
- len_plus_one = len(symb_points_list[i])
-
- action_tuples = []
- for j in range(len_plus_one - 1):
- k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
- action_tuples.append(parse_action_tuple(inst[k:l]))
-
- for j in range(len_plus_one):
- if j < len_plus_one - 1:
- cur_tuple = action_tuples[j]
- y[i, j, self.get_action_id(cur_tuple)] = 1.0
- else:
- stop_tuple = ('stop',)
- y[i, j, self.get_action_id(stop_tuple)] = 1.0
-
- current_room[i, j] = self.get_room(g)
-
- self.get_mask(g, y_mask[i, j])
-
- if j < len_plus_one - 1:
- k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
- parse_success = g.parse_exec(' '.join(inst[k:l]))
- if assert_:
- assert parse_success, (
- ' '.join(inst[k:l]) + ' ' + ' '.join(inst)
- )
-
- counter_feat[i, j + 1] = counter_feat[i, j]
- cur_tuple = action_tuples[j]
- for action_name in self.key_to_check[self.check_to_key[cur_tuple]]:
- action_id = self.get_action_id(action_name)
- counter_feat[i, j + 1, action_id] += 1
-
- counter_feat = np.clip(counter_feat, None, opt['counter_max'])
- return (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- )
-
-
-class Seq2SeqDataAgent(DataAgentBase):
- def build(self):
- self.ACTION_TYPES = [
- 'go',
- 'get',
- 'drop',
- 'eat',
- 'drink',
- 'wear',
- 'wield',
- 'remove',
- 'unwield',
- 'give',
- 'take',
- 'put',
- 'hit',
- 'stop',
- ]
- super().build()
- self.build_action_id()
-
- def get_data(self, observations, datatype='train', assert_=True):
- opt = self.opt
- batch_size = len(observations)
- seq_in, seq_out = 0, 0
- tokens_list, inst_list, symb_points_list = [], [], []
- for observation in observations:
- graph, text, actions = (
- observation['graph'],
- observation['text'],
- observation['actions'],
- )
- tokens_list.append(self._tokenize(text))
- seq_in = max(seq_in, len(tokens_list[-1]))
-
- graph = observation['graph']
- inst, symb_points = graph.parse(actions)
- seq_out = max(seq_out, len(symb_points) - 1 + 1) # +1 for stop
- inst_list.append(inst)
- symb_points_list.append(symb_points)
-
- if datatype == 'valid':
- seq_out = opt['max_seq_out']
-
- seq_in = min(seq_in, opt['max_seq_in'])
- y_dim = self.y_dim
- x = np.zeros((batch_size, seq_in), dtype=np.int64)
- y = np.zeros((batch_size, seq_out, y_dim), dtype=np.float32)
-
- for i in range(batch_size):
- for j, token in enumerate(tokens_list[i]):
- if j >= seq_in:
- break
- x[i, j] = self._get_word_index(token)
-
- inst = inst_list[i]
- g = observations[i]['graph'].copy()
- len_plus_one = len(symb_points_list[i])
-
- action_tuples = []
- for j in range(len_plus_one - 1):
- k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
- action_tuples.append(parse_action_tuple(inst[k:l]))
-
- for j in range(len_plus_one):
- if j < len_plus_one - 1:
- cur_tuple = action_tuples[j]
-
- y[i, j, self.get_action_id(cur_tuple)] = 1.0
-
- else:
- stop_tuple = ('stop',)
- y[i, j, self.get_action_id(stop_tuple)] = 1.0
-
- if j < len_plus_one - 1:
- k, l = symb_points_list[i][j], symb_points_list[i][j + 1]
- parse_success = g.parse_exec(' '.join(inst[k:l]))
- if assert_:
- assert parse_success, ' '.join(inst[k:l])
- return x, y
-
-
-class ModelAgentBase(Agent):
- def __init__(self, opt, shared=None, data_agent=None):
- super().__init__(opt, shared)
- if not shared:
- self.data_agent = data_agent
- params = filter(lambda p: p.requires_grad, self.model.parameters())
- self.optimizer = torch.optim.Adam(params, lr=opt['lr'])
- if opt['cuda']:
- self.model.cuda()
- else:
- self.data_agent = shared['data_agent']
- self.model = shared['model']
- self.optimizer = shared['optimizer']
-
- def share(self):
- shared = super().share()
- shared['data_agent'] = self.data_agent
- shared['model'] = self.model
- shared['optimizer'] = self.optimizer
- return shared
-
- def _get_variable(self, np_a, volatile=False):
- if self.opt['cuda']:
- return Variable(torch.from_numpy(np_a).cuda(), volatile=volatile)
- return Variable(torch.from_numpy(np_a), volatile=volatile)
-
- def _get_f1(self, tokens_1, tokens_2):
- tokens_1, tokens_2 = set(tokens_1), set(tokens_2)
- tp, fp, fn = 0, 0, 0
- for token in tokens_2:
- if token in tokens_1:
- tp += 1
- else:
- fp += 1
- for token in tokens_1:
- if token not in tokens_2:
- fn += 1
- prec = 1.0 * tp / (tp + fp) if tp + fp > 0 else 0.0
- recall = 1.0 * tp / (tp + fn) if tp + fn > 0 else 0.0
- f1 = 2.0 * prec * recall / (prec + recall) if prec + recall > 0 else 0.0
- return f1
-
- def act(self):
- return self.batch_act([self.observation])[0]
-
-
-class ObjectChecklistModelAgent(ModelAgentBase):
- def __init__(self, opt, shared=None, data_agent=None):
- if not shared:
- self.model = ObjectChecklistModel(opt, data_agent)
- super().__init__(opt, shared, data_agent)
-
- def batch_act(self, observations):
- ori_len = len(observations)
- observations = [obv for obv in observations if 'text' in obv]
- if self.opt['datatype'] == 'train' or self.opt['datatype'] == 'pretrain':
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = self.data_agent.get_data(observations)
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = (
- self._get_variable(x),
- self._get_variable(action_key),
- self._get_variable(second_action_key),
- self._get_variable(action_type),
- self._get_variable(current_room),
- self._get_variable(checked),
- self._get_variable(y),
- self._get_variable(y_mask),
- self._get_variable(counter_feat),
- )
-
- loss = self.model.forward_loss(
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- )
- self.optimizer.zero_grad()
- loss.backward()
- self.optimizer.step()
-
- reply = [{'loss': loss.data[0]} for _ in range(ori_len)]
- return reply
- else:
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = self.data_agent.get_data(observations, 'valid')
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = (
- self._get_variable(x, True),
- self._get_variable(action_key, True),
- self._get_variable(second_action_key, True),
- self._get_variable(action_type, True),
- self._get_variable(current_room, True),
- self._get_variable(checked, True),
- self._get_variable(y, True),
- self._get_variable(y_mask, True),
- self._get_variable(counter_feat),
- )
-
- loss = self.model.forward_loss(
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- False,
- )
- reply = [
- {
- 'loss': 0.0,
- 'cnt': 0.0,
- 'acc': 0,
- 'len': 0,
- 'f1': 0,
- 'correct_data': [],
- 'wrong_data': [],
- }
- for _ in range(ori_len)
- ]
-
- check_mapping = self.data_agent.get_check_mapping()
- check_mapping = self._get_variable(check_mapping, True)
- text_out = self.model.forward_predict(
- x,
- action_key,
- second_action_key,
- action_type,
- check_mapping,
- checked,
- [obv['graph'] for obv in observations],
- self.data_agent,
- )
-
- for i in range(len(observations)):
- data_rep = '{} ||| {} ||| {}'.format(
- observations[i]['actions'],
- ' '.join(text_out[i][:-1]),
- observations[i]['text'],
- )
-
- graph_a, graph_b = (
- observations[i]['graph'].copy(),
- observations[i]['graph'].copy(),
- )
- graph_a.parse_exec(observations[i]['actions'])
- graph_b.parse_exec(' '.join(text_out[i][:-1]))
- if graph_a == graph_b:
- reply[i]['acc'] = 1.0
- reply[i]['correct_data'].append(data_rep)
- else:
- reply[i]['wrong_data'].append(data_rep)
-
- inst, symb_points = observations[i]['graph'].parse(
- observations[i]['actions']
- )
- text_gt = []
- for j in range(len(symb_points) - 1):
- k, l = symb_points[j], symb_points[j + 1]
- text_gt.append(' '.join(inst[k:l]))
- reply[i]['f1'] = self._get_f1(text_gt, text_out[i])
-
- reply[i]['loss'] = loss.data[0]
- reply[i]['cnt'] = observations[i]['weight']
- reply[i]['len'] = len(text_gt)
-
- return reply
-
-
-class Seq2SeqModelAgent(ModelAgentBase):
- def __init__(self, opt, shared=None, data_agent=None):
- if not shared:
- self.model = Seq2SeqModel(opt, data_agent)
- super().__init__(opt, shared, data_agent)
-
- def batch_act(self, observations):
- ori_len = len(observations)
- observations = [obv for obv in observations if 'text' in obv]
- if self.opt['datatype'] == 'train':
- x, y = self.data_agent.get_data(observations)
- x, y = self._get_variable(x), self._get_variable(y)
-
- loss = self.model.forward_loss(x, y)
- self.optimizer.zero_grad()
- loss.backward()
- self.optimizer.step()
-
- reply = [{}] * ori_len
- reply[0]['loss'] = loss.data[0]
- return reply
- else:
- x, y = self.data_agent.get_data(observations, 'valid')
- x, y = self._get_variable(x), self._get_variable(y)
-
- loss = self.model.forward_loss(x, y)
- reply = [
- {
- 'loss': 0.0,
- 'cnt': 0.0,
- 'acc': 0,
- 'len': 0,
- 'f1': 0,
- 'correct_data': [],
- 'wrong_data': [],
- }
- for _ in range(ori_len)
- ]
-
- text_out = self.model.forward_predict(
- x, [obv['graph'] for obv in observations], self.data_agent
- )
-
- for i in range(len(observations)):
- data_rep = '{} ||| {} ||| {}'.format(
- observations[i]['actions'],
- ' '.join(text_out[i][:-1]),
- observations[i]['text'],
- )
-
- graph_a, graph_b = (
- observations[i]['graph'].copy(),
- observations[i]['graph'].copy(),
- )
- graph_a.parse_exec(observations[i]['actions'])
- graph_b.parse_exec(' '.join(text_out[i][:-1]))
- if graph_a == graph_b:
- reply[i]['acc'] = 1.0
- reply[i]['correct_data'].append(data_rep)
- else:
- reply[i]['wrong_data'].append(data_rep)
-
- inst, symb_points = observations[i]['graph'].parse(
- observations[i]['actions']
- )
- text_gt = []
- for j in range(len(symb_points) - 1):
- k, l = symb_points[j], symb_points[j + 1]
- text_gt.append(' '.join(inst[k:l]))
- reply[i]['f1'] = self._get_f1(text_gt, text_out[i])
-
- reply[i]['loss'] = loss.data[0]
- reply[i]['cnt'] = observations[i]['weight']
- reply[i]['len'] = len(text_gt)
-
- return reply
diff --git a/projects/mastering_the_dungeon/agents/graph_world2/models.py b/projects/mastering_the_dungeon/agents/graph_world2/models.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/agents/graph_world2/models.py
+++ /dev/null
@@ -1,566 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from torch import nn
-import torch
-from torch.autograd import Variable
-from torch.nn import functional as F
-from copy import deepcopy
-
-
-def mask_out(data, mask):
- return data.index_select(0, mask.nonzero().squeeze())
-
-
-def normalize(data, p=2, dim=1, eps=1e-12):
- return data / torch.norm(data, p, dim).clamp(min=eps).expand_as(data)
-
-
-class ObjectChecklistModel(nn.Module):
- def __init__(self, opt, data_agent):
- super().__init__()
- self.opt = opt
-
- self.input_emb = nn.Embedding(
- data_agent.wordcnt, opt['embedding_dim'], padding_idx=0
- )
- self.action_type_emb = nn.Embedding(
- data_agent.get_num_actions(), opt['action_type_emb_dim']
- )
- self.encoder = nn.GRU(
- opt['embedding_dim'],
- opt['rnn_h'],
- opt['rnn_layers'],
- batch_first=True,
- bidirectional=opt['bidir'],
- )
- self.decoder = nn.Sequential(nn.Linear(opt['rnn_h'], 1))
- self.log_softmax = nn.LogSoftmax()
- self.trans = nn.Sequential(
- nn.Linear(opt['rnn_h'] * (2 if opt['bidir'] else 1), opt['embedding_dim']),
- nn.Tanh(),
- )
- counter_emb = opt['counter_emb_dim']
- if opt['counter_ablation']:
- counter_emb = 0
- self.dec_gru = nn.GRU(
- opt['rnn_h'] * (2 if opt['bidir'] else 1)
- + counter_emb
- + (opt['embedding_dim'] if not opt['room_ablation'] else 0)
- + opt['action_type_emb_dim']
- + opt['action_type_emb_dim']
- + opt['embedding_dim']
- + opt['embedding_dim']
- + opt['rnn_h'] * (2 if opt['bidir'] else 1),
- opt['rnn_h'],
- opt['rnn_layers'],
- batch_first=True,
- )
- self.merge = nn.Sequential(nn.Linear(opt['rnn_h'] * 2, opt['rnn_h']), nn.Tanh())
- self.counter_emb = nn.Embedding(opt['counter_max'] + 1, opt['counter_emb_dim'])
-
- def forward_loss(
- self,
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- average_=True,
- ):
- """
- x: [batch, seq_in], int
- action_key: [y_dim], int
- second_action_key: [y_dim], int
- action_type: [y_dim], int
- current_room: [batch, seq_out], int
- checked: [batch, seq_out + 1, y_dim], float, binary
- y: [batch, seq_out, y_dim], float, binary
- y_mask: [batch, seq_out, y_dim], float, binary
- counter_feat: [batch, seq_out, y_dim], int
- """
-
- opt = self.opt
- batch_size, seq_out, seq_in = x.size(0), y.size(1), x.size(1)
- h_0 = Variable(
- torch.zeros(
- opt['rnn_layers'] * (2 if opt['bidir'] else 1), batch_size, opt['rnn_h']
- )
- )
- if opt['cuda']:
- h_0 = h_0.cuda()
-
- emb_out = self.input_emb(x) # [batch, seq_in, dim]
- enc_out, hidden = self.encoder(
- emb_out, h_0
- ) # [batch, seq_in, h], [layer, batch, h]
-
- action_emb_ori = self.input_emb(action_key.unsqueeze(1)).squeeze(
- 1
- ) # [y_dim, dim]
- y_dim, emb_dim = action_emb_ori.size()
- action_emb = (
- action_emb_ori.unsqueeze(0)
- .expand(batch_size, y_dim, emb_dim)
- .transpose(1, 2)
- ) # [batch, dim, y_dim]
-
- second_action_emb_ori = self.input_emb(second_action_key.unsqueeze(1)).squeeze(
- 1
- ) # [y_dim, dim]
- second_action_emb = (
- second_action_emb_ori.unsqueeze(0)
- .expand(batch_size, y_dim, emb_dim)
- .transpose(1, 2)
- ) # [batch, dim, y_dim]
-
- alpha = F.softmax(
- torch.bmm(emb_out, action_emb).transpose(1, 2).contiguous().view(-1, seq_in)
- + torch.bmm(
- self.trans(enc_out.contiguous().view(batch_size * seq_in, -1))
- .contiguous()
- .view(batch_size, seq_in, -1),
- action_emb,
- )
- .transpose(1, 2)
- .contiguous()
- .view(-1, seq_in)
- ) # [batch * y_dim, seq_in]
- attention = torch.bmm(
- alpha.view(batch_size, y_dim, seq_in), enc_out
- ) # [batch, y_dim, h]
-
- second_alpha = F.softmax(
- torch.bmm(emb_out, second_action_emb)
- .transpose(1, 2)
- .contiguous()
- .view(-1, seq_in)
- + torch.bmm(
- self.trans(enc_out.view(batch_size * seq_in, -1)).view(
- batch_size, seq_in, -1
- ),
- second_action_emb,
- )
- .transpose(1, 2)
- .contiguous()
- .view(-1, seq_in)
- )
- second_attention = torch.bmm(
- second_alpha.view(batch_size, y_dim, seq_in), enc_out
- ) # [batch, y_dim, h]
-
- action_type_out_ori = self.action_type_emb(action_type) # [y_dim, dim]
- action_type_out = action_type_out_ori.unsqueeze(0).expand(
- batch_size, y_dim, opt['action_type_emb_dim']
- )
- action_type_emb_dim = action_type_out.size(2)
-
- room_emb = self.input_emb(current_room) # [batch, seq_out, emb_dim]
-
- loss = 0
- if not average_:
- loss = None
-
- hidden = (
- self.merge(hidden.view(batch_size, -1))
- .unsqueeze(1)
- .expand(batch_size, y_dim, opt['rnn_h'])
- .contiguous()
- .view(1, batch_size * y_dim, -1)
- )
-
- for i in range(seq_out):
- counter_in = self.counter_emb(counter_feat[:, i]) # [batch, y_dim, dim]
- room_in = room_emb[:, i].unsqueeze(1).expand(batch_size, y_dim, emb_dim)
-
- if i == 0:
- y_in = Variable(torch.zeros(batch_size, y_dim))
- if opt['cuda']:
- y_in = y_in.cuda()
- else:
- y_in = y[:, i - 1]
-
- y_second_in = (
- torch.mm(y_in, second_action_emb_ori)
- .unsqueeze(1)
- .expand(batch_size, y_dim, emb_dim)
- ) # [batch, y_dim, dim]
- y_type_in = (
- torch.mm(y_in, action_type_out_ori)
- .unsqueeze(1)
- .expand(batch_size, y_dim, action_type_emb_dim)
- ) # [batch, y_dim, dim]
- y_in = (
- torch.mm(y_in, action_emb_ori)
- .unsqueeze(1)
- .expand(batch_size, y_dim, emb_dim)
- ) # [batch, y_dim, dim]
-
- dec_in_list = [attention]
- if not opt['counter_ablation']:
- dec_in_list.append(counter_in)
- if not opt['room_ablation']:
- dec_in_list.append(room_in)
- dec_in_list.append(action_type_out)
- dec_in_list.append(y_type_in)
- dec_in_list.append(y_second_in)
- dec_in_list.append(y_in)
- dec_in_list.append(second_attention)
- dec_in = torch.cat(dec_in_list, 2)
- dec_out, hidden = self.dec_gru(
- dec_in.view(batch_size * y_dim, 1, -1), hidden
- ) # [batch * y_dim, 1, h], [1, batch * y_dim, h]
-
- dec_out = dec_out.squeeze(1) # [batch * y_dim, h]
-
- dec_out = self.log_softmax(self.decoder(dec_out).view(batch_size, y_dim))
-
- if not average_:
- new_loss = -(dec_out * y[:, i]).sum(1)
- if loss is None:
- loss = new_loss
- else:
- loss += new_loss
- else:
- loss += -(dec_out * y[:, i]).sum()
-
- if not average_:
- return loss
-
- loss /= y.sum()
- return loss
-
- def forward_predict(
- self,
- x,
- action_key,
- second_action_key,
- action_type,
- check_mapping,
- checked,
- graphs,
- data_agent,
- constrain_=True,
- ):
- """
- check_mapping: [y_dim, y_dim], float, binary
- """
- graphs = deepcopy(graphs)
- opt = self.opt
- batch_size, seq_out, seq_in = x.size(0), opt['max_seq_out'], x.size(1)
- h_0 = Variable(
- torch.zeros(
- opt['rnn_layers'] * (2 if opt['bidir'] else 1), batch_size, opt['rnn_h']
- ),
- volatile=True,
- )
- if opt['cuda']:
- h_0 = h_0.cuda()
-
- emb_out = self.input_emb(x)
- enc_out, hidden = self.encoder(emb_out, h_0)
-
- action_emb_ori = self.input_emb(action_key.unsqueeze(1)).squeeze(
- 1
- ) # [y_dim, dim]
- y_dim, emb_dim = action_emb_ori.size()
- action_emb = (
- action_emb_ori.unsqueeze(0)
- .expand(batch_size, y_dim, emb_dim)
- .transpose(1, 2)
- ) # [batch, dim, y_dim]
-
- second_action_emb_ori = self.input_emb(second_action_key.unsqueeze(1)).squeeze(
- 1
- ) # [y_dim, dim]
- second_action_emb = (
- second_action_emb_ori.unsqueeze(0)
- .expand(batch_size, y_dim, emb_dim)
- .transpose(1, 2)
- ) # [batch, dim, y_dim]
-
- alpha = F.softmax(
- torch.bmm(emb_out, action_emb).transpose(1, 2).contiguous().view(-1, seq_in)
- + torch.bmm(
- self.trans(enc_out.contiguous().view(batch_size * seq_in, -1))
- .contiguous()
- .view(batch_size, seq_in, -1),
- action_emb,
- )
- .transpose(1, 2)
- .contiguous()
- .view(-1, seq_in)
- )
- attention = torch.bmm(
- alpha.view(batch_size, y_dim, seq_in), enc_out
- ) # [batch, y_dim, h]
-
- second_alpha = F.softmax(
- torch.bmm(emb_out, second_action_emb)
- .transpose(1, 2)
- .contiguous()
- .view(-1, seq_in)
- + torch.bmm(
- self.trans(enc_out.view(batch_size * seq_in, -1)).view(
- batch_size, seq_in, -1
- ),
- second_action_emb,
- )
- .transpose(1, 2)
- .contiguous()
- .view(-1, seq_in)
- )
- second_attention = torch.bmm(
- second_alpha.view(batch_size, y_dim, seq_in), enc_out
- ) # [batch, y_dim, h]
-
- action_type_out_ori = self.action_type_emb(action_type.unsqueeze(1)).squeeze(
- 1
- ) # [y_dim, dim]
- action_type_out = action_type_out_ori.unsqueeze(0).expand(
- batch_size, y_dim, opt['action_type_emb_dim']
- )
- action_type_emb_dim = action_type_out.size(2)
-
- counter_feat = Variable(torch.zeros(batch_size, y_dim).long())
- if opt['cuda']:
- counter_feat = counter_feat.cuda()
-
- text_out = [[] for _ in range(batch_size)]
-
- hidden = (
- self.merge(hidden.view(batch_size, -1))
- .unsqueeze(1)
- .expand(batch_size, y_dim, opt['rnn_h'])
- .contiguous()
- .view(1, batch_size * y_dim, -1)
- )
-
- y_onehot = None
- for i in range(seq_out):
- room_in = torch.zeros(batch_size).long()
- for j in range(batch_size):
- room_in[j] = data_agent.get_room(graphs[j])
- if opt['cuda']:
- room_in = room_in.cuda()
- room_in = Variable(room_in, volatile=True)
- room_in = self.input_emb(room_in.unsqueeze(1)).expand(
- batch_size, y_dim, emb_dim
- )
-
- if i == 0:
- y_in = Variable(torch.zeros(batch_size, y_dim))
- if opt['cuda']:
- y_in = y_in.cuda()
- else:
- y_in = y_onehot
-
- y_second_in = (
- torch.mm(y_in, second_action_emb_ori)
- .unsqueeze(1)
- .expand(batch_size, y_dim, emb_dim)
- ) # [batch, y_dim, dim]
- y_type_in = (
- torch.mm(y_in, action_type_out_ori)
- .unsqueeze(1)
- .expand(batch_size, y_dim, action_type_emb_dim)
- ) # [batch, y_dim, dim]
- y_in = (
- torch.mm(y_in, action_emb_ori)
- .unsqueeze(1)
- .expand(batch_size, y_dim, emb_dim)
- ) # [batch, y_dim, dim]
-
- counter_in = self.counter_emb(counter_feat) # [batch, y_dim, dim]
-
- dec_in_list = [attention]
- if not opt['counter_ablation']:
- dec_in_list.append(counter_in)
- if not opt['room_ablation']:
- dec_in_list.append(room_in)
- dec_in_list.append(action_type_out)
- dec_in_list.append(y_type_in)
- dec_in_list.append(y_second_in)
- dec_in_list.append(y_in)
- dec_in_list.append(second_attention)
- dec_in = torch.cat(dec_in_list, 2)
-
- dec_out, hidden = self.dec_gru(
- dec_in.view(batch_size * y_dim, 1, -1), hidden
- ) # [batch * y_dim, 1, h], [1, batch * y_dim, h]
-
- y_mask = torch.zeros(batch_size, y_dim)
- for j in range(batch_size):
- data_agent.get_mask(graphs[j], y_mask[j])
- if opt['cuda']:
- y_mask = y_mask.cuda()
- y_mask = Variable(y_mask, volatile=True)
-
- dec_out = dec_out.squeeze(1) # [batch * y_dim, h]
-
- dec_out = self.decoder(dec_out).view(batch_size, y_dim)
-
- if constrain_:
- dec_out = dec_out * y_mask + -1e7 * (1 - y_mask)
- y_out = torch.max(dec_out, 1, keepdim=True)[1].data
- y_onehot = torch.zeros(batch_size, y_dim)
- y_onehot.scatter_(1, y_out.cpu(), 1)
- if opt['cuda']:
- y_onehot = y_onehot.cuda()
- y_onehot = Variable(y_onehot, volatile=True) # [batch, y_dim]
-
- y_out = y_out.squeeze()
- for j in range(batch_size):
- if len(text_out[j]) > 0 and text_out[j][-1] == 'STOP':
- continue
- cur_tuple = data_agent.get_action_tuple(y_out[j])
- text_out[j].append(data_agent.reverse_parse_action(cur_tuple))
- if text_out[j][-1] != 'STOP':
- exec_result = graphs[j].parse_exec(text_out[j][-1])
- if constrain_:
- assert exec_result, text_out[j][-1]
- for action_name in data_agent.key_to_check[
- data_agent.check_to_key[cur_tuple]
- ]:
- action_id = data_agent.get_action_id(action_name)
- counter_feat[j, action_id] = counter_feat[j, action_id] + 1
- counter_feat.data.clamp_(max=opt['counter_max'])
-
- return text_out
-
-
-class Seq2SeqModel(nn.Module):
- def __init__(self, opt, data_agent):
- super().__init__()
- self.opt = opt
-
- self.y_dim = data_agent.y_dim
-
- self.input_emb = nn.Embedding(
- data_agent.wordcnt, opt['embedding_dim'], padding_idx=0
- )
- self.encoder = nn.GRU(
- opt['embedding_dim'], opt['rnn_h'], opt['rnn_layers'], batch_first=True
- )
- self.decoder = nn.GRU(
- self.y_dim, opt['rnn_h'], opt['rnn_layers'], batch_first=True
- )
- self.mapping = nn.Sequential(
- nn.Linear(opt['rnn_h'] * 2, self.y_dim), nn.LogSoftmax()
- )
-
- def forward_loss(self, x, y, average_=True):
- """
- x: [batch, seq_in], int
- y: [batch, seq_out, 3 * target], float, binary
- """
-
- opt = self.opt
- batch_size, seq_out = x.size(0), y.size(1)
- h_0 = Variable(torch.zeros(opt['rnn_layers'], batch_size, opt['rnn_h']))
- if opt['cuda']:
- h_0 = h_0.cuda()
-
- enc_out, hidden = self.encoder(
- self.input_emb(x), h_0
- ) # [batch, seq_in, h], [layer, batch, h]
- loss = 0 if average_ else None
- for i in range(seq_out):
- if i == 0:
- y_in = Variable(torch.zeros(batch_size, 1, y.size(2)))
- if opt['cuda']:
- y_in = y_in.cuda()
- else:
- y_in = y[:, i - 1].unsqueeze(1)
- dec_out, hidden = self.decoder(
- y_in, hidden
- ) # [batch, 1, h], [layer, batch, h]
- alpha = F.softmax(
- torch.bmm(enc_out, hidden[-1].unsqueeze(2))
- ) # [batch, seq_in, 1]
- attention = torch.bmm(enc_out.transpose(1, 2), alpha).squeeze(
- 2
- ) # [batch, h]
- dec_out = self.mapping(
- torch.cat([attention, dec_out.squeeze(1)], dim=1)
- ) # [batch, y_dim]
- if average_:
- loss += -(dec_out * y[:, i]).sum()
- else:
- new_loss = -(dec_out * y[:, i]).sum(1)
- if loss is None:
- loss = new_loss
- else:
- loss += new_loss
-
- if not average_:
- return loss
- loss /= y.sum()
-
- return loss
-
- def forward_predict(self, x, graphs, data_agent, constrain_=True):
- graphs = deepcopy(graphs)
- opt = self.opt
- batch_size = x.size(0)
- h_0 = Variable(torch.zeros(opt['rnn_layers'], batch_size, opt['rnn_h']))
- if opt['cuda']:
- h_0 = h_0.cuda()
-
- enc_out, hidden = self.encoder(
- self.input_emb(x), h_0
- ) # [batch, seq_in, h], [layer, batch, h]
- text_out = [[] for _ in range(batch_size)]
- y_onehot = None
- for i in range(opt['max_seq_out']):
- if i == 0:
- y_in = Variable(torch.zeros(batch_size, 1, self.y_dim))
- if opt['cuda']:
- y_in = y_in.cuda()
- else:
- y_in = y_onehot.unsqueeze(1)
-
- dec_out, hidden = self.decoder(y_in, hidden)
- alpha = F.softmax(torch.bmm(enc_out, hidden[-1].unsqueeze(2)))
- attention = torch.bmm(enc_out.transpose(1, 2), alpha).squeeze(2)
- dec_out = self.mapping(
- torch.cat([attention, dec_out.squeeze(1)], dim=1)
- ) # [batch, y_dim]
-
- y_mask = torch.zeros(batch_size, self.y_dim)
- for j in range(batch_size):
- data_agent.get_mask(graphs[j], y_mask[j])
- if opt['cuda']:
- y_mask = y_mask.cuda()
- y_mask = Variable(y_mask, volatile=True)
- if constrain_:
- dec_out = dec_out * y_mask + -1e7 * (1 - y_mask)
-
- y_out = torch.max(dec_out, 1, keepdim=True)[1].data # [batch, 1]
- y_onehot = torch.zeros(batch_size, self.y_dim) # [batch, y_dim]
- y_onehot.scatter_(1, y_out.cpu(), 1)
- y_onehot = Variable(y_onehot)
- if opt['cuda']:
- y_onehot = y_onehot.cuda()
-
- y_out = y_out.squeeze()
- for j in range(batch_size):
- if len(text_out[j]) > 0 and text_out[j][-1] == 'STOP':
- continue
- text_out[j].append(
- data_agent.reverse_parse_action(
- data_agent.get_action_tuple(y_out[j])
- )
- )
- if text_out[j][-1] != 'STOP':
- exec_result = graphs[j].parse_exec(text_out[j][-1])
- if constrain_:
- assert exec_result, text_out[j][-1]
- return text_out
diff --git a/projects/mastering_the_dungeon/mturk/__init__.py b/projects/mastering_the_dungeon/mturk/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/mturk/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/mturk/tasks/MTD/__init__.py b/projects/mastering_the_dungeon/mturk/tasks/MTD/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/mturk/tasks/MTD/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/mturk/tasks/MTD/run.py b/projects/mastering_the_dungeon/mturk/tasks/MTD/run.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/mturk/tasks/MTD/run.py
+++ /dev/null
@@ -1,902 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-
-from parlai.core.params import ParlaiParser
-import os
-import time
-from joblib import Parallel, delayed
-from os.path import join
-import pickle
-import random
-from copy import copy, deepcopy
-from collections import defaultdict as dd
-import traceback
-import numpy as np
-from projects.mastering_the_dungeon.projects.graph_world2.train import (
- additional_validate,
- ablation_exp,
-)
-import sys
-
-import projects.mastering_the_dungeon as parlai_internal
-
-sys.modules['parlai_internal'] = parlai_internal
-
-START_TASK_TIMEOUT = 10 * 60
-parent_dir = os.path.dirname(os.path.abspath(__file__))
-checkpoint_dir = join(parent_dir, 'checkpoint')
-cur_dir = join(parent_dir, 'tmp')
-
-if not os.path.exists(checkpoint_dir):
- os.makedirs(checkpoint_dir)
-if not os.path.exists(cur_dir):
- os.makedirs(cur_dir)
-
-
-def print_and_log(s):
- print(s)
- f_log = open(join(cur_dir, 'mtd_log.txt'), 'a+')
- f_log.write(s + '\n')
- f_log.close()
-
-
-def log_only(s):
- f_log = open(join(cur_dir, 'mtd_log.txt'), 'a+')
- f_log.write(s + '\n')
- f_log.close()
-
-
-def get_output_dir(opt, round_index, version_num=None):
- output_dir = join(
- opt['datapath'], 'graph_world2_v{}_r{}'.format(version_num, round_index)
- )
- if not os.path.exists(output_dir):
- os.makedirs(output_dir)
- return output_dir
-
-
-def get_init_data(
- opt, base_only=False, delta_only=False, version_num=None, resplit=True
-):
- RESPLIT_PREFIX = 'resplit_' if resplit else ''
- TRAIN_DIR, VALID_DIR = (
- join(opt['datapath'], 'graph_world2', 'train'),
- join(opt['datapath'], 'graph_world2', 'valid'),
- )
-
- if delta_only and opt['start_round'] > 1:
- output_dir = get_output_dir(
- opt, opt['start_round'] - 1, version_num=version_num
- )
- delta_train_data = pickle.load(
- open(
- join(output_dir, '{}delta_train_data.pkl'.format(RESPLIT_PREFIX)), 'rb'
- )
- )
- delta_valid_data = pickle.load(
- open(
- join(output_dir, '{}delta_valid_data.pkl'.format(RESPLIT_PREFIX)), 'rb'
- )
- )
- return delta_train_data, delta_valid_data
-
- def read_data(data_path):
- data = []
- for filename in os.listdir(data_path):
- if filename.endswith('pkl'):
- data.append(pickle.load(open(join(data_path, filename), 'rb')))
- return data
-
- train_data, valid_data = read_data(TRAIN_DIR), read_data(VALID_DIR)
- if base_only or delta_only:
- return train_data, valid_data
-
- for i in range(1, opt['start_round']):
- try:
- output_dir = get_output_dir(opt, i, version_num=version_num)
- delta_train_data = pickle.load(
- open(
- join(output_dir, '{}delta_train_data.pkl'.format(RESPLIT_PREFIX)),
- 'rb',
- )
- )
- delta_valid_data = pickle.load(
- open(
- join(output_dir, '{}delta_valid_data.pkl'.format(RESPLIT_PREFIX)),
- 'rb',
- )
- )
- train_data.extend(delta_train_data)
- valid_data.extend(delta_valid_data)
- except:
- print_and_log('Error: {}'.format(traceback.format_exc()))
- return train_data, valid_data
-
-
-def get_rest_data(opt, version_num=None):
- output_dir = get_output_dir(opt, opt['start_round'] - 1, version_num=version_num)
- rest_data = pickle.load(open(join(output_dir, 'resplit_rest_data.pkl'), 'rb'))
- return rest_data
-
-
-def overall_split(opt):
- VERSIONS = [13, 14, 15, 'BASELINE_2']
-
- min_for_round = [1000 for _ in range(5)]
-
- for version in VERSIONS:
- for round_index in range(1, 6):
- output_dir = get_output_dir(opt, round_index, version_num=version)
- filtered_data_list = pickle.load(
- open(join(output_dir, 'filtered_data_list.pkl'), 'rb')
- )
- min_for_round[round_index - 1] = min(
- min_for_round[round_index - 1], len(filtered_data_list)
- )
-
- def flatten(a):
- return [e for aa in a for e in aa]
-
- for round_index in range(1, 6):
- min_num = min_for_round[round_index - 1]
- train_num, test_num = int(min_num * 0.8), int(min_num * 0.2)
- for version in VERSIONS:
- output_dir = get_output_dir(opt, round_index, version_num=version)
- filtered_data_list = pickle.load(
- open(join(output_dir, 'filtered_data_list.pkl'), 'rb')
- )
- random.seed(13)
- random.shuffle(filtered_data_list)
- train_list, test_list, rest_list = (
- flatten(filtered_data_list[:train_num]),
- flatten(filtered_data_list[train_num : train_num + test_num]),
- flatten(filtered_data_list[train_num + test_num :]),
- )
- pickle.dump(
- train_list, open(join(output_dir, 'resplit_delta_train_data.pkl'), 'wb')
- )
- pickle.dump(
- test_list, open(join(output_dir, 'resplit_delta_valid_data.pkl'), 'wb')
- )
- pickle.dump(
- rest_list, open(join(output_dir, 'resplit_rest_data.pkl'), 'wb')
- )
-
-
-def train(
- opt,
- round_index,
- machine_index,
- file_index,
- train_data,
- valid_data,
- valid_weights=None,
- save_all=True,
- return_acc_len=False,
- seq2seq=False,
-):
- if valid_weights is not None:
- assert len(valid_data) == len(valid_weights), (
- len(valid_data),
- len(valid_weights),
- )
-
- train_filename = join(cur_dir, '{}_{}_train.pkl'.format(round_index, file_index))
- valid_filename = join(cur_dir, '{}_{}_valid.pkl'.format(round_index, file_index))
- out_filename = join(cur_dir, '{}_{}_out.txt'.format(round_index, file_index))
-
- pickle.dump(train_data, open(train_filename, 'wb'))
- pickle.dump(valid_data, open(valid_filename, 'wb'))
-
- if valid_weights is not None:
- weight_filename = join(
- cur_dir, '{}_{}_weights.pkl'.format(round_index, file_index)
- )
- pickle.dump(valid_weights, open(weight_filename, 'wb'))
- else:
- weight_filename = ''
-
- if save_all:
- model_filename = join(
- cur_dir, '{}_{}_model.pkl'.format(round_index, file_index)
- )
- data_agent_filename = join(
- cur_dir, '{}_{}_data_agent.pkl'.format(round_index, file_index)
- )
- wrong_data_filename = join(
- cur_dir, '{}_{}_wrong_data.pkl'.format(round_index, file_index)
- )
- else:
- model_filename = ''
- data_agent_filename = ''
- wrong_data_filename = ''
-
- new_opt = {
- 'max_iter': opt['max_iter'],
- 'num_runs': opt['num_runs'],
- 'train_data_file': train_filename,
- 'valid_data_file': valid_filename,
- 'perf_out_file': out_filename,
- 'weight_file': weight_filename,
- 'model_file': model_filename,
- 'data_agent_file': data_agent_filename,
- 'wrong_data_file': wrong_data_filename,
- 'task': 'projects.mastering_the_dungeon.tasks.graph_world2.agents',
- 'batchsize': 1,
- 'rnn_h': opt['rnn_h'],
- 'embedding_dim': opt['embedding_dim'],
- 'cuda': True,
- 'seq2seq': seq2seq,
- }
- os.chdir(parent_dir)
-
- job_num = machine_index % opt['num_machines']
- job_in_file = '../../../projects/graph_world2/job-in-{}.pkl'.format(job_num)
- job_out_file = '../../../projects/graph_world2/job-out-{}.txt'.format(job_num)
-
- if os.path.isfile(job_out_file):
- os.remove(job_out_file)
- if os.path.isfile(job_in_file):
- os.remove(job_in_file)
-
- with open(job_in_file, 'wb') as f_job_in:
- pickle.dump(new_opt, f_job_in)
-
- start_time = time.time()
- while True:
- time.sleep(5)
- if not os.path.isfile(job_in_file):
- break
- if time.time() - start_time > 60 * 5:
- try:
- os.remove(job_in_file)
- except:
- pass
- print_and_log('job {} timeout'.format(job_num))
- if return_acc_len:
- return 0.0, [0.0 for _ in range(4)]
- return 0.0
-
- start_time = time.time()
- while True:
- time.sleep(5)
- if time.time() - start_time > opt['job_timeout']:
- print_and_log(
-                'job {} timeout after {} seconds, bad gpu'.format(
- job_num, opt['job_timeout']
- )
- )
- if return_acc_len:
- return 0.0, [0.0 for _ in range(4)]
- return 0.0
-
- if os.path.isfile(job_out_file):
- with open(job_out_file) as f_job_out:
- result = f_job_out.read().strip()
- os.remove(job_out_file)
- print_and_log('job {} exited: {}'.format(job_num, result))
- if result != '0':
- print_and_log('job {} abandoned'.format(job_num))
- if return_acc_len:
- return 0.0, [0.0 for _ in range(4)]
- return 0.0
- try:
- f_perf = open(out_filename)
- perf = float(f_perf.readline().strip())
- f_perf.close()
- print_and_log('job {} perf: {}'.format(job_num, perf))
- if return_acc_len:
- f_acc_len = open(out_filename + '.len')
- acc_len = list(map(float, f_acc_len.readline().strip().split()))
- f_acc_len.close()
- print_and_log('job {} acc_len: {}'.format(job_num, acc_len))
- return perf, acc_len
- return perf
- except:
- print_and_log('Error: {}'.format(traceback.format_exc()))
- if return_acc_len:
- return 0.0, [0.0 for _ in range(4)]
- return 0.0
-
-
-def batch_train(
- opt,
- round_index,
- round_train_data,
- round_valid_data,
- round_valid_weights=None,
- save_all=True,
- file_indices=None,
- return_acc_len=False,
- seq2seq=False,
-):
- i = 0
- perfs = []
- M = len(round_train_data)
- while i < M:
- j = min(i + opt['num_machines'], M)
- cur_perfs = Parallel(n_jobs=j - i, backend='threading')(
- delayed(train)(
- opt,
- round_index,
- train_index,
- file_indices[train_index] if file_indices else train_index,
- round_train_data[train_index],
- round_valid_data[train_index],
- valid_weights=round_valid_weights[train_index]
- if round_valid_weights
- else None,
- save_all=save_all,
- return_acc_len=return_acc_len,
- seq2seq=seq2seq,
- )
- for train_index in range(i, j)
- )
- perfs.extend(cur_perfs)
- i = j
-
- error_indices, valid_indices = [], []
- for i, perf in enumerate(perfs):
- if perf == 0.0 or type(perf) == tuple and perf[0] == 0.0:
- error_indices.append(i)
- elif i < opt['num_machines']:
- valid_indices.append(i)
-
- M = len(error_indices)
- TMP_NUM_MACHINES = len(valid_indices)
- if M > 0 and TMP_NUM_MACHINES > 0:
- i = 0
- error_perfs = []
- while i < M:
- j = min(i + TMP_NUM_MACHINES, M)
- cur_perfs = Parallel(n_jobs=j - i, backend='threading')(
- delayed(train)(
- opt,
- round_index,
- valid_indices[train_index],
- file_indices[error_indices[train_index]]
- if file_indices
- else error_indices[train_index],
- round_train_data[error_indices[train_index]],
- round_valid_data[error_indices[train_index]],
- valid_weights=round_valid_weights[error_indices[train_index]]
- if round_valid_weights
- else None,
- save_all=save_all,
- return_acc_len=return_acc_len,
- seq2seq=seq2seq,
- )
- for train_index in range(i, j)
- )
- error_perfs.extend(cur_perfs)
- i = j
- for i in range(M):
- perfs[error_indices[i]] = error_perfs[i]
-
- return perfs
-
-
-def batch_valid(opt, round_index, constrain_=True):
- perfs = []
- for i in range(100000):
- model_filename = join(cur_dir, '{}_{}_model.pkl'.format(round_index, i))
- if not os.path.exists(model_filename):
- break
- data_agent_filename = join(
- cur_dir, '{}_{}_data_agent.pkl'.format(round_index, i)
- )
- valid_filename = join(cur_dir, '{}_{}_valid.pkl'.format(round_index, i))
- model = pickle.load(open(model_filename, 'rb'))
- data_agent = pickle.load(open(data_agent_filename, 'rb'))
- perf = additional_validate(
- opt, model, data_agent, valid_filename, constrain_=constrain_
- )
- print('batch_valid {} {}'.format(round_index, i))
- perfs.append(perf)
- return perfs
-
-
-def batch_valid_with_data(
- opt, round_index, file_index, valid_filename, constrain_=True
-):
- model_filename = join(cur_dir, '{}_{}_model.pkl'.format(round_index, file_index))
- data_agent_filename = join(
- cur_dir, '{}_{}_data_agent.pkl'.format(round_index, file_index)
- )
- model = pickle.load(open(model_filename, 'rb'))
- data_agent = pickle.load(open(data_agent_filename, 'rb'))
- perf = additional_validate(
- opt, model, data_agent, valid_filename, constrain_=constrain_, no_hits=True
- )
- return perf
-
-
-def overall_ablation(opt):
- VERSIONS = [13, 14, 15, 'BASELINE_2']
- NAMES = ['MTD LIMIT', 'MTD', 'MTD NO MODEL FEEDBACK', 'BASELINE']
-
- all_train_data, all_valid_data, all_rest_data = [], [], []
- for v_id, version in enumerate(VERSIONS):
- version_train_data, version_valid_data, version_rest_data = [], [], []
- for i in range(2, 7):
- final_opt = deepcopy(opt)
- final_opt['start_round'] = i
- final_opt['datapath'] = final_opt['datapath'].replace('/data', '/new_data')
- cur_train_data, cur_valid_data = get_init_data(
- final_opt, delta_only=True, version_num=version, resplit=True
- )
- cur_rest_data = get_rest_data(final_opt, version_num=version)
- version_train_data.extend(cur_train_data)
- version_valid_data.extend(cur_valid_data)
- version_rest_data.extend(cur_rest_data)
- print(
- '{}: train {} test {} rest {}'.format(
- NAMES[v_id],
- len(version_train_data),
- len(version_valid_data),
- len(version_rest_data),
- )
- )
- all_train_data.append(version_train_data)
- all_valid_data.append(version_valid_data)
- all_rest_data.append(version_rest_data)
-
- init_train_data, init_valid_data = get_init_data(opt, base_only=True)
- init_valid_data.extend(init_train_data)
- print('init: test {}'.format(len(init_valid_data)))
-
- final_valid_data = copy(init_valid_data)
- for i in range(len(all_rest_data)):
- final_valid_data.extend(all_valid_data[i])
- final_valid_data.extend(all_rest_data[i])
- print('all: test {}'.format(len(final_valid_data)))
-
- final_train_data = all_train_data[1]
-
- train_filename = join(cur_dir, 'ABLATION_train.pkl')
- valid_filename = join(cur_dir, 'ABLATION_valid.pkl')
-
- pickle.dump(final_train_data, open(train_filename, 'wb'))
- pickle.dump(final_valid_data, open(valid_filename, 'wb'))
-
- model_filename = join(cur_dir, 'ABLATION_model.pkl')
- data_agent_filename = join(cur_dir, 'ABLATION_data_agent.pkl')
- wrong_data_filename = join(cur_dir, 'ABLATION_wrong_data.pkl')
-
- names = ['ac', 'ac - counter', 'ac - counter - room', 'seq2seq']
- seq2seq_options = [False, False, False, True]
- counter_ablations = [False, True, True, False]
- room_ablations = [False, False, True, False]
-
- for i in range(1, 3):
- new_opt = deepcopy(opt)
- new_opt.update(
- {
- 'max_iter': opt['max_iter'],
- 'num_runs': 1,
- 'train_data_file': train_filename,
- 'valid_data_file': valid_filename,
- 'model_file': model_filename,
- 'data_agent_file': data_agent_filename,
- 'wrong_data_file': wrong_data_filename,
- 'task': 'parlai_internal.tasks.graph_world2.agents',
- 'batchsize': 1,
- 'rnn_h': opt['rnn_h'],
- 'embedding_dim': opt['embedding_dim'],
- 'cuda': True,
- 'seq2seq': seq2seq_options[i],
- 'counter_ablation': counter_ablations[i],
- 'room_ablation': room_ablations[i],
- 'weight_file': '',
- 'datatype': 'train',
- }
- )
- perf = ablation_exp(new_opt)
- print(names[i], perf)
-
-
-def overall_run(opt, seq2seq=False):
- VERSIONS = [13, 14, 15, 'BASELINE_2']
- NAMES = ['MTD LIMIT', 'MTD', 'MTD NO MODEL FEEDBACK', 'BASELINE']
-
- all_train_data, all_valid_data, all_rest_data = [], [], []
- for v_id, version in enumerate(VERSIONS):
- version_train_data, version_valid_data, version_rest_data = [], [], []
- # for i in range(2, 7):
- for i in range(2, 3):
- final_opt = deepcopy(opt)
- final_opt['start_round'] = i
- cur_train_data, cur_valid_data = get_init_data(
- final_opt, delta_only=True, version_num=version, resplit=True
- )
- cur_rest_data = get_rest_data(final_opt, version_num=version)
- version_train_data.extend(cur_train_data)
- version_valid_data.extend(cur_valid_data)
- version_rest_data.extend(cur_rest_data)
- print(
- '{}: train {} test {} rest {}'.format(
- NAMES[v_id],
- len(version_train_data),
- len(version_valid_data),
- len(version_rest_data),
- )
- )
- all_train_data.append(version_train_data)
- all_valid_data.append(version_valid_data)
- all_rest_data.append(version_rest_data)
-
- init_train_data, init_valid_data = get_init_data(opt, base_only=True)
- init_valid_data.extend(init_train_data)
- print('init: test {}'.format(len(init_valid_data)))
-
- final_valid_data = copy(init_valid_data)
- for i in range(len(all_rest_data)):
- final_valid_data.extend(all_valid_data[i])
- final_valid_data.extend(all_rest_data[i])
- print('all: test {}'.format(len(final_valid_data)))
-
- round_train_data, round_valid_data = [], []
- M = opt['num_runs']
- for cur_train_data in all_train_data:
- for cur_valid_data in [init_valid_data, final_valid_data]:
- for _ in range(M):
- round_train_data.append(cur_train_data)
- round_valid_data.append(cur_valid_data)
-
- final_opt = deepcopy(opt)
- final_opt['num_runs'] = 1
- perfs = batch_train(
- final_opt,
- 'OVERALL_TEST' if not seq2seq else 'SEQ2SEQ_TEST',
- round_train_data,
- round_valid_data,
- save_all=True,
- return_acc_len=True,
- seq2seq=seq2seq,
- )
-
- def get_acc_and_acc_len(perfs):
- acc, acc_len, cnts = 0.0, [0.0 for _ in range(4)], 0
- accs = []
- for i, (cur_acc, cur_acc_len) in enumerate(perfs):
- if cur_acc == 0:
- continue
- acc += cur_acc
- accs.append(cur_acc)
- for j in range(4):
- acc_len[j] += cur_acc_len[j]
- cnts += 1
- if cnts == 0:
- return acc, acc_len
- acc /= cnts
- for j in range(4):
- acc_len[j] /= cnts
- stddev = np.std(np.array(accs), dtype=np.float64)
- return acc, acc_len, stddev
-
- start, end = 0, M
- for train_name in NAMES:
- for valid_name in ['INIT', 'ALL']:
- sub_perfs = perfs[start:end]
- acc, acc_len, stddev = get_acc_and_acc_len(sub_perfs)
- print_and_log(
- '{} on {}: acc {} stddev {} acc_len {}'.format(
- train_name, valid_name, acc, stddev, acc_len
- )
- )
- log_only('{} on {}: {}'.format(train_name, valid_name, sub_perfs))
- start = end
- end = start + M
-
-
-def overall_run_data_breakdown(opt, seq2seq=False):
- VERSIONS = [13, 14, 15, 'BASELINE_2']
- NAMES = ['MTD LIMIT', 'MTD', 'MTD NO MODEL FEEDBACK', 'BASELINE']
-
- all_train_data, all_valid_data, all_rest_data = [], [], []
- for v_id, version in enumerate(VERSIONS):
- version_train_data, version_valid_data, version_rest_data = [], [], []
- for i in range(2, 7):
- final_opt = deepcopy(opt)
- final_opt['start_round'] = i
- cur_train_data, cur_valid_data = get_init_data(
- final_opt, delta_only=True, version_num=version, resplit=True
- )
- cur_rest_data = get_rest_data(final_opt, version_num=version)
- version_train_data.extend(cur_train_data)
- version_valid_data.extend(cur_valid_data)
- version_rest_data.extend(cur_rest_data)
- print(
- '{}: train {} test {} rest {}'.format(
- NAMES[v_id],
- len(version_train_data),
- len(version_valid_data),
- len(version_rest_data),
- )
- )
- all_train_data.append(version_train_data)
- all_valid_data.append(version_valid_data)
- all_rest_data.append(version_rest_data)
-
- init_train_data, init_valid_data = get_init_data(opt, base_only=True)
- init_valid_data.extend(init_train_data)
- print('init: test {}'.format(len(init_valid_data)))
-
- final_valid_data = copy(init_valid_data)
- for i in range(len(all_rest_data)):
- final_valid_data.extend(all_valid_data[i])
- final_valid_data.extend(all_rest_data[i])
- print('all: test {}'.format(len(final_valid_data)))
-
- round_train_data, round_valid_data = [], []
- M = opt['num_runs']
- for cur_train_data in all_train_data:
- for cur_valid_data in [init_valid_data, final_valid_data]:
- for _ in range(M):
- round_train_data.append(cur_train_data)
- round_valid_data.append(cur_valid_data)
-
- def get_scores_and_std(perfs):
- def _get_mean(l):
- return sum(l) * 1.0 / len(l)
-
- def _get_std(l):
- return np.std(np.array(l), dtype=np.float64)
-
- ret = dd(list)
- for perf in perfs:
- for k, v in perf.items():
- ret[k].append(v)
- final_ret = {}
- for k, v in ret.items():
- final_ret[k + '_mean'] = _get_mean(v)
- final_ret[k + '_std'] = _get_std(v)
- return final_ret
-
- file_index = 0
- my_valid_data = [
- all_valid_data[0] + all_rest_data[0],
- all_valid_data[-1] + all_rest_data[-1],
- ]
- my_valid_names = ['MTD', 'BASELINE']
- for train_name in NAMES:
- for valid_name in ['INIT', 'ALL']:
- if train_name not in ['MTD', 'BASELINE'] or valid_name == 'INIT':
- file_index += M
- continue
- for valid_id in range(2):
- valid_filename = join(cur_dir, 'DATA_BREAK_data.pkl')
- pickle.dump(my_valid_data[valid_id], open(valid_filename, 'wb'))
- perfs = []
- for m in range(M):
- perf = batch_valid_with_data(
- opt,
- 'OVERALL_TEST' if not seq2seq else 'SEQ2SEQ_TEST',
- file_index + m,
- valid_filename,
- constrain_=True,
- )
- perfs.append(perf)
- scores = get_scores_and_std(perfs)
- print_and_log(
- '{} on {}: {}'.format(train_name, my_valid_names[valid_id], scores)
- )
- file_index += M
-
-
-def overall_add_val(opt, seq2seq=False, constrain_=True):
- NAMES = ['MTD LIMIT', 'MTD', 'MTD NO MODEL FEEDBACK', 'BASELINE']
-
- def get_scores_and_std(perfs):
- def _get_mean(l):
- return sum(l) * 1.0 / len(l)
-
- def _get_std(l):
- return np.std(np.array(l), dtype=np.float64)
-
- ret = dd(list)
- for perf in perfs:
- for k, v in perf.items():
- ret[k].append(v)
- final_ret = {}
- for k, v in ret.items():
- final_ret[k + '_mean'] = _get_mean(v)
- final_ret[k + '_std'] = _get_std(v)
- return final_ret
-
- perfs = batch_valid(
- opt, 'OVERALL_TEST' if not seq2seq else 'SEQ2SEQ_TEST', constrain_=constrain_
- )
- M = opt['num_runs']
- start, end = 0, M
- for train_name in NAMES:
- for valid_name in ['INIT', 'ALL']:
- sub_perfs = perfs[start:end]
- scores = get_scores_and_std(sub_perfs)
- print_and_log('{} on {}: {}'.format(train_name, valid_name, scores))
- start = end
- end = start + M
-
-
-def overall_run_rounds_breakdown(opt, seq2seq=False):
- VERSIONS = [13, 14, 15, 'BASELINE_2']
- NAMES = ['MTD LIMIT', 'MTD', 'MTD NO MODEL FEEDBACK', 'BASELINE']
-
- all_train_data, all_valid_data, all_rest_data = [], [], []
- for v_id, version in enumerate(VERSIONS):
- version_train_data, version_valid_data, version_rest_data = [], [], []
- for i in range(2, 7):
- final_opt = deepcopy(opt)
- final_opt['start_round'] = i
- cur_train_data, cur_valid_data = get_init_data(
- final_opt, delta_only=True, version_num=version, resplit=True
- )
- cur_rest_data = get_rest_data(final_opt, version_num=version)
- version_train_data.append(cur_train_data)
- version_valid_data.extend(cur_valid_data)
- version_rest_data.extend(cur_rest_data)
- print(
- '{}: train {} test {} rest {}'.format(
- NAMES[v_id],
- len(version_train_data),
- len(version_valid_data),
- len(version_rest_data),
- )
- )
- all_train_data.append(version_train_data)
- all_valid_data.append(version_valid_data)
- all_rest_data.append(version_rest_data)
-
- init_train_data, init_valid_data = get_init_data(opt, base_only=True)
- init_valid_data.extend(init_train_data)
- print('init: test {}'.format(len(init_valid_data)))
-
- final_valid_data = copy(init_valid_data)
- for i in range(len(all_rest_data)):
- final_valid_data.extend(all_valid_data[i])
- final_valid_data.extend(all_rest_data[i])
- print('all: test {}'.format(len(final_valid_data)))
-
- round_train_data, round_valid_data = [], []
- M = opt['num_runs']
- for cur_train_data in all_train_data:
- for cur_valid_data in [init_valid_data, final_valid_data]:
- for round_index in range(5):
- tmp_train_data = []
- for tmp_round_index in range(round_index + 1):
- tmp_train_data.extend(cur_train_data[tmp_round_index])
- for _ in range(M):
- round_train_data.append(tmp_train_data)
- round_valid_data.append(cur_valid_data)
-
- final_opt = deepcopy(opt)
- final_opt['num_runs'] = 1
- perfs = batch_train(
- final_opt,
- 'OVERALL_ROUND_BREAK' if not seq2seq else 'SEQ2SEQ_ROUND_BREAK',
- round_train_data,
- round_valid_data,
- save_all=True,
- return_acc_len=True,
- seq2seq=seq2seq,
- )
-
- def get_acc_and_acc_len(perfs):
- acc, acc_len, cnts = 0.0, [0.0 for _ in range(4)], 0
- accs = []
- for i, (cur_acc, cur_acc_len) in enumerate(perfs):
- if cur_acc == 0:
- continue
- acc += cur_acc
- accs.append(cur_acc)
- for j in range(4):
- acc_len[j] += cur_acc_len[j]
- cnts += 1
- if cnts == 0:
-            return acc, acc_len, 0.0  # return a zero stddev so the caller's three-way unpacking does not fail
- acc /= cnts
- for j in range(4):
- acc_len[j] /= cnts
- stddev = np.std(np.array(accs), dtype=np.float64)
- return acc, acc_len, stddev
-
- start, end = 0, M
- for train_name in NAMES:
- for valid_name in ['INIT', 'ALL']:
- for round_index in range(5):
- sub_perfs = perfs[start:end]
- acc, acc_len, stddev = get_acc_and_acc_len(sub_perfs)
- print_and_log(
- '{} on {} round{}: acc {} stddev {} acc_len {}'.format(
- train_name, valid_name, round_index, acc, stddev, acc_len
- )
- )
- log_only(
- '{} on {} round {}: {}'.format(
- train_name, valid_name, round_index, sub_perfs
- )
- )
- start = end
- end = start + M
-
-
-if __name__ == '__main__':
- argparser = ParlaiParser(False, False)
-
- # ============ below copied from projects/graph_world2/train.py ============
- argparser.add_arg('--vocab_size', type=int, default=1000)
- argparser.add_arg('--terminate', type=bool, default=False)
- argparser.add_arg('--lr', type=float, default=1e-3)
- argparser.add_arg('--max_seq_in', type=int, default=30)
- argparser.add_arg('--embedding_dim', type=int, default=50)
- argparser.add_arg('--rnn_h', type=int, default=350)
- argparser.add_arg('--rnn_layers', type=int, default=1)
- argparser.add_arg('--cuda', type=bool, default=True)
- argparser.add_arg('--eval_period', type=int, default=200)
- argparser.add_arg('--max_seq_out', type=int, default=5)
- argparser.add_arg('--label_ratio', type=float, default=1.0)
- argparser.add_arg('--max_iter', type=int, default=100000)
- argparser.add_arg('--exit_iter', type=int, default=3000)
- argparser.add_arg('--num_runs', type=int, default=10)
-
- argparser.add_arg('--train_data_file', type=str, default='')
- argparser.add_arg('--valid_data_file', type=str, default='')
- argparser.add_arg('--perf_out_file', type=str, default='')
- argparser.add_arg('--weight_file', type=str, default='')
- argparser.add_arg('--model_file', type=str, default='')
- argparser.add_arg('--data_agent_file', type=str, default='')
- argparser.add_arg('--wrong_data_file', type=str, default='')
-
- argparser.add_arg('--once', type=bool, default=False)
- argparser.add_arg('--job_num', type=int)
-
- argparser.add_arg('--counter_ablation', type=bool, default=False)
- argparser.add_arg('--room_ablation', type=bool, default=False)
- # ============ above copied from projects/graph_world2/train.py ============
-
- argparser.add_argument('--num_machines', type=int, default=1)
- argparser.add_argument('--job_timeout', type=float, default=3600 * 4)
-
- argparser.add_argument('--split', action='store_true', default=False)
- argparser.add_argument('--train', action='store_true', default=False)
- argparser.add_argument('--eval', action='store_true', default=False)
- argparser.add_argument('--seq2seq', action='store_true', default=False)
- argparser.add_argument('--constrain', action='store_true', default=False)
- argparser.add_argument('--rounds_breakdown', action='store_true', default=False)
- argparser.add_argument('--data_breakdown', action='store_true', default=False)
- argparser.add_argument('--ablation', action='store_true', default=False)
-
- argparser.add_parlai_data_path()
- argparser.add_mturk_args()
- opt = argparser.parse_args()
-
- # ============ below copied from projects/graph_world2/train.py ============
- opt['bidir'] = True
- opt['action_type_emb_dim'] = 5
- opt['counter_max'] = 3
- opt['counter_emb_dim'] = 5
- # ============ above copied from projects/graph_world2/train.py ============
-
- if opt['split']:
- overall_split(opt)
- quit()
- if opt['train']:
- overall_run(opt, seq2seq=opt['seq2seq'])
- quit()
- if opt['eval']:
- overall_add_val(opt, seq2seq=opt['seq2seq'], constrain_=opt['constrain'])
- quit()
- if opt['rounds_breakdown']:
- overall_run_rounds_breakdown(opt, seq2seq=opt['seq2seq'])
- quit()
- if opt['data_breakdown']:
- overall_run_data_breakdown(opt, seq2seq=opt['seq2seq'])
- quit()
- if opt['ablation']:
- overall_ablation(opt)
- quit()
diff --git a/projects/mastering_the_dungeon/mturk/tasks/__init__.py b/projects/mastering_the_dungeon/mturk/tasks/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/mturk/tasks/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/projects/graph_world2/__init__.py b/projects/mastering_the_dungeon/projects/graph_world2/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/projects/graph_world2/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/mastering_the_dungeon/projects/graph_world2/gen_sbatch_script.py b/projects/mastering_the_dungeon/projects/graph_world2/gen_sbatch_script.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/projects/graph_world2/gen_sbatch_script.py
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-
-import argparse
-import subprocess
-
-
-def gen(out_name, opt):
- fout_batch = open('{}.sh'.format(out_name), 'w')
- fout_batch.write('chmod +x *.sh\n')
- fout_batch.write('rm job-out-*\n')
- fout_batch.write('rm job-in-*\n')
- for i in range(opt['num_gpus']):
- sh_name = '{}_{}.sh'.format(out_name, i)
- fout = open(sh_name, 'w')
- if opt['slurm']:
- fout.write(
- "srun -o checkpoint/slurm-gpu-job-%j.out --error=checkpoint/slurm-gpu-job-%j.err --gres=gpu:1 python3 train.py --job_num {}\n".format(
- i
- )
- )
- else:
- fout.write(
- "CUDA_VISIBLE_DEVICES={} python3 train.py --job_num {}\n".format(i, i)
- )
- fout.close()
- fout_batch.write("./{} &\n".format(sh_name))
- fout_batch.close()
- subprocess.call("chmod +x {}.sh".format(out_name).split())
-
-
-if __name__ == '__main__':
- parser = argparse.ArgumentParser()
- parser.add_argument('--num_gpus', type=int, default=1, help='number of GPUs to use')
- parser.add_argument(
- '--slurm', action='store_true', default=False, help='whether use slurm or not'
- )
- opt = vars(parser.parse_args())
- gen('batch_holder', opt)
diff --git a/projects/mastering_the_dungeon/projects/graph_world2/train.py b/projects/mastering_the_dungeon/projects/graph_world2/train.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/projects/graph_world2/train.py
+++ /dev/null
@@ -1,449 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-
-from parlai.core.params import ParlaiParser
-from parlai.core.worlds import create_task
-from projects.mastering_the_dungeon.agents.graph_world2.agents import (
- ObjectChecklistDataAgent,
- ObjectChecklistModelAgent,
- Seq2SeqDataAgent,
- Seq2SeqModelAgent,
-)
-from projects.mastering_the_dungeon.agents.graph_world2.models import Seq2SeqModel
-from copy import deepcopy
-import os
-import sys
-import torch
-from projects.mastering_the_dungeon.tasks.graph_world2.graph import Graph
-import statistics
-import pickle
-import time
-import traceback
-import scipy.stats as ss
-from torch.autograd import Variable
-import random
-
-import projects.mastering_the_dungeon as parlai_internal
-
-sys.modules['parlai_internal'] = parlai_internal
-
-
-def prepro(opt):
- agent = (
- ObjectChecklistDataAgent(opt) if not opt['seq2seq'] else Seq2SeqDataAgent(opt)
- )
-
- opt = deepcopy(opt)
- opt['datatype'] = 'train'
- opt['terminate'] = True
- opt['batchsize'] = 1
- world = create_task(opt, agent)
-
- for _ in world:
- world.parley()
-
- agent.build()
- return agent
-
-
-def validate(opt, agent):
- old_datatype = agent.opt['datatype']
- agent.opt['datatype'] = 'valid'
-
- opt = deepcopy(opt)
- opt['datatype'] = 'valid'
- opt['terminate'] = True
- opt['batchsize'] = 1
-
- old_stdout = sys.stdout
- sys.stdout = open(os.devnull, 'w')
- valid_world = create_task(opt, agent)
- sys.stdout = old_stdout
-
- for _ in valid_world:
- valid_world.parley()
-
- stats = valid_world.report()
- agent.opt['datatype'] = old_datatype
- return stats
-
-
-def get_metrics(actions, gt_actions):
- tp, fp, fn = 0, 0, 0
- action_set, gt_action_set = set(actions), set(gt_actions)
- for action in action_set:
- if action in gt_action_set:
- tp += 1
- else:
- fp += 1
- for action in gt_action_set:
- if action not in action_set:
- fn += 1
- prec = tp / (tp + fp) if tp + fp > 0 else 0.0
- recall = tp / (tp + fn) if tp + fn > 0 else 0.0
- f1 = 2.0 * prec * recall / (prec + recall) if prec + recall > 0 else 0.0
- return f1
-
-
-def get_accuracy(actions, gt_actions, graph):
- graph_a, graph_b = graph.copy(), graph.copy()
- graph_a.parse_exec(' '.join(actions))
- graph_b.parse_exec(' '.join(gt_actions))
- return float(graph_a == graph_b)
-
-
-def additional_validate(
- opt, model, data_agent, valid_data_file, constrain_=True, no_hits=False
-):
- seq2seq = isinstance(model, Seq2SeqModel)
-
- def _get_actions(inst, symb_points):
- ret = []
- for i in range(len(symb_points) - 1):
- ret.append(' '.join(inst[symb_points[i] : symb_points[i + 1]]))
- return ret
-
- def _get_variable(np_a, volatile=False):
- if opt['cuda']:
- return Variable(torch.from_numpy(np_a), volatile=volatile).cuda()
- return Variable(torch.from_numpy(np_a), volatile=volatile)
-
- if not seq2seq:
- check_mapping = _get_variable(data_agent.get_check_mapping(), True)
- valid_data = pickle.load(open(valid_data_file, 'rb'))
-
- all_f1 = 0
- all_gt_actions = []
- all_accuracy = 0
-
- for example in valid_data:
- exp_dict = {'text': example[2], 'actions': example[3], 'graph': example[1]}
- if not seq2seq:
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = data_agent.get_data([exp_dict], 'valid')
- x, action_key, second_action_key, action_type, checked = (
- _get_variable(x, True),
- _get_variable(action_key, True),
- _get_variable(second_action_key, True),
- _get_variable(action_type, True),
- _get_variable(checked, True),
- )
- text_out = model.forward_predict(
- x,
- action_key,
- second_action_key,
- action_type,
- check_mapping,
- checked,
- [example[1]],
- data_agent,
- constrain_=constrain_,
- )[0]
- else:
- x, y = data_agent.get_data([exp_dict], 'valid')
- x, y = _get_variable(x, True), _get_variable(y, True)
- text_out = model.forward_predict(
- x, [example[1]], data_agent, constrain_=constrain_
- )[0]
- actions = text_out[:-1]
- gt_actions = _get_actions(*Graph.parse_static(example[3]))
- cur_f1 = get_metrics(actions, gt_actions)
- all_f1 += cur_f1
- all_accuracy += get_accuracy(actions, gt_actions, example[1])
-
- all_gt_actions.append(gt_actions)
-
- if constrain_ and not no_hits:
- random.seed(13)
- hits1, hits5, hits10 = 0, 0, 0
- for i, example in enumerate(valid_data):
- all_dicts = []
- for j in range(100):
- idx = i if j == 99 else random.randint(0, len(valid_data) - 1)
- exp_dict = {
- 'text': example[2],
- 'actions': ' '.join(all_gt_actions[idx]),
- 'graph': example[1],
- }
- all_dicts.append(exp_dict)
- if not seq2seq:
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = data_agent.get_data(all_dicts, 'train', assert_=False)
- (
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- ) = (
- _get_variable(x, True),
- _get_variable(action_key, True),
- _get_variable(second_action_key, True),
- _get_variable(action_type, True),
- _get_variable(current_room, True),
- _get_variable(checked, True),
- _get_variable(y, True),
- _get_variable(y_mask, True),
- _get_variable(counter_feat, True),
- )
- all_losses = (
- model.forward_loss(
- x,
- action_key,
- second_action_key,
- action_type,
- current_room,
- checked,
- y,
- y_mask,
- counter_feat,
- average_=False,
- )
- .data.cpu()
- .numpy()
- )
- else:
- x, y = data_agent.get_data(all_dicts, 'train', assert_=False)
- x, y = _get_variable(x, True), _get_variable(y, True)
- all_losses = model.forward_loss(x, y, average_=False).data.cpu().numpy()
- ranks = ss.rankdata(all_losses, method='ordinal')
- if ranks[-1] == 1:
- hits1 += 1
- if ranks[-1] <= 5:
- hits5 += 1
- if ranks[-1] <= 10:
- hits10 += 1
-
- N = len(valid_data)
- if constrain_ and not no_hits:
- return {
- 'accuracy': all_accuracy / N,
- 'f1': all_f1 / N,
- 'hits1': hits1 / N,
- 'hits5': hits5 / N,
- 'hits10': hits10 / N,
- }
- return {'f1': all_f1 / N, 'accuracy': all_accuracy / N}
-
-
-def log_print(s, out_file):
- print(s)
- if out_file is None:
- return
- f_log = open(out_file, 'a+')
- f_log.write(s + '\n')
- f_log.close()
-
-
-def main(opt, return_full=False, out_file=None):
- data_agent = prepro(opt)
- model_agent = (
- ObjectChecklistModelAgent(opt, data_agent=data_agent)
- if not opt['seq2seq']
- else Seq2SeqModelAgent(opt, data_agent=data_agent)
- )
-
- train_world = create_task(opt, model_agent)
-
- max_dict = model_agent.model.state_dict()
-
- max_acc, max_f1, max_data, last_max, max_acc_len = -1, 0, None, 0, None
- for iter in range(opt['max_iter']):
- if iter - last_max > opt['exit_iter']:
- break
-
- if 'inc_ratio' in opt and opt['inc_ratio'] > 0 and iter == opt['inc_pre_iters']:
- print('resetting best model for finetuning')
- model_agent.model.load_state_dict(max_dict)
- max_acc = 0
-
- train_world.parley()
- train_report = train_world.report()
-
- if iter % opt['eval_period'] == 0:
- stats = validate(opt, model_agent)
- cur_acc, cur_f1 = stats['acc'] / stats['cnt'], stats['f1'] / stats['cnt']
- if cur_acc > max_acc:
- max_acc = cur_acc
- max_data = (stats['correct_data'], stats['wrong_data'])
- max_dict = deepcopy(model_agent.model)
- last_max = iter
- max_acc_len = []
- for i in range(1, 5):
- max_acc_len.append(stats['acc_len'][i] / stats['cnt_len'][i])
- max_f1 = max(max_f1, cur_f1)
- s = '#{} train {:.4f} valid {:.4f} acc {:.4f} f1 {:.4f} max_acc {:.4f} max_f1 {:.4f} acc_len'.format(
- iter,
- train_report['loss'],
- stats['loss'] / stats['cnt'],
- cur_acc,
- cur_f1,
- max_acc,
- max_f1,
- )
- for i in range(1, 5):
- s += ' {:.4f}'.format(stats['acc_len'][i] / stats['cnt_len'][i])
- s += ' cnt_len'
- for i in range(1, 5):
- s += ' {:d}'.format(int(stats['cnt_len'][i]))
- log_print(s, out_file)
-
- wrong_data = max_data[1]
-
- if not return_full:
- return max_acc
- else:
- return max_acc, max_dict, model_agent.data_agent, wrong_data, max_acc_len
-
-
-def online_exp(opt, log_file=None):
- model_dict, data_agent, wrong_data, max_acc_len = None, None, None, None
- max_accs = []
- record = -1.0
- for _ in range(opt['num_runs']):
- max_acc, cur_model_dict, cur_data_agent, cur_wrong_data, cur_max_acc_len = main(
- opt, True, log_file
- )
- max_accs.append(max_acc)
- if max_acc > record:
- record = max_acc
- model_dict, data_agent, wrong_data = (
- cur_model_dict,
- cur_data_agent,
- cur_wrong_data,
- )
- if max_acc_len is None:
- max_acc_len = cur_max_acc_len
- else:
- for i in range(len(max_acc_len)):
- max_acc_len[i] += cur_max_acc_len[i]
- for i in range(len(max_acc_len)):
- max_acc_len[i] /= opt['num_runs']
-
- if opt['perf_out_file'] != '':
- fout = open(opt['perf_out_file'], 'w')
- fout.write('{}\n'.format(statistics.mean(max_accs)))
- fout.close()
- fout = open(opt['perf_out_file'] + '.len', 'w')
- fout.write('{}\n'.format(' '.join(list(map(str, max_acc_len)))))
- fout.close()
- if opt['model_file'] != '':
- pickle.dump(model_dict, open(opt['model_file'], 'wb'))
- if opt['data_agent_file'] != '':
- pickle.dump(data_agent, open(opt['data_agent_file'], 'wb'))
- if opt['wrong_data_file'] != '':
- pickle.dump(wrong_data, open(opt['wrong_data_file'], 'wb'))
-
-
-def ablation_exp(opt):
- max_acc, cur_model_dict, cur_data_agent, cur_wrong_data, cur_max_acc_len = main(
- opt, True, None
- )
- return additional_validate(
- opt,
- cur_model_dict,
- cur_data_agent,
- opt['valid_data_file'],
- constrain_=True,
- no_hits=True,
- )
-
-
-if __name__ == '__main__':
- if not os.path.exists('tmp'):
- os.makedirs('tmp')
-
- argparser = ParlaiParser()
- argparser.add_arg('--vocab_size', type=int, default=1000)
- argparser.add_arg('--terminate', type=bool, default=False)
- argparser.add_arg('--lr', type=float, default=1e-3)
- argparser.add_arg('--max_seq_in', type=int, default=30)
- argparser.add_arg('--embedding_dim', type=int, default=50)
- argparser.add_arg('--rnn_h', type=int, default=350)
- argparser.add_arg('--rnn_layers', type=int, default=1)
- argparser.add_arg('--cuda', type=bool, default=True)
- argparser.add_arg('--eval_period', type=int, default=200)
- argparser.add_arg('--max_seq_out', type=int, default=5)
- argparser.add_arg('--label_ratio', type=float, default=1.0)
- argparser.add_arg('--max_iter', type=int, default=100000)
- argparser.add_arg('--exit_iter', type=int, default=3000)
- argparser.add_arg('--num_runs', type=int, default=10)
-
- argparser.add_arg('--train_data_file', type=str, default='')
- argparser.add_arg('--valid_data_file', type=str, default='')
- argparser.add_arg('--perf_out_file', type=str, default='')
- argparser.add_arg('--weight_file', type=str, default='')
- argparser.add_arg('--model_file', type=str, default='')
- argparser.add_arg('--data_agent_file', type=str, default='')
- argparser.add_arg('--wrong_data_file', type=str, default='')
-
- argparser.add_arg('--once', type=bool, default=False)
- argparser.add_arg('--seq2seq', type=bool, default=False)
- argparser.add_arg('--job_num', type=int)
-
- argparser.add_arg('--counter_ablation', type=bool, default=False)
- argparser.add_arg('--room_ablation', type=bool, default=False)
-
- opt = argparser.parse_args()
-
- opt['bidir'] = True
- opt['action_type_emb_dim'] = 5
- opt['counter_max'] = 3
- opt['counter_emb_dim'] = 5
-
- if opt['once']:
- online_exp(opt)
- quit()
-
- job_num = opt['job_num']
- input_file = 'job-in-{}.pkl'.format(job_num)
- output_file = 'job-out-{}.txt'.format(job_num)
- log_file = 'job-log-{}.txt'.format(job_num)
-
- while True:
- time.sleep(5)
- if os.path.isfile(input_file):
- time.sleep(5)
- log_print('grab job {}'.format(job_num), log_file)
- try:
- with open(input_file, 'rb') as f_in:
- job_in_opt = pickle.load(f_in)
- os.remove(input_file)
- new_opt = deepcopy(opt)
- new_opt.update(job_in_opt)
- online_exp(new_opt, log_file)
- fout = open(output_file, 'w')
- fout.write('0\n')
- fout.close()
-            except Exception:  # don't swallow KeyboardInterrupt/SystemExit in the worker loop
- fout = open(output_file, 'w')
- fout.write('Error in train: {}\n'.format(traceback.format_exc()))
- fout.close()
- log_print('job done {}'.format(job_num), log_file)
diff --git a/projects/mastering_the_dungeon/tasks/__init__.py b/projects/mastering_the_dungeon/tasks/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/tasks/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/tasks/graph_world2/__init__.py b/projects/mastering_the_dungeon/tasks/graph_world2/__init__.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/tasks/graph_world2/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
diff --git a/projects/mastering_the_dungeon/tasks/graph_world2/agents.py b/projects/mastering_the_dungeon/tasks/graph_world2/agents.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/tasks/graph_world2/agents.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-import random
-from parlai.core.teachers import Teacher
-from os.path import join
-from collections import defaultdict as dd
-from copy import deepcopy
-import os
-import pickle
-
-
-class DefaultTeacher(Teacher):
- def __init__(self, opt, shared=None):
- self.datatype = opt['datatype']
- self.terminate = opt['terminate']
- self.random = not self.terminate
- self.step_size = opt.get('batchsize', 1)
- self.episode_index = shared and shared.get('batchindex') or 0
- self.opt = deepcopy(opt)
-
- if not shared:
- datapath = join(opt['datapath'], 'graph_world2', opt['datatype'])
- self.data = self._setup_data(datapath)
- if hasattr(self, 'valid_weights'):
- assert len(self.valid_weights) == len(self.data), (
- len(self.valid_weights),
- len(self.data),
- )
- self.stats = {
- 'loss': 0,
- 'cnt': 0,
- 'acc': 0,
- 'f1': 0,
- 'acc_len': dd(float),
- 'cnt_len': dd(float),
- 'correct_data': [],
- 'wrong_data': [],
- }
- else:
- self.data = shared['data']
- self.stats = shared['stats']
- if 'valid_weights' in shared:
- self.valid_weights = shared['valid_weights']
- self.len = len(self.data)
- super().__init__(opt, shared)
-
- self.iter = shared and shared.get('batchindex') or 0
-
- def __len__(self):
- return self.len
-
- def share(self):
- shared = super().share()
- shared['data'] = self.data
- shared['stats'] = self.stats
- if hasattr(self, 'valid_weights'):
- shared['valid_weights'] = self.valid_weights
- return shared
-
- def act(self):
- if self.episode_index >= self.len and self.terminate:
- self.epochDone = True
- if self.epochDone:
- return {'episode_done': True}
- self.iter += self.step_size
- opt = self.opt
- return_example = self.data[self.episode_index]
- if hasattr(self, 'valid_weights'):
- return_weight = self.valid_weights[self.episode_index]
- else:
- return_weight = 1.0
- self.episode_index += self.step_size
- if self.episode_index >= self.len:
- if self.terminate:
- self.epochDone = True
- self.episode_index %= self.len
- if self.random and self.episode_index == opt['batchsize'] - 1:
- random.shuffle(self.data)
- return {
- 'text': return_example[2],
- 'actions': return_example[3],
- 'graph': return_example[1],
- 'episode_done': True,
- 'weight': return_weight,
- }
-
- def observe(self, observation):
- self.observation = observation
- if self.datatype == 'valid':
- self.stats['loss'] += observation['loss'] * observation['cnt']
- self.stats['acc'] += observation['acc'] * observation['cnt']
- self.stats['f1'] += observation['f1'] * observation['cnt']
- self.stats['cnt'] += observation['cnt']
- l = observation['len']
- self.stats['acc_len'][l] += observation['acc'] * observation['cnt']
- self.stats['cnt_len'][l] += observation['cnt']
-
- self.stats['correct_data'].extend(observation['correct_data'])
- self.stats['wrong_data'].extend(observation['wrong_data'])
- else:
- if 'loss' in observation:
- self.stats['loss'] += observation['loss']
- self.stats['cnt'] += 1
- return observation
-
- def report(self):
- if self.datatype == 'train' or self.datatype == 'pretrain':
- stats = deepcopy(self.stats)
- stats['loss'] /= stats['cnt']
- self.stats['loss'] = 0.0
- self.stats['cnt'] = 0
- return stats
- else:
- return self.stats
-
- def _setup_data(self, datapath):
- opt = self.opt
- if opt['weight_file'] and self.datatype == 'valid':
- self.valid_weights = pickle.load(open(opt['weight_file'], 'rb'))
-
- if opt['train_data_file'] != '' and self.datatype == 'train':
- return pickle.load(open(opt['train_data_file'], 'rb'))
- if opt['valid_data_file'] != '' and self.datatype == 'valid':
- return pickle.load(open(opt['valid_data_file'], 'rb'))
-
- data = []
- for filename in os.listdir(datapath):
- if filename.endswith('pkl'):
- loaded_data = pickle.load(open(join(datapath, filename), 'rb'))
- data.append(loaded_data)
- if self.random:
- random.shuffle(data)
- return data
diff --git a/projects/mastering_the_dungeon/tasks/graph_world2/graph.py b/projects/mastering_the_dungeon/tasks/graph_world2/graph.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/tasks/graph_world2/graph.py
+++ /dev/null
@@ -1,1468 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-
-import random
-import numpy as np
-from copy import deepcopy
-from collections import Counter
-import parlai.core.build_data as build_data
-import torch
-import os
-
-DEFAULT_ROOMS = ['cavern', 'tower', 'forest']
-DEFAULT_OBJECTS = [
- 'rusty sword',
- 'elven sword',
- 'silver crown',
- 'blue ring',
- 'gold ring',
- 'bread',
- 'armor',
- 'mace',
- 'axe',
- 'crossbow',
- 'apple',
- 'apple',
- 'apple',
- 'beer',
-]
-DEFAULT_OBJECT_PROPS = [
- 'wieldable',
- 'wieldable',
- 'wearable',
- 'wearable',
- 'wearable',
- 'food',
- 'wearable',
- 'wieldable',
- 'wieldable',
- 'wieldable',
- 'food',
- 'food',
- 'food',
- 'drink',
-]
-DEFAULT_CONTAINERS = ['treasure chest', 'leather pouch']
-DEFAULT_AGENTS = ['dragon', 'orc', 'troll']
-INIT_HEALTH = 1
-
-
-def dedup(objects, props):
- visited = set()
- dedup_objects, dedup_props = [], []
- for i in range(len(objects)):
- if objects[i] in visited:
- continue
- visited.add(objects[i])
- dedup_objects.append(objects[i])
- dedup_props.append(props[i])
- return dedup_objects, dedup_props
-
-
-DEDUP_OBJECTS, DEDUP_PROPS = dedup(DEFAULT_OBJECTS, DEFAULT_OBJECT_PROPS)
-
-
-def rm(d, val):
- if val in d:
- del d[val]
-
-
-class Graph(object):
- def __init__(self, opt):
- self._opt = opt
- self._node_to_edges = {}
- self._node_to_prop = {}
- self._node_contained_in = {}
- self._node_contains = {}
- self._node_follows = {}
- self._node_followed_by = {}
- self._node_npcs = (
- set()
- ) # non-player characters that we move during update_world func.
- self._node_to_desc = {}
- self._node_freeze = False
- self._cnt = 0
- self._save_fname = 'tmp.gw'
- self._node_to_text_buffer = {}
-
- def new_agent(self, id):
- self._node_to_text_buffer[id] = '' # clear buffer
-
- def delete_node(self, id):
- rm(self._node_to_prop, id)
- if id in self._node_contains[self.location(id)]:
- self._node_contains[self.location(id)].remove(id)
- rm(self._node_contained_in, id)
- # all things inside this are zapped too
- os = deepcopy(self._node_contains[id])
- for o in os:
- self.delete_node(o)
- # now remove edges from other rooms
- for r in self._node_to_edges[id]:
- if r[0] == 'path_to':
- self._node_to_edges[r[1]].remove(['path_to', id])
- rm(self._node_to_edges, id)
- rm(self._node_to_text_buffer, id)
- rm(self._node_to_text_buffer, id)
- # remove all agents following this one:
- if id in self._node_followed_by:
- ags = deepcopy(self._node_followed_by[id])
- for a in ags:
- self.set_follow(a, None)
- rm(self._node_follows, id)
- if id in self._node_npcs:
- self._node_npcs.remove(id)
-
- def save_graph(self, fname):
- path = os.path.join(self._opt['datapath'], 'graph_world2')
- build_data.make_dir(path)
- if fname != '':
- self._save_fname = path + '/' + fname + '.gw2'
- else:
- fname = self._save_fname
- members = [
- attr
- for attr in dir(self)
- if not callable(getattr(self, attr))
- and (not attr.startswith("__"))
- and (attr.startswith("_"))
- ]
- model = {}
- for m in members:
- model[m] = getattr(self, m)
- with open(fname, 'wb') as write:
- torch.save(model, write)
-
- def load_graph(self, fname):
- if fname != '':
- path = os.path.join(self._opt['datapath'], 'graph_world2')
- fname = path + '/' + fname + '.gw2'
- else:
- fname = self._save_fname
- if not os.path.isfile(fname):
- print("[graph file not found: " + fname + ']')
- return
- print("[loading graph: " + fname + ']')
- members = [
- attr
- for attr in dir(self)
- if not callable(getattr(self, attr))
- and (not attr.startswith("__"))
- and (attr.startswith("_"))
- ]
- with open(fname, 'rb') as read:
- model = torch.load(read)
- for m in members:
- if m in model:
- setattr(self, m, model[m])
- else:
- print("[ loading: " + m + " is missing in file ]")
- self._save_fname = fname
-
- def freeze(self, freeze=None):
- if freeze is not None:
- self._node_freeze = freeze
- return self._node_freeze
-
- def node_path_to(self, id):
- rooms = self._node_to_edges[id]
- rooms = [r[1] for r in rooms if r[0] == 'path_to']
- return rooms
-
- def desc_to_node(
- self, desc, nearbyid=None, nearbytype=None, should_have=[], shouldnt_have=[]
- ):
- if nearbyid is not None:
- if nearbytype == 'path':
- o = self.node_path_to(self.location(nearbyid))
- elif nearbytype == 'carrying':
- o = self.node_contains(nearbyid)
- elif nearbytype == 'sameloc':
- o = self.node_contains(self.location(nearbyid))
- elif nearbytype == 'all':
- o1 = self.node_contains(nearbyid)
- o2 = self.node_contains(self.location(nearbyid))
- o3 = self.node_path_to(self.location(nearbyid))
- o = o1.union(o2).union(o3)
- else:
- o1 = self.node_contains(nearbyid)
- o2 = self.node_contains(self.location(nearbyid))
- o = o1.union(o2)
- else:
- o = set(self._node_to_desc.keys())
-
- o = [id for id in o if self._node_to_desc[id] == desc]
- o.sort() # ensure deterministic order (e.g., which apple is got first)
-
- if len(o) == 0:
- return None # No results given the nearby conditions
-
- for i in o:
- flag = True
- for prop in should_have:
- if not self.valid(i, prop):
- flag = False
- for prop in shouldnt_have:
- if self.valid(i, prop):
- flag = False
- if not flag:
- continue
- return i
-
- return False # There are results given the nearby conditions, but they do not satisfy the property constraints.
-
- def copy(self):
- return deepcopy(self)
-
- def unique_hash(self):
- # TODO: make it independent of specific world settings
- # object_ids, agent_ids, and container_ids are set by construct_graph
- s = ''
- apple_s = []
- for id in self.object_ids + self.container_ids + self.agent_ids:
- cur_s = ''
- if not self.node_exists(id):
- cur_s += 'eaten'
- else:
- cur_s += self._node_contained_in[id]
- for prop in ['wielding', 'wearing', 'dead']:
- if prop in self._node_to_prop[id]:
- cur_s += prop
- if self.node_to_desc_raw(id) == 'apple':
- apple_s.append(cur_s)
- else:
- s += cur_s
- s += ''.join(sorted(apple_s))
- return s
-
- def __eq__(self, other):
- return self.unique_hash() == other.unique_hash()
-
- def add_node(self, desc, props):
- id = desc
- if id != 'dragon':
- id = id + "_" + str(self._cnt)
- self._cnt = self._cnt + 1
- if id in self._node_to_edges:
- return False
- self._node_to_edges[id] = []
- if type(props) == str:
- self._node_to_prop[id] = {}
- self._node_to_prop[id][props] = True
- else:
- self._node_to_prop[id] = {}
- for p in props:
- self._node_to_prop[id][p] = True
- self._node_contains[id] = set()
- self._node_to_desc[id] = desc
-
- if 'agent' in self._node_to_prop[id]:
- self.set_prop(id, 'health', INIT_HEALTH)
-
- return id
-
- def node_exists(self, id):
- return id in self._node_contained_in
-
- def set_desc(self, id, desc):
- self._node_to_desc[id] = desc
-
- def node_to_desc_raw(self, id):
- return self._node_to_desc[id]
-
- def node_to_desc(self, id, use_the=False):
- if id in self._node_to_desc:
- ent = self._node_to_desc[id]
- if self.has_prop(id, 'dead'):
- ent = 'dead ' + ent
- if self.has_prop(id, 'agent') or self.has_prop(id, 'object'):
- if use_the:
- ent = 'the ' + ent
- else:
- ent = (
- 'an ' + ent
- if ent[0] in ['a', 'e', 'i', 'o', 'u']
- else 'a ' + ent
- )
- elif self.has_prop(id, 'room'):
- ent = 'the ' + ent
- return ent
- else:
- return id
-
- def add_edge(self, id1, edge, id2):
- if [edge, id2] not in self._node_to_edges[id1]:
- self._node_to_edges[id1].insert(0, [edge, id2])
-
- def add_path_to(self, id1, id2):
- if id1 == id2:
- return False
- self.add_edge(id1, 'path_to', id2)
- self.add_edge(id2, 'path_to', id1)
- return True
-
- def is_path_to(self, id1, id2):
- rooms = self._node_to_edges[id1]
- rooms = [r[1] for r in rooms if r[0] == 'path_to' and r[1] == id2]
- if len(rooms) == 1:
- return True
- else:
- return False
-
- def add_contained_in(self, id1, id2):
- if id1 in self._node_contained_in:
- i_am_in = self._node_contained_in[id1]
- self._node_contains[i_am_in].remove(id1)
- self._node_contained_in[id1] = id2
- self._node_contains[id2].add(id1)
- return True
-
- def node_contained_in(self, id):
- return self._node_contained_in[id]
-
- def set_follow(self, id1, id2):
- if id1 in self._node_follows:
- i_follow = self._node_follows[id1]
- self._node_followed_by[i_follow].remove(id1)
- if id2 is not None:
- self._node_follows[id1] = id2
- if id2 not in self._node_followed_by:
- self._node_followed_by[id2] = set()
- self._node_followed_by[id2].add(id1)
- return True
- else:
- if id1 in self._node_follows:
- self._node_follows.pop(id1)
-
- def valid(self, id, prop):
- if not id in self._node_to_prop:
- return False
- if not self.has_prop(id, prop):
- return False
- return True
-
- def messages_in_same_room_as(self, agent, txt):
- room = self._node_contained_in[agent]
- agents = self._node_contains[room]
- agents = [a for a in agents if self.has_prop(a, 'agent') and a != agent]
- if len(agents) > 0:
- for a in agents:
- self.send_msg(a, txt)
-
- def has_prop(self, id, prop):
- if id in self._node_to_prop:
- if prop in self._node_to_prop[id]:
- return self._node_to_prop[id][prop]
- return False
-
- def set_prop(self, id, prop, val=True):
- if id in self._node_to_prop:
- self._node_to_prop[id][prop] = val
-
- def inc_prop(self, id, prop, val=1):
- if id in self._node_to_prop:
- if prop not in self._node_to_prop[id]:
- self.set_prop(id, prop, 0)
- if type(self._node_to_prop[id][prop]) != int:
- self.set_prop(id, prop, 0)
- self._node_to_prop[id][prop] += val
-
- def delete_prop(self, id, prop):
- if id in self._node_to_prop:
- if prop in self._node_to_prop[id]:
- del self._node_to_prop[id][prop]
-
- def location(self, thing):
- return self._node_contained_in[thing]
-
- def room(self, thing):
- id = self._node_contained_in[thing]
- while not self.has_prop(id, 'room'):
- id = self._node_contained_in[id]
- return id
-
- def node_contains(self, loc):
- if loc in self._node_contains:
- return self._node_contains[loc]
- else:
- return set()
-
- def send_msg(self, agentid, txt):
- if agentid in self._node_to_text_buffer:
- self._node_to_text_buffer[agentid] += txt
-
- #### ----------------------------------------------------------------
- # TODO: Ideally, all functions below do not use the graph structure directly,
- # but only the accessor functions (i.e. should not use self._node_* ).
-
- def die(self, id):
- if not self.valid(id, 'agent'):
- return False
- self.send_msg(id, 'You are dead!!!!!!!\n')
- agent_desc = self.node_to_desc(id, use_the=True).capitalize()
- self.messages_in_same_room_as(id, agent_desc + ' is dead!!!!\n')
- self.set_follow(id, None)
- self.set_prop(id, 'dead')
- # self.delete_prop(id, 'agent')
- # self.set_prop(id, 'object')
-
- def move_agent(self, agent_id, to_txt=None, to_id=None):
- if to_id is None:
- to_id = self.desc_to_node(
- to_txt, nearbyid=agent_id, nearbytype='path', should_have=['room']
- )
- if to_id is None or to_id == False:
- return False
- if not self.valid(agent_id, 'agent'):
- return False
- # if not self.valid(to_id, 'room'): return False
- from_id = self.location(agent_id)
- to_desc = self.node_to_desc(to_id)
- from_desc = self.node_to_desc(from_id)
- if self.is_path_to(from_id, to_id):
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- self.messages_in_same_room_as(
- agent_id, agent_desc + ' leaves towards ' + to_desc + '.\n'
- )
- self.add_contained_in(agent_id, to_id)
- agent_desc = self.node_to_desc(agent_id).capitalize()
- self.messages_in_same_room_as(
- agent_id, agent_desc + ' enters from ' + from_desc + '.\n'
- )
- self.send_msg(agent_id, self.look(agent_id))
- else:
- return False
- if agent_id in self._node_followed_by:
- for ppl in self._node_followed_by[agent_id]:
- room2 = self.location(ppl)
- if from_id == room2:
- self.send_msg(ppl, 'You follow. ')
- self.move_agent(ppl, to_id=to_id)
- return True
-
- def follow(self, agent_id, params):
- if not self.valid(agent_id, 'agent'):
- return False
- thing = ' '.join(params)
- if thing == 'off' or thing == '':
- if agent_id in self._node_follows:
- thing_id = self._node_follows[agent_id]
- self.set_follow(agent_id, None)
- thing_desc = self.node_to_desc(thing_id, use_the=True)
- s = 'You stop following ' + thing_desc + '.\n'
- self.send_msg(agent_id, s)
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- s = agent_desc + ' stops following you.\n'
- self.send_msg(thing_id, s)
- return True
- else:
- s = 'You are not following anyone.\n'
- self.send_msg(agent_id, s)
- return True
- thing_id = self.desc_to_node(thing, nearbyid=agent_id, nearbytype='sameloc')
- if not thing_id or not self.valid(thing_id, 'agent'):
- return False
- room1_id = self.room(agent_id)
- room2_id = self.room(thing_id)
- thing_desc = self.node_to_desc(thing_id, use_the=True)
- if room1_id != room2_id:
- self.send_msg(agent_id, thing_desc + " is not here.")
- return True
- self.set_follow(agent_id, thing_id)
- s = 'You are following the ' + thing_desc + '.\n'
- self.send_msg(agent_id, s)
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- s = agent_desc + ' is following you.\n'
- self.send_msg(thing_id, s)
- return True
-
- def get_object(self, agent_id, obj_txt):
- if not self.valid(agent_id, 'agent'):
- return False
- obj_id = self.desc_to_node(
- obj_txt, nearbyid=agent_id, nearbytype='sameloc', should_have=['object']
- )
- if obj_id is None:
- self.send_msg(agent_id, 'It is not here.\n')
- return False
- if obj_id == False:
- self.send_msg(agent_id, 'It is not an object.\n')
- return False
- self.add_contained_in(obj_id, agent_id)
- self.send_msg(agent_id, 'Done.\n')
- return True
-
- def drop_object(self, agent_id, obj_txt):
- if not self.valid(agent_id, 'agent'):
- return False
- obj_id = self.desc_to_node(
- obj_txt,
- nearbyid=agent_id,
- nearbytype='carrying',
- should_have=['object'],
- shouldnt_have=['wearing', 'wielding'],
- )
- if obj_id is None:
- self.send_msg(agent_id, "You do not have that.\n")
- return False
- if obj_id == False:
- self.send_msg(agent_id, 'You must unwield/remove it before dropping it.\n')
- return False
- room_id = self.node_contained_in(agent_id)
- self.add_contained_in(obj_id, room_id)
- self.send_msg(agent_id, 'Done.\n')
- return True
-
- def put(self, agent_id, params):
- if not self.valid(agent_id, 'agent'):
- return False
- if not len(params) == 2:
- return False
- obj_id = self.desc_to_node(
- params[0],
- nearbyid=agent_id,
- nearbytype='carrying',
- should_have=['object'],
- shouldnt_have=['wearing', 'wielding'],
- )
- if obj_id is None:
- self.send_msg(agent_id, 'You do not have that.\n')
- return False
- if obj_id == False:
- self.send_msg(
- agent_id,
- 'You must unwield/remove it before putting it into containers.\n',
- )
- return False
- receiver_id = self.desc_to_node(
- params[1], nearbyid=agent_id, should_have=['container']
- )
- if receiver_id is None:
- self.send_msg(agent_id, 'That is not here.\n')
- return False
- if receiver_id == False:
- self.send_msg(agent_id, 'It is not a container.\n')
- return False
- self.add_contained_in(obj_id, receiver_id)
- receiver_desc = self.node_to_desc(receiver_id, use_the=True)
- self.send_msg(
- agent_id,
- "You put "
- + self.display_node_list([obj_id])
- + " in "
- + receiver_desc
- + '.\n',
- )
- return True
-
- def get_from(self, agent_id, params):
- if not self.valid(agent_id, 'agent'):
- return False
- if not len(params) == 2:
- return False
- victim_id = self.desc_to_node(
- params[1], nearbyid=agent_id, should_have=['container']
- )
- if victim_id is None:
- self.send_msg(agent_id, 'That is not here.\n')
- return False
- if victim_id == False:
- self.send_msg(agent_id, 'It is not a container.\n')
- return False
- # if not self.valid(victim_id, 'container'): return False
- obj_id = self.desc_to_node(
- params[0], nearbyid=victim_id, nearbytype='carrying', should_have=['object']
- )
- if obj_id is None:
- self.send_msg(agent_id, "You couldn't find it.\n")
- return False
- if obj_id == False:
- self.send_msg(agent_id, 'It is not an object.\n')
- return False
- self.add_contained_in(obj_id, agent_id)
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- victim_desc = self.node_to_desc(victim_id, use_the=True)
- self.send_msg(
- agent_id,
- "You took "
- + self.display_node_list([obj_id])
- + " from "
- + victim_desc
- + '.\n',
- )
- self.send_msg(
- victim_id,
- agent_desc
- + " took the "
- + self.display_node_list([obj_id])
- + " from you.\n",
- )
- return True
-
- def give(self, agent_id, params):
- if not self.valid(agent_id, 'agent'):
- return False
- if not len(params) == 2:
- return False
- obj_id = self.desc_to_node(
- params[0],
- nearbyid=agent_id,
- nearbytype='carrying',
- should_have=['object'],
- shouldnt_have=['wearing', 'wielding'],
- )
- if obj_id is None:
- self.send_msg(agent_id, 'You do not have that.\n')
- return False
- if obj_id == False:
- self.send_msg(
- agent_id, 'You must remove/unwield it before giving it to others.\n'
- )
- return False
- receiver_id = self.desc_to_node(
- params[1],
- nearbyid=agent_id,
- nearbytype='sameloc',
- should_have=['agent'],
- shouldnt_have=['dead'],
- )
- if receiver_id == agent_id:
- return False
- if receiver_id is None:
- self.send_msg(agent_id, 'They are not here.\n')
- return False
- if receiver_id == False:
- self.send_msg(agent_id, 'They are not alive agents.\n')
- return False
- self.add_contained_in(obj_id, receiver_id)
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- receiver_desc = self.node_to_desc(receiver_id, use_the=True)
- self.send_msg(
- agent_id,
- "You gave "
- + self.display_node_list([obj_id])
- + " to "
- + receiver_desc
- + '.\n',
- )
- self.send_msg(
- receiver_id,
- agent_desc + " gave you " + self.display_node_list([obj_id]) + '\n',
- )
- return True
-
- def take(self, agent_id, params):
- if not self.valid(agent_id, 'agent'):
- return False
- if not len(params) == 2:
- return False
- victim_id = self.desc_to_node(
- params[1], nearbyid=agent_id, nearbytype='sameloc', should_have=['agent']
- )
- if victim_id == agent_id:
- return False
- if victim_id is None:
- self.send_msg(agent_id, 'They are not here.\n')
- return False
- if victim_id == False:
- self.send_msg(agent_id, 'It is not an agent.\n')
- return False
- obj_id = self.desc_to_node(
- params[0], nearbyid=victim_id, nearbytype='carrying', should_have=['object']
- )
- if obj_id is None:
- self.send_msg(agent_id, 'They do not have that.\n')
- return False
- if obj_id == False:
- self.send_msg(agent_id, 'It is not an object.\n')
- return False
- self.add_contained_in(obj_id, agent_id)
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- victim_desc = self.node_to_desc(victim_id, use_the=True)
- self.send_msg(
- agent_id,
- "You took "
- + self.display_node_list([obj_id])
- + " from "
- + victim_desc
- + '.\n',
- )
- self.send_msg(
- victim_id,
- agent_desc
- + " took the "
- + self.display_node_list([obj_id])
- + " from you.\n",
- )
- return True
-
- def hit_agent(self, agent_id, victim_txt, victim_id=None):
- if victim_id is None:
- victim_id = self.desc_to_node(
- victim_txt,
- nearbyid=agent_id,
- nearbytype='sameloc',
- should_have=['agent'],
- shouldnt_have=['dead'],
- )
- if victim_id is None:
- self.send_msg(agent_id, 'They are not here.\n')
- return False
- if victim_id == False:
- self.send_msg(agent_id, "You can't hit that.\n")
- return False
- if not self.valid(agent_id, 'agent'):
- return False
- agent_desc = self.node_to_desc(agent_id, use_the=True).capitalize()
- victim_desc = self.node_to_desc(victim_id, use_the=True)
- self.send_msg(agent_id, "You hit " + victim_desc + '! ')
- self.send_msg(victim_id, agent_desc + " attacked you! ")
- energy = self.has_prop(victim_id, 'health')
- if type(energy) == bool:
- energy = INIT_HEALTH
- energy = max(0, energy - 1)
- if energy == 0:
- self.die(victim_id)
- elif energy < 4 and energy > 0:
- self.send_msg(victim_id, 'You are ' + self.health(victim_id) + '.\n')
- self.set_prop(victim_id, 'health', energy)
- return True
-
- def wear(self, agent_id, thing):
- thing_id = self.desc_to_node(
- thing,
- nearbyid=agent_id,
- nearbytype='carrying',
- should_have=['wearable'],
- shouldnt_have=['wearing'],
- )
- if thing_id is None:
- self.send_msg(agent_id, "You do not have that.\n")
- return False
- if thing_id == False:
- # self.send_msg(agent_id, 'You are wearing that or it is not wearable.\n')
- self.send_msg(agent_id, "You can't do that.\n")
- return False
- self.set_prop(thing_id, 'wearing')
- self.send_msg(agent_id, "Done.\n")
- self.inc_prop(agent_id, 'armour', 1)
- return True
-
- def wield(self, agent_id, thing):
- thing_id = self.desc_to_node(
- thing,
- nearbyid=agent_id,
- nearbytype='carrying',
- should_have=['wieldable'],
- shouldnt_have=['wielding'],
- )
- if thing_id is None:
- self.send_msg(agent_id, "You do not have that.\n")
- return False
- if thing_id == False:
- # self.send_msg(agent_id, 'You are wielding that or it is not wieldable.\n')
- self.send_msg(agent_id, "You can't do that.\n")
- return False
- self.set_prop(thing_id, 'wielding')
- self.send_msg(agent_id, "Done.\n")
- self.inc_prop(agent_id, 'weapon', 1)
- return True
-
- def remove(self, agent_id, thing):
- thing_id_wear = self.desc_to_node(
- thing, nearbyid=agent_id, nearbytype='carrying', should_have=['wearing']
- )
- thing_id_wield = self.desc_to_node(
- thing, nearbyid=agent_id, nearbytype='carrying', should_have=['wielding']
- )
- thing_id = thing_id_wear or thing_id_wield
- if thing_id is None:
- self.send_msg(agent_id, "You do not have that.\n")
- return False
- if thing_id == False:
- self.send_msg(agent_id, 'You are not using that.\n')
- return False
- if self.has_prop(thing_id, 'wielding'):
- self.set_prop(thing_id, 'wielding', None)
- self.inc_prop(agent_id, 'weapon', -1)
- else:
- self.set_prop(thing_id, 'wearing', None)
- self.inc_prop(agent_id, 'armour', -1)
- self.send_msg(agent_id, "Done.\n")
-
- return True
-
- def ingest(self, agent_id, cmd, thing):
- if cmd == 'eat':
- thing_id = self.desc_to_node(
- thing, nearbyid=agent_id, nearbytype='carrying', should_have=['food']
- )
- else:
- thing_id = self.desc_to_node(
- thing, nearbyid=agent_id, nearbytype='carrying', should_have=['drink']
- )
- if thing_id is None:
- self.send_msg(agent_id, "You do not have that.\n")
- return False
- if thing_id == False:
- if cmd == 'eat':
- self.send_msg(agent_id, "You can't eat that.\n")
- else:
- self.send_msg(agent_id, "You can't drink that.\n")
- return False
- self.delete_node(thing_id)
- self.send_msg(agent_id, "Yum.\n")
- energy = self.has_prop(agent_id, 'health')
- if energy == False:
- energy = INIT_HEALTH
- if energy < 8:
- energy = energy + 1
- self.set_prop(agent_id, 'health', energy)
- return True
-
- def create(self, agent_id, params):
- # -- create commands: --
- # create room kitchen -> creates room with path from this room
- # create path kitchen -> create path to that room from this one
- # create agent orc
- # create object ring
- # create container box
- # create [un]freeze
- # create reset/load/save [fname]
- # create rename <node> <value>
- # create delete <node>
- # create set_prop orc to health=5
- if not self.valid(agent_id, 'agent'):
- return False
- room_id = self.room(agent_id)
- all = ' '.join(params)
- txt = ' '.join(params[1:])
- if not (all == 'save' or all == 'load' or all == 'freeze' or all == 'unfreeze'):
- if txt == '':
- return False
- if params[0] == 'save':
- self.save_graph(txt)
- self.send_msg(agent_id, "[ saved: " + self._save_fname + ']\n')
- return True
- if params[0] == 'load' or params[0] == 'reset':
- self.load_graph(txt)
- self.send_msg(agent_id, "[ loaded: " + self._save_fname + ']\n')
- return True
- if params[0] == 'freeze':
- self.freeze(True)
- self.send_msg(agent_id, "Frozen.\n")
- return True
- if params[0] == 'unfreeze':
- self.freeze(False)
- self.send_msg(agent_id, "Unfrozen.\n")
- return True
- if params[0] == 'delete' or params[0] == 'del' or params[0] == 'rm':
- id = self.desc_to_node(txt, nearbyid=agent_id, nearbytype='all')
- if id == False:
- return False
- self.delete_node(id)
- self.send_msg(agent_id, "Deleted.\n")
- return True
- if params[0] == 'rename':
- params = self.split_params(params[1:], 'to')
- to_id = self.desc_to_node(params[0], nearbyid=agent_id, nearbytype='all')
- if to_id == False:
- return False
- self.set_desc(to_id, params[1])
- self.send_msg(agent_id, "Done.\n")
- return True
- if params[0] == 'agent':
- new_id = self.add_node(txt, params[0])
- self.add_contained_in(new_id, room_id)
- self._node_npcs.add(new_id)
- self.send_msg(agent_id, "Done.\n")
- return True
- if params[0] == 'room':
- new_id = self.add_node(txt, params[0])
- self.add_contained_in(new_id, room_id)
- self.add_path_to(new_id, room_id)
- self.send_msg(agent_id, "Done.\n")
- return True
- if params[0] == 'set_prop':
- params = self.split_params(params[1:], 'to')
- print(params)
- to_id = self.desc_to_node(params[0], nearbyid=agent_id, nearbytype='all')
- if to_id == False:
- return False
- key = params[1]
- value = True
- if '=' in key:
- sp = key.split('=')
- if len(sp) != 2:
- return False
- key = sp[0]
- value = sp[1]
- if value == 'True':
- value = True
- try:
- value = int(value)
- except ValueError:
- pass
- self.set_prop(to_id, key, value)
- self.send_msg(agent_id, "Done.\n")
- return True
- if (
- params[0] == 'container'
- or params[0] == 'object'
- or params[0] == 'food'
- or params[0] == 'drink'
- ):
- new_id = self.add_node(txt, 'object')
- self.add_contained_in(new_id, room_id)
- self.set_prop(new_id, params[0])
- self.send_msg(agent_id, "Done.\n")
- return True
- if params[0] == 'path':
- to_id = self.desc_to_node(txt)
- if to_id == False:
- return False
- self.add_path_to(to_id, room_id)
- self.send_msg(agent_id, "Done.\n")
- return True
- return False
-
- def display_room_edges(self, roomid, third_person=False):
- s = ''
- rooms = self._node_to_edges[roomid]
- rooms = [r[1] for r in rooms if r[0] == 'path_to']
- room = self.node_to_desc(roomid)
- if third_person:
- s += '{} is connected to '.format(room).capitalize()
- else:
- if len(rooms) == 1:
- s += 'There is a path to '
- else:
- s += 'There are paths to '
- s += self.display_node_list(rooms)
- s += '.\n'
- return s
-
- def display_room_objects(self, roomid, third_person=False):
- s = ''
- objects = self.node_contains(roomid)
- objects = [o for o in objects if self.has_prop(o, 'object')]
- # import pdb; pdb.set_trace()
- room = self.node_to_desc(roomid)
- if len(objects) == 0:
- s += '{} is empty.\n'.format(room).capitalize()
- else:
- if third_person:
- s += 'In {} there is '.format(room)
- else:
- s += 'There is '
- s += self.display_node_list(objects)
- if third_person:
- s += '\n'
- else:
- s += ' here.\n'
- return s
-
- def display_room_agents(self, me, room, third_person=False):
- s = ''
- agents = self.node_contains(room)
- agents = [a for a in agents if self.has_prop(a, 'agent') and a != me]
- if len(agents) > 0:
- for a in agents:
- desc = self.node_to_desc(a).capitalize()
- s += desc + ' is here.\n'
- return s
-
- def get_text(self, agent):
- txt = ''
- if agent in self._node_to_text_buffer:
- txt = self._node_to_text_buffer[agent]
- self._node_to_text_buffer[agent] = '' # clear buffer
- return txt
-
- def cnt_obj(self, obj, c):
- cnt = c[obj]
- if cnt == 1:
- return obj
- else:
- words = obj.split(' ')
- f = [
- 'two',
- 'three',
- 'four',
- 'five',
- 'six',
- 'seven',
- 'eight',
- 'nine',
- 'a lot of',
- ]
- rep = ['a', 'an', 'the']
- cnt = cnt - 2
- if cnt > 8:
- cnt = 8
- cnt = f[cnt]
- if words[0] in rep:
- return cnt + ' ' + ' '.join(words[1:]) + 's'
- else:
- return cnt + ' ' + ' '.join(words) + 's'
-
- def display_node_list(self, l):
- if len(l) == 0:
- return 'nothing'
- l = [self.node_to_desc(ent) for ent in l]
- if len(l) == 1:
- return l[0]
- c = Counter(l)
- l = set(l)
- s = ''
- cnt = 0
- for o in l:
- s += self.cnt_obj(o, c)
- if len(l) > 2 and cnt < len(l) - 1:
- s += ','
- s += ' '
- cnt = cnt + 1
- if cnt == len(l) - 1:
- s += 'and '
- return s.rstrip(' ')
-
- def display_node(self, id):
- s = ''
- if len(self.node_contains(id)) > 0:
- s = (
- s
- + id
- + ' contains '
- + self.display_node_list(self.node_contains(id))
- + '\n'
- )
- return s
-
- def examine(self, agent_id, thing):
- thing_id = self.desc_to_node(thing, nearbyid=agent_id)
- if thing_id is None:
- self.send_msg(agent_id, "That is not here.\n")
- return True
- s = ''
- if self.has_prop(thing_id, 'agent'):
- s = self.inventory(thing_id, agent_id)
- else:
- object_ids = self.node_contains(thing_id)
- object_ids = [o for o in object_ids if self.has_prop(o, 'object')]
- thing_desc = self.node_to_desc(thing_id, use_the=True).capitalize()
- inside_txt = ' contains '
- if len(object_ids) == 0:
- s += thing_desc + inside_txt + 'nothing.\n'
- else:
- s += thing_desc + inside_txt
- s += self.display_node_list(object_ids)
- s += '.\n'
- self.send_msg(agent_id, s)
- return True
-
- def inventory(self, id, id2=None):
- s = ''
- carry_ids = []
- wear_ids = []
- wield_ids = []
- for o in self.node_contains(id):
- if self.has_prop(o, 'wearing'):
- wear_ids.append(o)
- elif self.has_prop(o, 'wielding'):
- wield_ids.append(o)
- else:
- carry_ids.append(o)
- if id2 is not None:
- thing_desc = self.node_to_desc(id, use_the=True).capitalize() + ' is'
- else:
- thing_desc = 'You are'
- if len(carry_ids) == 0:
- s += thing_desc + ' carrying nothing.\n'
- else:
- s += thing_desc + ' carrying ' + self.display_node_list(carry_ids) + '.\n'
- if len(wear_ids) > 0:
- s += thing_desc + ' wearing ' + self.display_node_list(wear_ids) + '.\n'
- if len(wield_ids) > 0:
- s += thing_desc + ' wielding ' + self.display_node_list(wield_ids) + '.\n'
- return s
-
- def health(self, id):
- health = self.has_prop(id, 'health')
- if health == None or health == False:
- health = 1
- if health > 8:
- health = 8
- f = [
- 'dead',
- 'on the verge of death',
- 'very weak',
- 'weak',
- 'ok',
- 'good',
- 'strong',
- 'very strong',
- 'nigh on invincible',
- ]
- return f[health]
-
- def look(self, id):
- room = self.location(id)
- s = 'You are in {}.\n'.format(self.node_to_desc(room))
- s += self.display_room_agents(id, room)
- s += self.display_room_objects(room)
- s += self.display_room_edges(room)
- return s
-
- def split_params(self, params, word):
- return ' '.join(params).split(' {} '.format(word))
-
- def help(self):
- txt = (
- '----------------------------------\n'
- + 'Commands:\n'
- + 'look\n'
- + 'examine <thing>\n'
- + 'go <room>\n'
- + 'get/drop <object>\n'
- + 'eat/drink <object>\n'
- + 'wear/remove <object>\n'
- + 'wield/unwield <object>\n'
- + 'follow <agent>\n'
- + 'hit <agent>\n'
- + 'put <object> in <container>\n'
- + 'get <object> from <container>\n'
- + 'give <object> to <agent>\n'
- + 'take <object> from <agent>\n'
- + '----------------------------------\n'
- )
- return txt
-
- def get_possible_actions(self, my_agent_id='dragon'):
- # TODO: make it independent of specific world settings
- actions = []
- dragon_id = my_agent_id
- if self.valid(dragon_id, 'dead'):
- return actions
- current_room_id = self.node_contained_in(dragon_id)
- for id in self.node_path_to(current_room_id):
- actions.append('go {}'.format(self.node_to_desc_raw(id)))
- for id in self.object_ids + self.container_ids:
- if not self.node_exists(id):
- continue
- desc = self.node_to_desc_raw(id)
- if self.node_contained_in(id) == current_room_id:
- actions.append('get {}'.format(desc))
- if self.node_contained_in(id) == dragon_id:
- if not self.valid(id, 'wearing') and not self.valid(id, 'wielding'):
- actions.append('drop {}'.format(desc))
- for container_id in self.container_ids:
- if container_id != id and (
- self.node_contained_in(container_id) == dragon_id
- or self.node_contained_in(container_id) == current_room_id
- ):
- actions.append(
- 'put {} in {}'.format(
- desc, self.node_to_desc_raw(container_id)
- )
- )
- for agent_id in self.agent_ids:
- if (
- agent_id != dragon_id
- and not self.valid(agent_id, 'dead')
- and self.node_contained_in(agent_id) == current_room_id
- ):
- actions.append(
- 'give {} to {}'.format(
- desc, self.node_to_desc_raw(agent_id)
- )
- )
- if self.valid(id, 'food'):
- actions.append('eat {}'.format(desc))
- if self.valid(id, 'drink'):
- actions.append('drink {}'.format(desc))
- if self.valid(id, 'wearable') and not self.valid(id, 'wearing'):
- actions.append('wear {}'.format(desc))
- if self.valid(id, 'wearing'):
- actions.append('remove {}'.format(desc))
- if self.valid(id, 'wieldable') and not self.valid(id, 'wielding'):
- actions.append('wield {}'.format(desc))
- if self.valid(id, 'wielding'):
- actions.append('unwield {}'.format(desc))
-
- container_id = self.node_contained_in(id)
- if (
- self.valid(container_id, 'agent')
- and container_id != dragon_id
- and self.node_contained_in(container_id) == current_room_id
- ):
- actions.append(
- 'take {} from {}'.format(desc, self.node_to_desc_raw(container_id))
- )
- if self.valid(container_id, 'container') and (
- self.node_contained_in(container_id)
- == self.node_contained_in(dragon_id)
- or self.node_contained_in(container_id) == dragon_id
- ):
- actions.append(
- 'get {} from {}'.format(desc, self.node_to_desc_raw(container_id))
- )
-
- for id in self.agent_ids:
- if (
- id != dragon_id
- and not self.valid(id, 'dead')
- and self.node_contained_in(id) == current_room_id
- ):
- actions.append('hit {}'.format(self.node_to_desc_raw(id)))
-
- return list(set(actions))
-
- @staticmethod
- def parse_static(inst):
- inst = inst.lower().strip().split()
- symb_points = []
- for i, symb in enumerate(inst):
- if symb in [
- 'go',
- 'get',
- 'drop',
- 'hit',
- 'examine',
- 'ex',
- 'give',
- 'take',
- 'follow',
- 'put',
- 'create',
- 'c',
- 'eat',
- 'drink',
- 'wear',
- 'wield',
- 'unwield',
- 'remove',
- 'look',
- 'actions',
- 'hints',
- ]:
- symb_points.append(i)
- symb_points.append(len(inst))
- return inst, symb_points
-
- @staticmethod
- def filter_actions(inst):
- ret_actions = []
- inst, symb_points = Graph.parse_static(inst)
- for i in range(len(symb_points) - 1):
- j, k = symb_points[i], symb_points[i + 1]
- if inst[j] in [
- 'go',
- 'get',
- 'drop',
- 'hit',
- 'give',
- 'take',
- 'put',
- 'eat',
- 'drink',
- 'wear',
- 'wield',
- 'unwield',
- 'remove',
- ]:
- ret_actions.append(' '.join(inst[j:k]))
- return ' '.join(ret_actions)
-
- def parse(self, inst):
- return Graph.parse_static(inst)
-
- def parse_exec(self, agentid, inst=None):
- """ATTENTION: even if one of the actions is invalid, all actions before that will still be executed (the world state will be changed)!"""
- if inst is None:
- inst = agentid
- agentid = 'dragon'
-
- if self.has_prop(agentid, 'dead'):
- self.send_msg(agentid, "You are dead, you can't do anything, sorry.")
- return True
- inst, symb_points = self.parse(inst)
- if len(inst) == 1 and (
- inst[0] == 'a' or inst[0] == 'actions' or inst[0] == 'hints'
- ):
- self.send_msg(
- agentid,
- '\n'.join(sorted(self.get_possible_actions()))
- + '\ninventory\nlook\nexamine <object>\n',
- )
- return True
- if len(inst) == 1 and (
- inst[0] == 'i' or inst[0] == 'inv' or inst[0] == 'inventory'
- ):
- self.send_msg(agentid, self.inventory(agentid))
- return True
- if len(inst) == 1 and (inst[0] == 'health' or inst[0] == 'status'):
- self.send_msg(agentid, 'You are feeling ' + self.health(agentid) + '.\n')
- return True
- if len(inst) == 1 and (inst[0] == 'look' or inst[0] == 'l'):
- self.send_msg(agentid, self.look(agentid))
- return True
- if len(inst) == 1 and (inst[0] == 'wait' or inst[0] == 'w'):
- self.send_msg(agentid, 'You wait. ')
- return True
- if len(inst) == 1 and (inst[0] == 'help'):
- self.send_msg(agentid, self.help())
- return True
- if len(symb_points) <= 1 or symb_points[0] != 0:
- return False
- for i in range(len(symb_points) - 1):
- j, k = symb_points[i], symb_points[i + 1]
- params = inst[j + 1 : k]
- if inst[j] == 'go':
- room_name = ' '.join(inst[j + 1 : k])
- if not self.move_agent(agentid, room_name):
- return False
- elif inst[j] == 'eat' or inst[j] == 'drink':
- thing = ' '.join(inst[j + 1 : k])
- if not self.ingest(agentid, inst[j], thing):
- return False
- elif inst[j] == 'wear':
- thing = ' '.join(inst[j + 1 : k])
- if not self.wear(agentid, thing):
- return False
- elif inst[j] == 'wield':
- thing = ' '.join(inst[j + 1 : k])
- if not self.wield(agentid, thing):
- return False
- elif inst[j] == 'remove' or inst[j] == 'unwield':
- thing = ' '.join(inst[j + 1 : k])
- if not self.remove(agentid, thing):
- return False
- elif inst[j] == 'put':
- params = self.split_params(params, 'in')
- if not self.put(agentid, params):
- return False
- elif inst[j] == 'create' or inst[j] == 'c':
- if not self.create(agentid, params):
- return False
- elif inst[j] == 'get':
- if 'from' in inst[j + 1 : k]:
- # get X from Y
- params = self.split_params(params, 'from')
- if not self.get_from(agentid, params):
- return False
- else:
- # get from loc
- object_name = ' '.join(inst[j + 1 : k])
- if not self.get_object(agentid, object_name):
- return False
- elif inst[j] == 'drop':
- object_name = ' '.join(inst[j + 1 : k])
- if not self.drop_object(agentid, object_name):
- return False
- elif inst[j] == 'examine' or inst[j] == 'ex':
- thing = ' '.join(inst[j + 1 : k])
- if not self.examine(agentid, thing):
- return False
- elif inst[j] == 'hit':
- victim = ' '.join(inst[j + 1 : k])
- if not self.hit_agent(agentid, victim):
- return False
- elif inst[j] == 'give':
- params = self.split_params(params, 'to')
- if not self.give(agentid, params):
- return False
- elif inst[j] == 'take':
- params = self.split_params(params, 'from')
- if not self.take(agentid, params):
- return False
- elif inst[j] == 'follow':
- if not self.follow(agentid, params):
- return False
- else:
- return False
- # assert False
- return True
-
- def update_world(self):
- # move all the agents and junk, unless world frozen
- if self.freeze():
- return
- for agent_id in self._node_npcs:
- if self.has_prop(agent_id, 'dead'):
- continue
- # random movement for npcs..
- locs = self.node_path_to(self.room(agent_id))
- loc = locs[random.randint(0, len(locs) - 1)]
- act = 'go ' + self.node_to_desc(loc)
- self.move_agent(agent_id, to_id=loc)
- if random.randint(0, 100) < 50:
- act = 'hit dragon'
- self.parse_exec(agent_id, act)
-
-
-def construct_graph(opt, graph_file=None, save_file=None, freeze=True):
- g = Graph(opt)
- if graph_file is None or not g.load_graph(graph_file):
- edge_p = opt['edge_p']
- seed = opt['seed']
- if seed >= 0:
- random.seed(seed)
- np.random.seed(seed)
- room_ids = [''] * len(DEFAULT_ROOMS)
- for i, v in enumerate(DEFAULT_ROOMS):
- room_ids[i] = g.add_node(v, 'room')
- # ensure that the graph is connected
- N = len(room_ids)
- perm = np.random.permutation(N)
- for i in range(N - 1):
- g.add_path_to(room_ids[perm[i]], room_ids[perm[i + 1]])
- for i in range(N):
- for j in range(i + 1, N):
- if random.random() < edge_p:
- g.add_path_to(room_ids[i], room_ids[j])
-
- container_ids = []
- for i, v in enumerate(DEFAULT_CONTAINERS):
- id = g.add_node(v, ['object', 'container'])
- container_ids.append(id)
- g.add_contained_in(id, room_ids[random.randint(0, N - 1)])
-
- agent_ids = []
- for i, v in enumerate(DEFAULT_AGENTS):
- id = g.add_node(v, 'agent')
- g.add_contained_in(id, room_ids[random.randint(0, N - 1)])
- agent_ids.append(id)
-
- all_ids = room_ids + container_ids + agent_ids
- M = len(all_ids)
-
- object_ids = []
- for ind, o in enumerate(DEFAULT_OBJECTS):
- id = g.add_node(o, 'object')
- g.set_prop(id, DEFAULT_OBJECT_PROPS[ind])
- room_id = all_ids[random.randint(0, M - 1)] if o != 'apple' else all_ids[0]
- # assign all apples to room 0 just to avoid ambiguity of "apple -> cavern"
- g.add_contained_in(id, room_id)
- object_ids.append(id)
-
- g.room_ids, g.container_ids, g.agent_ids, g.object_ids = (
- room_ids,
- container_ids,
- agent_ids,
- object_ids,
- )
-
- if save_file is not None:
- g.save_graph(save_file)
-
- if freeze:
- g._node_freeze = True
-
- g.new_agent('dragon')
-
- return g
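construct_graph above keeps the room graph connected by first chaining all rooms along a random permutation and only then adding extra edges with probability edge_p. A minimal, self-contained sketch of that connectivity scheme (the function name and the final print are illustrative, not part of the original module):

import random

import numpy as np


def random_connected_edges(n_rooms, edge_p, seed=0):
    """Return a set of undirected edges over range(n_rooms) that is always connected."""
    random.seed(seed)
    np.random.seed(seed)
    edges = set()
    # chain the rooms along a random permutation so every room is reachable
    perm = np.random.permutation(n_rooms)
    for i in range(n_rooms - 1):
        a, b = sorted((int(perm[i]), int(perm[i + 1])))
        edges.add((a, b))
    # sprinkle extra shortcuts with probability edge_p, as construct_graph does
    for i in range(n_rooms):
        for j in range(i + 1, n_rooms):
            if random.random() < edge_p:
                edges.add((i, j))
    return edges


print(random_connected_edges(4, 0.3))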
diff --git a/projects/mastering_the_dungeon/tasks/graph_world2/worlds.py b/projects/mastering_the_dungeon/tasks/graph_world2/worlds.py
deleted file mode 100644
--- a/projects/mastering_the_dungeon/tasks/graph_world2/worlds.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python3
-
-##
-## Copyright (c) Facebook, Inc. and its affiliates.
-## This source code is licensed under the MIT license found in the
-## LICENSE file in the root directory of this source tree.
-##
-from parlai.core.worlds import ExecutableWorld
-from projects.mastering_the_dungeon.tasks.graph_world2.graph import construct_graph
-
-
-class GraphWorld2(ExecutableWorld):
- def init_world(self):
- self.g = construct_graph(self.opt)
- for a in self.agents:
- self.g.new_agent(a.id)
-
- def observe(self, agent, act):
- if agent.id == act['id']:
- msg = {}
- msg['text'] = self.g.get_text(agent.id).rstrip('\n')
- msg['id'] = 'world'
- msg['graph'] = self.g # preferably agents don't use the graph directly,
- # but we make available here.
- return msg
- else:
- return None
-
- def execute(self, agent, act):
- # Execute action from agent. We also send an update to all other agents
- # that can observe the change.
- if 'text' in act:
- valid = self.g.parse_exec(agent.id, act['text'])
- if not valid:
- self.g.send_msg(agent.id, 'Invalid action.\n')
- self.g.update_world() # other NPCs can move, etc.
diff --git a/projects/memnn_feedback/__init__.py b/projects/memnn_feedback/__init__.py
deleted file mode 100644
--- a/projects/memnn_feedback/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/memnn_feedback/agent/__init__.py b/projects/memnn_feedback/agent/__init__.py
deleted file mode 100755
--- a/projects/memnn_feedback/agent/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/memnn_feedback/agent/memnn_feedback.py b/projects/memnn_feedback/agent/memnn_feedback.py
deleted file mode 100755
--- a/projects/memnn_feedback/agent/memnn_feedback.py
+++ /dev/null
@@ -1,630 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from parlai.core.agents import Agent
-from parlai.core.dict import DictionaryAgent
-
-import torch
-from torch import optim
-from torch.autograd import Variable
-from torch.nn import CrossEntropyLoss
-
-import os
-import copy
-import random
-
-from .modules import MemNN, Decoder, to_tensors
-
-
-class MemnnFeedbackAgent(Agent):
- """
- Memory Network agent for question answering that supports reward-based learning
- (RBI), forward prediction (FP), and imitation learning (IM).
-
- For more details on settings see: https://arxiv.org/abs/1604.06045.
-
-    Model settings 'FP', 'RBI', 'RBI+FP', and 'IM_feedback' assume that the
-    feedback and reward for the current example immediately follow the query
-    (add the ':feedback' argument when specifying the task name).
-
- parlai train_model --setting 'FP'
- -m "projects.memnn_feedback.agent.memnn_feedback:MemnnFeedbackAgent"
- -t "projects.memnn_feedback.tasks.dbll_babi.agents:taskTeacher:3_p0.5:feedback"
- """
-
- @staticmethod
- def add_cmdline_args(argparser):
- DictionaryAgent.add_cmdline_args(argparser)
- arg_group = argparser.add_argument_group('MemNN Arguments')
- arg_group.add_argument(
- '-lr', '--learning-rate', type=float, default=0.01, help='learning rate'
- )
- arg_group.add_argument(
- '--embedding-size', type=int, default=128, help='size of token embeddings'
- )
- arg_group.add_argument(
- '--hops', type=int, default=3, help='number of memory hops'
- )
- arg_group.add_argument(
- '--mem-size', type=int, default=100, help='size of memory'
- )
- arg_group.add_argument(
- '--time-features',
- type='bool',
- default=True,
- help='use time features for memory embeddings',
- )
- arg_group.add_argument(
- '--position-encoding',
- type='bool',
- default=False,
- help='use position encoding instead of bag of words embedding',
- )
- arg_group.add_argument(
- '-clip',
- '--gradient-clip',
- type=float,
- default=0.2,
- help='gradient clipping using l2 norm',
- )
- arg_group.add_argument(
- '--output', type=str, default='rank', help='type of output (rank|generate)'
- )
- arg_group.add_argument(
- '--rnn-layers',
- type=int,
- default=2,
- help='number of hidden layers in RNN decoder for generative output',
- )
- arg_group.add_argument(
- '--dropout',
- type=float,
- default=0.1,
- help='dropout probability for RNN decoder training',
- )
- arg_group.add_argument(
- '--optimizer', default='sgd', help='optimizer type (sgd|adam)'
- )
- arg_group.add_argument(
- '--no-cuda',
- action='store_true',
- default=False,
- help='disable GPUs even if available',
- )
- arg_group.add_argument(
- '--gpu', type=int, default=-1, help='which GPU device to use'
- )
- arg_group.add_argument(
- '--setting',
- type=str,
- default='IM',
- help='choose among IM, IM_feedback, RBI, FP, RBI+FP',
- )
- arg_group.add_argument(
- '--num-feedback-cands',
- type=int,
- default=6,
- help='number of feedback candidates',
- )
- arg_group.add_argument(
- '--single_embedder',
- type='bool',
- default=False,
-            help='use a single embedding matrix for all embedding lookups in the model',
- )
-
- def __init__(self, opt, shared=None):
- super().__init__(opt, shared)
-
- opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
- if opt['cuda']:
- print('[ Using CUDA ]')
- torch.cuda.device(opt['gpu'])
-
- if not shared:
- self.id = 'MemNN'
- self.dict = DictionaryAgent(opt)
- self.decoder = None
- if opt['output'] == 'generate' or opt['output'] == 'g':
- self.decoder = Decoder(
- opt['embedding_size'],
- opt['embedding_size'],
- opt['rnn_layers'],
- opt,
- self.dict,
- )
- elif opt['output'] != 'rank' and opt['output'] != 'r':
- raise NotImplementedError('Output type not supported.')
-
- if 'FP' in opt['setting']:
- # add extra beta-word to indicate learner's answer
- self.beta_word = 'betaword'
- self.dict.add_to_dict([self.beta_word])
-
- self.model = MemNN(opt, self.dict)
-
- optim_params = [p for p in self.model.parameters() if p.requires_grad]
- lr = opt['learning_rate']
- if opt['optimizer'] == 'sgd':
- self.optimizers = {'memnn': optim.SGD(optim_params, lr=lr)}
- if self.decoder is not None:
- self.optimizers['decoder'] = optim.SGD(
- self.decoder.parameters(), lr=lr
- )
- elif opt['optimizer'] == 'adam':
- self.optimizers = {'memnn': optim.Adam(optim_params, lr=lr)}
- if self.decoder is not None:
- self.optimizers['decoder'] = optim.Adam(
- self.decoder.parameters(), lr=lr
- )
- else:
- raise NotImplementedError('Optimizer not supported.')
-
- if opt['cuda']:
- self.model.share_memory()
- if self.decoder is not None:
- self.decoder.cuda()
-
- if opt.get('model_file') and os.path.isfile(opt['model_file']):
- print('Loading existing model parameters from ' + opt['model_file'])
- self.load(opt['model_file'])
- else:
- if 'model' in shared:
- # model is shared during hogwild
- self.model = shared['model']
- self.dict = shared['dict']
- self.decoder = shared['decoder']
- self.optimizers = shared['optimizer']
- if 'FP' in opt['setting']:
- self.beta_word = shared['betaword']
-
- if hasattr(self, 'model'):
- self.opt = opt
- self.mem_size = opt['mem_size']
- self.loss_fn = CrossEntropyLoss()
- self.gradient_clip = opt.get('gradient_clip', 0.2)
-
- self.model_setting = opt['setting']
- if 'FP' in opt['setting']:
- self.feedback_cands = set([])
- self.num_feedback_cands = opt['num_feedback_cands']
-
- self.longest_label = 1
- self.END = self.dict.end_token
- self.END_TENSOR = torch.LongTensor(self.dict.parse(self.END))
- self.START = self.dict.start_token
- self.START_TENSOR = torch.LongTensor(self.dict.parse(self.START))
-
- self.reset()
- self.last_cands, self.last_cands_list = None, None
-
- def share(self):
- # Share internal states between parent and child instances
- shared = super().share()
-
- if self.opt.get('numthreads', 1) > 1:
- shared['model'] = self.model
- self.model.share_memory()
- shared['optimizer'] = self.optimizers
- shared['dict'] = self.dict
- shared['decoder'] = self.decoder
- if 'FP' in self.model_setting:
- shared['betaword'] = self.beta_word
- return shared
-
- def observe(self, observation):
- observation = copy.copy(observation)
-
- # extract feedback for forward prediction
- # IM setting - no feedback provided in the dataset
- if self.opt['setting'] != 'IM':
- if 'text' in observation:
- split = observation['text'].split('\n')
- feedback = split[-1]
- observation['feedback'] = feedback
- observation['text'] = '\n'.join(split[:-1])
-
- if not self.episode_done:
- # if the last example wasn't the end of an episode, then we need to
- # recall what was said in that example
- prev_dialogue = (
- self.observation['text'] if self.observation is not None else ''
- )
-
- # append answer and feedback (if available) given in the previous example to the previous dialog
- if 'eval_labels' in self.observation:
- prev_dialogue += '\n' + random.choice(self.observation['eval_labels'])
- elif 'labels' in self.observation:
- prev_dialogue += '\n' + random.choice(self.observation['labels'])
- if 'feedback' in self.observation:
- prev_dialogue += '\n' + self.observation['feedback']
-
- observation['text'] = prev_dialogue + '\n' + observation['text']
-
- self.observation = observation
- self.episode_done = observation['episode_done']
- return observation
-
- def reset(self):
- # reset observation and episode_done
- self.observation = None
- self.episode_done = True
-
- def backward(self, loss, retain_graph=False):
- # zero out optimizer and take one optimization step
- for o in self.optimizers.values():
- o.zero_grad()
- loss.backward(retain_graph=retain_graph)
-
- torch.nn.utils.clip_grad_norm(self.model.parameters(), self.gradient_clip)
- for o in self.optimizers.values():
- o.step()
-
- def parse_cands(self, cand_answers):
- """Returns:
- cand_answers = tensor (vector) of token indices for answer candidates
- cand_answers_lengths = tensor (vector) with lengths of each answer candidate
- """
- parsed_cands = [to_tensors(c, self.dict) for c in cand_answers]
- cand_answers_tensor = torch.cat([x[1] for x in parsed_cands])
- max_cands_len = max([len(c) for c in cand_answers])
- cand_answers_lengths = torch.LongTensor(
- len(cand_answers), max_cands_len
- ).zero_()
- for i in range(len(cand_answers)):
- if len(parsed_cands[i][0]) > 0:
- cand_answers_lengths[i, -len(parsed_cands[i][0]) :] = parsed_cands[i][0]
- cand_answers_tensor = Variable(cand_answers_tensor)
- cand_answers_lengths = Variable(cand_answers_lengths)
- return cand_answers_tensor, cand_answers_lengths
-
- def get_cand_embeddings_with_added_beta(self, cands, selected_answer_inds):
- # add beta_word to the candidate selected by the learner to indicate learner's answer
- cand_answers_with_beta = copy.deepcopy(cands)
-
- for i in range(len(cand_answers_with_beta)):
- cand_answers_with_beta[i][selected_answer_inds[i]] += ' ' + self.beta_word
-
- # get candidate embeddings after adding beta_word to the selected candidate
- (
- cand_answers_tensor_with_beta,
- cand_answers_lengths_with_beta,
- ) = self.parse_cands(cand_answers_with_beta)
- cands_embeddings_with_beta = self.model.answer_embedder(
- cand_answers_lengths_with_beta, cand_answers_tensor_with_beta
- )
- if self.opt['cuda']:
- cands_embeddings_with_beta = cands_embeddings_with_beta.cuda()
- return cands_embeddings_with_beta
-
- def predict(self, xs, answer_cands, ys=None, feedback_cands=None):
- is_training = ys is not None
- if is_training and 'FP' not in self.model_setting:
- # Subsample to reduce training time
- answer_cands = [
- list(set(random.sample(c, min(32, len(c))) + self.labels))
- for c in answer_cands
- ]
- else:
- # rank all cands to increase accuracy
- answer_cands = [list(set(c)) for c in answer_cands]
-
- self.model.train(mode=is_training)
-
- # Organize inputs for network (see contents of xs and ys in batchify method)
- inputs = [Variable(x, volatile=is_training) for x in xs]
-
- if self.decoder:
- output_embeddings = self.model(*inputs)
- self.decoder.train(mode=is_training)
- output_lines, loss = self.decode(output_embeddings, ys)
- predictions = self.generated_predictions(output_lines)
- self.backward(loss)
- return predictions
-
- scores = None
- if is_training:
- label_inds = [
- cand_list.index(self.labels[i])
- for i, cand_list in enumerate(answer_cands)
- ]
-
- if 'FP' in self.model_setting:
- if len(feedback_cands) == 0:
- print(
- 'FP is not training... waiting for negative feedback examples'
- )
- else:
- cand_answers_embs_with_beta = self.get_cand_embeddings_with_added_beta(
- answer_cands, label_inds
- )
- scores, forward_prediction_output = self.model(
- *inputs, answer_cands, cand_answers_embs_with_beta
- )
- fp_scores = self.model.get_score(
- feedback_cands, forward_prediction_output, forward_predict=True
- )
- feedback_label_inds = [
- cand_list.index(self.feedback_labels[i])
- for i, cand_list in enumerate(feedback_cands)
- ]
- if self.opt['cuda']:
- feedback_label_inds = Variable(
- torch.cuda.LongTensor(feedback_label_inds)
- )
- else:
- feedback_label_inds = Variable(
- torch.LongTensor(feedback_label_inds)
- )
- loss_fp = self.loss_fn(fp_scores, feedback_label_inds)
- if loss_fp.data[0] > 100000:
- raise Exception(
- "Loss might be diverging. Loss:", loss_fp.data[0]
- )
- self.backward(loss_fp, retain_graph=True)
-
- if self.opt['cuda']:
- label_inds = Variable(torch.cuda.LongTensor(label_inds))
- else:
- label_inds = Variable(torch.LongTensor(label_inds))
-
- if scores is None:
- output_embeddings = self.model(*inputs)
- scores = self.model.get_score(answer_cands, output_embeddings)
-
- predictions = self.ranked_predictions(answer_cands, scores)
-
- if is_training:
- update_params = True
- # don't perform regular training if in FP mode
- if self.model_setting == 'FP':
- update_params = False
- elif 'RBI' in self.model_setting:
- if len(self.rewarded_examples_inds) == 0:
- update_params = False
- else:
- self.rewarded_examples_inds = torch.LongTensor(
- self.rewarded_examples_inds
- )
- if self.opt['cuda']:
- self.rewarded_examples_inds = self.rewarded_examples_inds.cuda()
- # use only rewarded examples for training
- loss = self.loss_fn(
- scores[self.rewarded_examples_inds, :],
- label_inds[self.rewarded_examples_inds],
- )
- else:
- # regular IM training
- loss = self.loss_fn(scores, label_inds)
-
- if update_params:
- self.backward(loss)
- return predictions
-
- def ranked_predictions(self, cands, scores):
- _, inds = scores.data.sort(descending=True, dim=1)
- return [
- [cands[i][j] for j in r if j < len(cands[i])] for i, r in enumerate(inds)
- ]
-
- def decode(self, output_embeddings, ys=None):
- batchsize = output_embeddings.size(0)
- hn = output_embeddings.unsqueeze(0).expand(
- self.opt['rnn_layers'], batchsize, output_embeddings.size(1)
- )
- x = self.model.answer_embedder(
- Variable(torch.LongTensor([1])), Variable(self.START_TENSOR)
- )
- xes = x.unsqueeze(1).expand(x.size(0), batchsize, x.size(1))
-
- loss = 0
- output_lines = [[] for _ in range(batchsize)]
- done = [False for _ in range(batchsize)]
- total_done = 0
- idx = 0
- while (total_done < batchsize) and idx < self.longest_label:
- # keep producing tokens until we hit END or max length for each ex
- if self.opt['cuda']:
- xes = xes.cuda()
- hn = hn.contiguous()
- preds, scores = self.decoder(xes, hn)
- if ys is not None:
- y = Variable(ys[0][:, idx])
- temp_y = y.cuda() if self.opt['cuda'] else y
- loss += self.loss_fn(scores, temp_y)
- else:
- y = preds
-            # feed y back as the next input (the true token during training, the prediction otherwise)
- xes = self.model.answer_embedder(
- Variable(torch.LongTensor(preds.numel()).fill_(1)), y
- ).unsqueeze(0)
-
- for b in range(batchsize):
- if not done[b]:
- token = self.dict.vec2txt(preds.data[b])
- if token == self.END:
- done[b] = True
- total_done += 1
- else:
- output_lines[b].append(token)
- idx += 1
- return output_lines, loss
-
- def generated_predictions(self, output_lines):
- return [
- [' '.join(c for c in o if c != self.END and c != self.dict.null_token)]
- for o in output_lines
- ]
-
- def parse(self, text):
- """Returns:
- query = tensor (vector) of token indices for query
- query_length = length of query
- memory = tensor (matrix) where each row contains token indices for a memory
- memory_lengths = tensor (vector) with lengths of each memory
- """
- sp = text.split('\n')
- query_sentence = sp[-1]
- query = self.dict.txt2vec(query_sentence)
- query = torch.LongTensor(query)
- query_length = torch.LongTensor([len(query)])
-
- sp = sp[:-1]
- sentences = []
- for s in sp:
- sentences.extend(s.split('\t'))
- if len(sentences) == 0:
- sentences.append(self.dict.null_token)
-
- num_mems = min(self.mem_size, len(sentences))
- memory_sentences = sentences[-num_mems:]
- memory = [self.dict.txt2vec(s) for s in memory_sentences]
- memory = [torch.LongTensor(m) for m in memory]
- memory_lengths = torch.LongTensor([len(m) for m in memory])
- memory = torch.cat(memory)
- return (query, memory, query_length, memory_lengths)
-
- def batchify(self, obs):
- """Returns:
- xs = [memories, queries, memory_lengths, query_lengths]
- ys = [labels, label_lengths] (if available, else None)
- cands = list of candidates for each example in batch
- valid_inds = list of indices for examples with valid observations
- """
- exs = [ex for ex in obs if 'text' in ex]
- valid_inds = [i for i, ex in enumerate(obs) if 'text' in ex]
- if not exs:
- return [None] * 5
-
- if 'RBI' in self.model_setting:
- self.rewarded_examples_inds = [
- i
- for i, ex in enumerate(obs)
- if 'text' in ex and ex.get('reward', 0) > 0
- ]
-
- parsed = [self.parse(ex['text']) for ex in exs]
- queries = torch.cat([x[0] for x in parsed])
- memories = torch.cat([x[1] for x in parsed])
- query_lengths = torch.cat([x[2] for x in parsed])
- memory_lengths = torch.LongTensor(len(exs), self.mem_size).zero_()
- for i in range(len(exs)):
- if len(parsed[i][3]) > 0:
- memory_lengths[i, -len(parsed[i][3]) :] = parsed[i][3]
- xs = [memories, queries, memory_lengths, query_lengths]
-
- ys = None
- self.labels = [random.choice(ex['labels']) for ex in exs if 'labels' in ex]
-
- if len(self.labels) == len(exs):
- parsed = [self.dict.txt2vec(l) for l in self.labels]
- parsed = [torch.LongTensor(p) for p in parsed]
- label_lengths = torch.LongTensor([len(p) for p in parsed]).unsqueeze(1)
- self.longest_label = max(self.longest_label, label_lengths.max())
- padded = [
- torch.cat(
- (
- p,
- torch.LongTensor(self.longest_label - len(p)).fill_(
- self.END_TENSOR[0]
- ),
- )
- )
- for p in parsed
- ]
- labels = torch.stack(padded)
- ys = [labels, label_lengths]
-
- feedback_cands = []
- if 'FP' in self.model_setting:
- self.feedback_labels = [
- ex['feedback']
- for ex in exs
- if 'feedback' in ex and ex['feedback'] is not None
- ]
- self.feedback_cands = self.feedback_cands | set(self.feedback_labels)
-
- if (
- len(self.feedback_labels) == len(exs)
- and len(self.feedback_cands) > self.num_feedback_cands
- ):
- feedback_cands = [
- list(
- set(
- random.sample(self.feedback_cands, self.num_feedback_cands)
- + [feedback]
- )
- )
- for feedback in self.feedback_labels
- ]
-
- cands = [ex['label_candidates'] for ex in exs if 'label_candidates' in ex]
- # Use words in dict as candidates if no candidates are provided
- if len(cands) < len(exs):
- cands = build_cands(exs, self.dict)
-        # Avoid rebuilding the candidate list every batch if it's the same
- if self.last_cands != cands:
- self.last_cands = cands
- self.last_cands_list = [list(c) for c in cands]
- cands = self.last_cands_list
- return xs, ys, cands, valid_inds, feedback_cands
-
- def batch_act(self, observations):
- batchsize = len(observations)
- batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
-
- xs, ys, cands, valid_inds, feedback_cands = self.batchify(observations)
-
- if xs is None or len(xs[1]) == 0:
- return batch_reply
-
- # Either train or predict
- predictions = self.predict(xs, cands, ys, feedback_cands)
-
- for i in range(len(valid_inds)):
- batch_reply[valid_inds[i]]['text'] = predictions[i][0]
- batch_reply[valid_inds[i]]['text_candidates'] = predictions[i]
- return batch_reply
-
- def act(self):
- return self.batch_act([self.observation])[0]
-
- def save(self, path=None):
- path = self.opt.get('model_file', None) if path is None else path
-
- if path:
- checkpoint = {}
- checkpoint['memnn'] = self.model.state_dict()
- checkpoint['memnn_optim'] = self.optimizers['memnn'].state_dict()
- if self.decoder is not None:
- checkpoint['decoder'] = self.decoder.state_dict()
- checkpoint['decoder_optim'] = self.optimizers['decoder'].state_dict()
- checkpoint['longest_label'] = self.longest_label
- with open(path, 'wb') as write:
- torch.save(checkpoint, write)
-
- def load(self, path):
- with open(path, 'rb') as read:
- checkpoint = torch.load(read)
- self.model.load_state_dict(checkpoint['memnn'])
- self.optimizers['memnn'].load_state_dict(checkpoint['memnn_optim'])
- if self.decoder is not None:
- self.decoder.load_state_dict(checkpoint['decoder'])
- self.optimizers['decoder'].load_state_dict(checkpoint['decoder_optim'])
- self.longest_label = checkpoint['longest_label']
-
-
-def build_cands(exs, dict):
- dict_list = list(dict.tok2ind.keys())
- cands = []
- for ex in exs:
- if 'label_candidates' in ex:
- cands.append(ex['label_candidates'])
- else:
- cands.append(dict_list)
- if 'labels' in ex:
- cands[-1] += [l for l in ex['labels'] if l not in dict.tok2ind]
- return cands
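MemnnFeedbackAgent.observe above treats the last line of the incoming text as the teacher's feedback whenever the setting is not plain IM. A small, purely illustrative sketch of that split (the helper name is made up for this example):

def split_feedback(text):
    """Separate the trailing feedback line from the rest of an observation."""
    lines = text.split('\n')
    return '\n'.join(lines[:-1]), lines[-1]


context, feedback = split_feedback(
    'Mary moved to the bedroom.\nWhere is Mary?\nNo, that is wrong.'
)
print(context)   # dialog history plus the query
print(feedback)  # the teacher's feedback: "No, that is wrong."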
diff --git a/projects/memnn_feedback/agent/modules.py b/projects/memnn_feedback/agent/modules.py
deleted file mode 100755
--- a/projects/memnn_feedback/agent/modules.py
+++ /dev/null
@@ -1,301 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import torch
-import torch.nn as nn
-from torch.autograd import Variable
-from torch.nn.functional import softmax
-
-from functools import lru_cache
-
-
-class MemNN(nn.Module):
- def __init__(self, opt, dictionary):
- super().__init__()
- self.opt = opt
- self.dict = dictionary
-
- # Prepare features
- self.num_time_features = opt['mem_size']
- num_features = len(self.dict)
- self.extra_features_slots = 0
- if opt['time_features']:
- self.time_features = torch.LongTensor(
- range(num_features, num_features + self.num_time_features)
- )
- num_features += self.num_time_features
- self.extra_features_slots += 1
-
- def embedding():
- return Embed(
- num_features,
- opt['embedding_size'],
- position_encoding=opt['position_encoding'],
- padding_idx=0,
- )
-
- self.answer_embedder = embedding()
-
- if not self.opt['single_embedder']:
- self.query_embedder = embedding()
- self.in_memory_embedder = embedding()
- self.out_memory_embedder = embedding()
- self.feedback_embedder = embedding()
-
- self.memory_hop = Hop(opt['embedding_size'])
-
- self.score = DotScore()
- self.linear = nn.Linear(
- opt['embedding_size'], opt['embedding_size'], bias=False
- )
-
- if opt['cuda']:
- self.score.cuda()
- self.memory_hop.cuda()
- self.linear.cuda()
-
- def time_feature(self, t):
- return self.time_features[min(t, self.num_time_features - 1)]
-
- def update_memories_with_extra_features_(self, memory_lengths, memories):
- memory_lengths = memory_lengths.data
- memories = memories.data
- if self.extra_features_slots > 0:
- num_nonempty_memories = memory_lengths.ne(0).sum()
- updated_memories = memories.new(
- memories.numel() + num_nonempty_memories * self.extra_features_slots
- )
- src_offset = 0
- dst_offset = 0
- for i in range(memory_lengths.size(0)):
- for j in range(self.opt['mem_size']):
- length = memory_lengths[i, j]
- if length > 0:
- if self.opt['time_features']:
- updated_memories[dst_offset] = self.time_feature(j)
- dst_offset += 1
- updated_memories[dst_offset : dst_offset + length] = memories[
- src_offset : src_offset + length
- ]
- src_offset += length
- dst_offset += length
- memory_lengths += memory_lengths.ne(0).long() * self.extra_features_slots
- memories.set_(updated_memories)
-
- def get_score(self, cands, output_embeddings, forward_predict=False):
- last_cand = None
- max_len = max([len(c) for c in cands])
- scores = Variable(output_embeddings.data.new(len(cands), max_len).zero_())
- for i, cand_list in enumerate(cands):
- if last_cand != cand_list:
- candidate_lengths, candidate_indices = to_tensors(cand_list, self.dict)
- candidate_lengths, candidate_indices = (
- Variable(candidate_lengths),
- Variable(candidate_indices),
- )
- candidate_embeddings = None
- if forward_predict and not self.opt['single_embedder']:
- candidate_embeddings = self.feedback_embedder(
- candidate_lengths, candidate_indices
- )
- else:
- candidate_embeddings = self.answer_embedder(
- candidate_lengths, candidate_indices
- )
- if self.opt['cuda']:
- candidate_embeddings = candidate_embeddings.cuda()
- last_cand = cand_list
- scores[i, : len(cand_list)] = self.score.one_to_many(
- output_embeddings[i].unsqueeze(0), candidate_embeddings
- )
- return scores
-
- def forward(
- self,
- memories,
- queries,
- memory_lengths,
- query_lengths,
- cand_answers=None,
- cands_embeddings_with_beta=None,
- ):
- self.update_memories_with_extra_features_(memory_lengths, memories)
-
- if self.opt['single_embedder']:
- in_memory_embeddings = self.answer_embedder(memory_lengths, memories)
- out_memory_embeddings = self.answer_embedder(memory_lengths, memories)
- query_embeddings = self.answer_embedder(query_lengths, queries)
- else:
- in_memory_embeddings = self.in_memory_embedder(memory_lengths, memories)
- out_memory_embeddings = self.out_memory_embedder(memory_lengths, memories)
- query_embeddings = self.query_embedder(query_lengths, queries)
-
- attention_mask = Variable(memory_lengths.data.ne(0), requires_grad=False)
-
- if self.opt['cuda']:
- in_memory_embeddings = in_memory_embeddings.cuda()
- out_memory_embeddings = out_memory_embeddings.cuda()
- query_embeddings = query_embeddings.cuda()
- attention_mask = attention_mask.cuda()
-
- for _ in range(self.opt['hops']):
- query_embeddings = self.memory_hop(
- query_embeddings,
- in_memory_embeddings,
- out_memory_embeddings,
- attention_mask,
- )
-
- # return updated query state if not in forward prediction mode
- if cands_embeddings_with_beta is None:
- return query_embeddings
-
- # hop over candidate answers
- scores = self.get_score(cand_answers, query_embeddings)
- probs = softmax(scores).unsqueeze(1)
-
- forward_prediction_output = torch.bmm(
- probs, cands_embeddings_with_beta
- ).squeeze(1)
- forward_prediction_output = forward_prediction_output + query_embeddings
- forward_prediction_output = self.linear(forward_prediction_output)
- return scores, forward_prediction_output
-
-
-class Embed(nn.Embedding):
- def __init__(self, *args, position_encoding=False, **kwargs):
- self.position_encoding = position_encoding
- super().__init__(*args, **kwargs)
-
- def forward(self, lengths, indices):
- lengths_mat = lengths.data
- indices = indices.data
- if lengths.dim() == 1 or lengths.size(1) == 1:
- lengths_mat = lengths_mat.squeeze().unsqueeze(0)
-
- input = torch.LongTensor(
- lengths_mat.size(0), lengths_mat.size(1), torch.max(lengths_mat)
- )
- pad = self.padding_idx if self.padding_idx is not None else 0
- input.fill_(pad)
- emb_list = []
- offset = 0
- for i, row in enumerate(lengths_mat):
- for j, length in enumerate(row):
- if length > 0:
- input[i, j, :length] = indices[offset : offset + length]
- offset += length
- input = Variable(input)
-
- for i, row in enumerate(lengths_mat):
- emb = super().forward(input[i, :, :])
- if self.position_encoding:
- emb = emb * Variable(self.position_tensor(row, emb))
- emb = torch.sum(emb, dim=1).squeeze(1)
- for j, length in enumerate(row):
- if length > 0:
- emb[j] /= length
- emb_list.append(emb)
- embs = torch.stack(emb_list)
-
- if lengths.dim() == 1:
- embs = embs.squeeze(0)
- elif lengths.size(1) == 1:
- embs = embs.squeeze().unsqueeze(1)
- return embs
-
- @staticmethod
- @lru_cache(maxsize=32)
- def position_matrix(J, d):
- m = torch.Tensor(J, d)
- for k in range(1, d + 1):
- for j in range(1, J + 1):
- m[j - 1, k - 1] = (1 - j / J) - (k / d) * (1 - 2 * j / J)
- return m
-
- @staticmethod
- def position_tensor(sentence_lengths, embeddings):
- t = torch.zeros(embeddings.size())
- embedding_dim = embeddings.size()[-1]
- for i, length in enumerate(sentence_lengths):
- if length > 0:
- t[i, :length, :] = Embed.position_matrix(length, embedding_dim)
- return t
-
-
-class Hop(nn.Module):
- def __init__(self, embedding_size):
- super(Hop, self).__init__()
- self.embedding_size = embedding_size
- self.linear = nn.Linear(embedding_size, embedding_size, bias=False)
-
- def forward(
- self,
- query_embeddings,
- in_memory_embeddings,
- out_memory_embeddings,
- attention_mask=None,
- ):
- attention = torch.bmm(
- in_memory_embeddings, query_embeddings.unsqueeze(2)
- ).squeeze(2)
- if attention_mask is not None:
- # exclude masked elements from the softmax
- attention = (
- attention_mask.float() * attention
- + (1 - attention_mask.float()) * -1e20
- )
- probs = softmax(attention).unsqueeze(1)
- memory_output = torch.bmm(probs, out_memory_embeddings).squeeze(1)
- query_embeddings = self.linear(query_embeddings)
- output = memory_output + query_embeddings
- return output
-
-
-class Decoder(nn.Module):
- def __init__(self, input_size, hidden_size, num_layers, opt, dictionary):
- super().__init__()
- self.dict = dictionary
- self.h2o = nn.Linear(hidden_size, len(dictionary))
- self.dropout = nn.Dropout(opt['dropout'])
- self.rnn = nn.GRU(input_size, hidden_size, num_layers)
-
- def hidden_to_idx(self, hidden, dropout=False):
- """
- Converts hidden state vectors into indices into the dictionary.
- """
- if hidden.size(0) > 1:
- raise RuntimeError('Bad dimensions of tensor:', hidden)
- hidden = hidden.squeeze(0)
- scores = self.h2o(hidden)
- if dropout:
- scores = self.dropout(scores)
- _, idx = scores.max(1)
- return idx, scores
-
- def forward(self, input, state):
- output, state = self.rnn(input, state)
- return self.hidden_to_idx(output, dropout=self.training)
-
-
-class DotScore(nn.Module):
- def one_to_one(self, query_embeddings, answer_embeddings, reply_embeddings=None):
- return (query_embeddings * answer_embeddings).sum(dim=1).squeeze(1)
-
- def one_to_many(self, query_embeddings, answer_embeddings, reply_embeddings=None):
- return query_embeddings.mm(answer_embeddings.t())
-
-
-def to_tensors(sentences, dictionary):
- lengths = []
- indices = []
- for sentence in sentences:
- tokens = dictionary.txt2vec(sentence)
- lengths.append(len(tokens))
- indices.extend(tokens)
- lengths = torch.LongTensor(lengths)
- indices = torch.LongTensor(indices)
- return lengths, indices
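Embed.position_matrix above fills in the position-encoding weights l[j][k] = (1 - j/J) - (k/d) * (1 - 2j/J), which re-weight token embeddings by their position in a sentence. A tiny torch-free sketch that reproduces the same values (for illustration only):

def position_matrix(J, d):
    """Position-encoding weights with 1-based j (token position) and k (embedding dim)."""
    return [
        [(1 - j / J) - (k / d) * (1 - 2 * j / J) for k in range(1, d + 1)]
        for j in range(1, J + 1)
    ]


for row in position_matrix(3, 4):
    print([round(v, 3) for v in row])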
diff --git a/projects/memnn_feedback/tasks/dbll_babi/__init__.py b/projects/memnn_feedback/tasks/dbll_babi/__init__.py
deleted file mode 100644
--- a/projects/memnn_feedback/tasks/dbll_babi/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/memnn_feedback/tasks/dbll_babi/agents.py b/projects/memnn_feedback/tasks/dbll_babi/agents.py
deleted file mode 100755
--- a/projects/memnn_feedback/tasks/dbll_babi/agents.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-#
-# Accessing the tasks can be done with something like:
-#
-# parlai display_data -t "projects.memnn_feedback.tasks.dbll_babi.agents:taskTeacher:3_p0.5:feedback"
-#
-# which specifies task 3 with a policy that answers correctly with probability 0.5; see the paper
-# for more details: https://arxiv.org/abs/1604.06045
-
-from parlai.core.teachers import FbDialogTeacher
-from .build import build
-
-import copy
-import os
-
-tasks = {}
-tasks[1] = 'rl1_pure_imitation'
-tasks[2] = 'rl2_pos_neg'
-tasks[3] = 'rl3_with_ans'
-tasks[4] = 'rl4_with_hints'
-tasks[5] = 'rl5_told_sf'
-tasks[6] = 'rl6_only_some_rewards'
-tasks[7] = 'rl7_no_feedback'
-tasks[8] = 'rl8_imitation_plus_rl'
-tasks[9] = 'rl9_ask_for_answer'
-tasks[10] = 'rl10_ask_for_sf'
-
-_suffixes = {'train': 'train', 'test': 'test', 'valid': 'dev'}
-
-
-def _path(subdir, task, opt, dt=''):
- build(opt)
- if dt == '':
- dt = opt['datatype'].split(':')[0]
- task_name = '%s_%s' % (task.split('_')[1], tasks[int(task.split('_')[0])])
- return os.path.join(
- opt['datapath'],
- 'DBLL',
- 'dbll',
- '{subdir}_{task}_{suffix}.txt'.format(
- subdir=subdir, task=task_name, suffix=_suffixes[dt]
- ),
- )
-
-
-class TaskTeacher(FbDialogTeacher):
- def __init__(self, opt, shared=None):
- params = opt['task'].split(':')[2]
- opt = copy.deepcopy(opt)
- opt['datafile'] = _path(os.path.join('babi', 'babi1'), params, opt)
- opt['cands_datafile'] = _path(
- os.path.join('babi', 'babi1'), params, opt, 'train'
- )
- self.opt = opt
- super().__init__(opt, shared)
-
- def setup_data(self, path):
- """
- Reads feedback for an example along with text and labels if 'feedback' argument
- is specified.
- """
- if self.opt['task'].split(':')[-1] == 'feedback':
- return self.setup_data_with_feedback(path)
- else:
- return super().setup_data(path)
-
- def setup_data_with_feedback(self, path):
- """
- Reads data in the fbdialog format. This method is very similar to
- FbDialogTeacher.setup_data(..). The difference is that in this method the
- feedback is appended to the query from the current example; in the default setup
- the feedback is appended to the x from the next example.
-
- The data would look something like this:
-
- Mary moved to the bedroom.
- Mary travelled to the garden.
- Where is John?
- No, that's wrong.
- [labels: garden]
-
- To append feedback to the current example, modify the task name like this:
- parlai display_data -t dbll_babi:task:2_p0.5:f
- Default setup:
- parlai display_data -t dbll_babi:task:2_p0.5
- """
- print("[loading fbdialog data:" + path + "]")
- with open(path) as read:
- start = True
- x = ''
-
- y = None
-
- reward = 0
- dialog_index = 0
- read_feedback = False
- for line in read:
- line = line.strip().replace('\\n', '\n')
- if len(line) == 0:
- continue
-
- # first, get conversation index -- '1' means start of episode
- space_idx = line.find(' ')
- conv_id = line[:space_idx]
-
- # split line into constituent parts, if available:
- # x<tab>y<tab>reward<tab>label_candidates
- # where y, reward, and label_candidates are optional
- split = line[space_idx + 1 :].split('\t')
-
- # remove empty items and strip each one
- for i in range(len(split)):
- word = split[i].strip()
- if len(word) == 0:
- split[i] = ''
- else:
- split[i] = word
- # Empty reward string same as None
- if len(split) > 2 and split[2] == '':
- split[2] = None
-
- # now check if we're at a new episode
- if conv_id == '1':
- dialog_index += 1
- x = x.strip()
- if x:
- yield [x, None, reward], start
- start = True
- reward = 0
- # start a new episode
- if self.cloze:
- x = 'Fill in the blank in the last sentence.\n{x}'.format(
- x=split[0]
- )
- else:
- x = split[0]
- else:
- if x:
- # otherwise add current x to what we have so far
- x = '{x}\n{next_x}'.format(x=x, next_x=split[0])
- else:
- x = split[0]
- if len(split) > 2 and split[2]:
- reward += float(split[2])
-
- if len(split) > 1 and split[1]:
- read_feedback = True
- # split labels
- y = split[1].split('|')
-
- if read_feedback and not split[1]:
- split[0] = x
- split[1] = y
- if len(split) > 2:
- split[2] = reward
- else:
- split.append(reward)
- if start:
- yield split, True
- start = False
- else:
- yield split, False
- # reset x in case there is unlabeled data still left
- x = ''
- reward = 0
- y = None
- read_feedback = False
-
-
-# Defaults to task 2 with p=0.5.
-class DefaultTeacher(FbDialogTeacher):
- def __init__(self, opt, shared=None):
- task = '2_p0.5'
- opt = copy.deepcopy(opt)
- opt['datafile'] = _path(os.path.join('babi', 'babi1'), task, opt)
- opt['cands_datafile'] = _path(os.path.join('babi', 'babi1'), task, opt, 'train')
- super().__init__(opt, shared)
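setup_data_with_feedback above consumes the fbdialog format, where each line starts with a conversation index ('1' marks the start of an episode) followed by tab-separated x, y, reward, and label candidates. A minimal sketch of splitting one such line (the helper is hypothetical, shown only to illustrate the format):

def parse_fbdialog_line(line):
    line = line.strip().replace('\\n', '\n')
    space_idx = line.find(' ')
    conv_id = line[:space_idx]          # '1' means a new episode starts here
    fields = line[space_idx + 1:].split('\t')
    fields += [''] * (4 - len(fields))  # pad to x, y, reward, label_candidates
    return (conv_id, *fields[:4])


print(parse_fbdialog_line('2 Where is John?\tgarden\t1'))
# ('2', 'Where is John?', 'garden', '1', '')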
diff --git a/projects/memnn_feedback/tasks/dbll_babi/build.py b/projects/memnn_feedback/tasks/dbll_babi/build.py
deleted file mode 100644
--- a/projects/memnn_feedback/tasks/dbll_babi/build.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-# Download and build the data if it does not exist.
-
-import parlai.core.build_data as build_data
-import os
-
-
-def build(opt):
- dpath = os.path.join(opt['datapath'], 'DBLL')
- version = None
-
- if not build_data.built(dpath, version_string=version):
- print('[building data: ' + dpath + ']')
- if build_data.built(dpath):
- # An older version exists, so remove these outdated files.
- build_data.remove_dir(dpath)
- build_data.make_dir(dpath)
-
- # Download the data.
- fname = 'dbll.tgz'
- url = 'http://parl.ai/downloads/dbll/' + fname
- build_data.download(url, dpath, fname)
- build_data.untar(dpath, fname)
-
- # Mark the data as built.
- build_data.mark_done(dpath, version_string=version)
diff --git a/projects/memnn_feedback/tasks/dialog_babi_feedback/__init__.py b/projects/memnn_feedback/tasks/dialog_babi_feedback/__init__.py
deleted file mode 100644
--- a/projects/memnn_feedback/tasks/dialog_babi_feedback/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/memnn_feedback/tasks/dialog_babi_feedback/agents.py b/projects/memnn_feedback/tasks/dialog_babi_feedback/agents.py
deleted file mode 100644
--- a/projects/memnn_feedback/tasks/dialog_babi_feedback/agents.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-#
-# Accessing the tasks can be done with something like:
-#
-# parlai train_model --setting 'RBI' -m "projects.memnn_feedback.agent.memnn_feedback:MemnnFeedbackAgent"
-# -t "projects.memnn_feedback.tasks.dialog_babi_feedback.agents:taskTeacher:1_p0.5:feedback"
-#
-# which specifies task 1 with a policy that answers correctly with probability 0.5, trained with reward-based learning; see the papers
-# for more details: https://arxiv.org/abs/1604.06045 and https://arxiv.org/abs/1605.07683
-
-from parlai.core.teachers import FbDialogTeacher
-from .build import build
-
-import os
-
-tasks = {}
-tasks[1] = 'rl1_API_calls_with_ans'
-tasks[2] = 'rl2_API_refine_with_ans'
-tasks[3] = 'rl3_options_with_ans'
-tasks[4] = 'rl4_phone_address_with_ans'
-tasks[5] = 'rl5_full_dialogs_with_ans'
-
-
-def _path(task, opt):
- # Build the data if it doesn't exist.
- build(opt)
- task_name = '%s_%s' % (task.split('_')[1], tasks[int(task.split('_')[0])])
- task = task.split('_')[0]
- task_name = 'dialog-babi_' + task_name
- prefix = os.path.join(
- opt['datapath'], 'dialog-bAbI-feedback', 'dialog-bAbI-feedback'
- )
- suffix = ''
- dt = opt['datatype'].split(':')[0]
- if dt == 'train':
- suffix = 'trn'
- elif dt == 'test':
- suffix = 'tst'
- elif dt == 'valid':
- suffix = 'dev'
- datafile = os.path.join(
- prefix, '{task}_{type}.txt'.format(task=task_name, type=suffix)
- )
-
- cands_datafile = os.path.join(prefix, 'dialog-babi-candidates.txt')
- return datafile, cands_datafile
-
-
-# The knowledge base of facts that can be used to answer questions.
-class KBTeacher(FbDialogTeacher):
- def __init__(self, opt, shared=None):
- build(opt)
- opt['datafile'] = os.path.join(
- opt['datapath'],
- 'dialog-bAbI-feedback',
- 'dialog-bAbI-feedback-tasks',
- 'dialog-babi-kb-all.txt',
- )
- super().__init__(opt, shared)
-
-
-# Single task.
-class TaskTeacher(FbDialogTeacher):
- def __init__(self, opt, shared=None):
- paths = _path(opt['task'].split(':')[2], opt)
- opt['datafile'], opt['cands_datafile'] = paths
- super().__init__(opt, shared)
-
- def setup_data(self, path):
- """
- Reads feedback for an example along with text and labels if 'feedback' argument
- is specified.
- """
- if self.opt['task'].split(':')[-1] == 'feedback':
- return self.setup_data_with_feedback(path)
- else:
- return super().setup_data(path)
-
- def setup_data_with_feedback(self, path):
- """
- Reads data in the fbdialog format. This method is very similar to
- FbDialogTeacher.setup_data(..). The difference is that in this method the
- feedback is appended to the query from the current example; in the default setup
- the feedback is appended to the x from the next example.
-
- The data would look something like this:
-
- Mary moved to the bedroom.
- Mary travelled to the garden.
- Where is John?
- No, that's wrong.
- [labels: garden]
-
- To append feedback to the current example, modify the task name like this:
- parlai display_data -t dbll_babi:task:2_p0.5:f
- Default setup:
- parlai display_data -t dbll_babi:task:2_p0.5
- """
- print("[loading fbdialog data:" + path + "]")
- with open(path) as read:
- start = True
- x = ''
-
- y = None
-
- reward = 0
- dialog_index = 0
- read_feedback = False
- for line in read:
- line = line.strip().replace('\\n', '\n')
- if len(line) == 0:
- continue
-
- # first, get conversation index -- '1' means start of episode
- space_idx = line.find(' ')
- conv_id = line[:space_idx]
-
- # split line into constituent parts, if available:
- # x<tab>y<tab>reward<tab>label_candidates
- # where y, reward, and label_candidates are optional
- split = line[space_idx + 1 :].split('\t')
-
- # remove empty items and strip each one
- for i in range(len(split)):
- word = split[i].strip()
- if len(word) == 0:
- split[i] = ''
- else:
- split[i] = word
- # Empty reward string same as None
- if len(split) > 2 and split[2] == '':
- split[2] = None
-
- # now check if we're at a new episode
- if conv_id == '1':
- dialog_index += 1
- x = x.strip()
- if x:
- yield [x, None, reward], start
- start = True
- reward = 0
- # start a new episode
- if self.cloze:
- x = 'Fill in the blank in the last sentence.\n{x}'.format(
- x=split[0]
- )
- else:
- x = split[0]
- else:
- if x:
- # otherwise add current x to what we have so far
- x = '{x}\n{next_x}'.format(x=x, next_x=split[0])
- else:
- x = split[0]
- if len(split) > 2 and split[2]:
- reward += float(split[2])
-
- if len(split) > 1 and split[1]:
- read_feedback = True
- # split labels
- y = split[1].split('|')
-
- if read_feedback and not split[1]:
- split[0] = x
- split[1] = y
- if len(split) > 2:
- split[2] = reward
- else:
- split.append(reward)
- if start:
- yield split, True
- start = False
- else:
- yield split, False
- # reset x in case there is unlabeled data still left
- x = ''
- reward = 0
- y = None
- read_feedback = False
diff --git a/projects/memnn_feedback/tasks/dialog_babi_feedback/build.py b/projects/memnn_feedback/tasks/dialog_babi_feedback/build.py
deleted file mode 100644
--- a/projects/memnn_feedback/tasks/dialog_babi_feedback/build.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-# Download and build the data if it does not exist.
-
-import parlai.core.build_data as build_data
-import os
-
-
-def build(opt):
- dpath = os.path.join(opt['datapath'], 'dialog-babi-feedback')
- version = None
-
- if not build_data.built(dpath, version_string=version):
- print('[building data: ' + dpath + ']')
- if build_data.built(dpath):
- # An older version exists, so remove these outdated files.
- build_data.remove_dir(dpath)
- build_data.make_dir(dpath)
-
- # Download the data.
- fname = 'dialog-babi-feedback.zip'
- url = ' http://parl.ai/downloads/dialog-babi-feedback/' + fname
-
- build_data.download(url, dpath, fname)
- build_data.untar(dpath, fname)
-
- # Mark the data as built.
- build_data.mark_done(dpath, version_string=version)
diff --git a/projects/personachat/__init__.py b/projects/personachat/__init__.py
deleted file mode 100644
--- a/projects/personachat/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/personachat/kvmemnn/__init__.py b/projects/personachat/kvmemnn/__init__.py
deleted file mode 100644
--- a/projects/personachat/kvmemnn/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/personachat/kvmemnn/kvmemnn.py b/projects/personachat/kvmemnn/kvmemnn.py
deleted file mode 100644
--- a/projects/personachat/kvmemnn/kvmemnn.py
+++ /dev/null
@@ -1,940 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from parlai.core.agents import Agent
-from parlai.core.dict import DictionaryAgent
-from parlai.utils.misc import round_sigfigs # , maintain_dialog_history
-
-from .modules import Kvmemnn
-
-import torch
-from torch.autograd import Variable
-from torch import optim
-import torch.nn as nn
-import time
-from collections import deque
-
-import copy
-import os
-import random
-import pickle
-
-
-def maintain_dialog_history(
- history,
- observation,
- reply='',
- historyLength=1,
- useReplies="labels",
- dict=None,
- useStartEndIndices=True,
- usePersonas=True,
-):
- """
- Keeps track of dialog history, up to a truncation length.
-
-    Either includes replies from the labels, from the model, or not at all, depending on the 'useReplies' parameter.
- """
-
- def parse(txt):
- txt = txt.lower()
- txt = txt.replace("n't", " not")
- if dict is not None:
- vec = dict.txt2vec(txt)
- if useStartEndIndices:
- parsed_x = deque([dict[dict.start_token]])
- parsed_x.extend(vec)
- parsed_x.append(dict[dict.end_token])
- return parsed_x
- else:
- return vec
- else:
- return [txt]
-
- if 'dialog' not in history:
- history['dialog'] = deque(maxlen=historyLength)
- history['persona'] = []
- history['episode_done'] = False
- history['labels'] = []
-
- if history['episode_done']:
- history['dialog'].clear()
- history['persona'] = []
- history['labels'] = []
- history['episode_done'] = False
-
- # we only keep the last one..that works well for IR model, so..
- history['dialog'].clear()
-
- if useReplies != 'none':
- if len(history['labels']) > 0:
- r = history['labels'][0]
- history['dialog'].extend(parse(r))
- else: # if useReplies == 'model':
- if reply != '':
- history['dialog'].extend(parse(reply))
-
- if 'text' in observation:
- txts = observation['text'].split('\n')
- for txt in txts:
- if usePersonas and 'persona:' in txt:
- history['persona'].append(
- Variable(torch.LongTensor(parse(txt)).unsqueeze(0))
- )
- else:
- utt = parse(txt)
- history['dialog'].extend(utt)
- history['last_utterance'] = utt
-
- history['episode_done'] = observation['episode_done']
- if 'labels' in observation:
- history['labels'] = observation['labels']
- elif 'eval_labels' in observation:
- history['labels'] = observation['eval_labels']
-
- return history['dialog'], history['persona']
-
-
-def load_cands(path):
- """
-    Load the global fixed set of candidate labels that the teacher provides with every example
- (the true labels for a specific example are also added to this set, so that it's
- possible to get the right answer).
- """
- if path is None:
- return None
- cands = []
- lines_have_ids = False
- cands_are_replies = False
- cnt = 0
- with open(path) as read:
- for line in read:
- line = line.strip().replace('\\n', '\n')
- if len(line) > 0:
- cnt = cnt + 1
- # If lines are numbered we strip them of numbers.
- if cnt == 1 and line[0:2] == '1 ':
- lines_have_ids = True
- # If tabs then the label_candidates are all the replies.
- if '\t' in line and not cands_are_replies:
- cands_are_replies = True
- cands = []
- if lines_have_ids:
- space_idx = line.find(' ')
- line = line[space_idx + 1 :]
- if cands_are_replies:
- sp = line.split('\t')
- if len(sp) > 1 and sp[1] != '':
- cands.append(sp[1])
- else:
- cands.append(line)
- else:
- cands.append(line)
- return cands
-
-
-class KvmemnnAgent(Agent):
- """
- Simple implementation of the memnn algorithm with 1 hop.
- """
-
- OPTIM_OPTS = {
- 'adadelta': optim.Adadelta, # type: ignore
- 'adagrad': optim.Adagrad, # type: ignore
- 'adam': optim.Adam,
- 'adamax': optim.Adamax, # type: ignore
- 'asgd': optim.ASGD, # type: ignore
- 'lbfgs': optim.LBFGS, # type: ignore
- 'rmsprop': optim.RMSprop, # type: ignore
- 'rprop': optim.Rprop, # type: ignore
- 'sgd': optim.SGD,
- }
-
- @staticmethod
- def dictionary_class():
- return DictionaryAgent
-
- @staticmethod
- def add_cmdline_args(argparser):
- """
- Add command-line arguments specifically for this agent.
- """
- KvmemnnAgent.dictionary_class().add_cmdline_args(argparser)
- agent = argparser.add_argument_group('Kvmemnn Arguments')
- agent.add_argument('--hops', type=int, default=1, help='num hops')
- agent.add_argument(
- '--lins', type=int, default=0, help='num lins projecting after hops'
- )
- agent.add_argument(
- '-esz',
- '--embeddingsize',
- type=int,
- default=128,
- help='size of the token embeddings',
- )
- agent.add_argument(
- '-enorm',
- '--embeddingnorm',
- type=float,
- default=10,
- help='max norm of word embeddings',
- )
- agent.add_argument(
- '-shareEmb',
- '--share-embeddings',
- type='bool',
- default=True,
- help='whether LHS and RHS share embeddings',
- )
- agent.add_argument(
- '-lr', '--learningrate', type=float, default=0.005, help='learning rate'
- )
- agent.add_argument(
- '-margin', '--margin', type=float, default=0.3, help='margin'
- )
- agent.add_argument(
- '-loss', '--loss', default='cosine', choices={'cosine', 'nll'}
- )
- agent.add_argument(
- '-opt',
- '--optimizer',
- default='sgd',
- choices=KvmemnnAgent.OPTIM_OPTS.keys(),
- help='Choose between pytorch optimizers. '
- 'Any member of torch.optim is valid and will '
- 'be used with default params except learning '
- 'rate (as specified by -lr).',
- )
- agent.add_argument(
- '-tr',
- '--truncate',
- type=int,
- default=-1,
- help='truncate input & output lengths to speed up '
- 'training (may reduce accuracy). This fixes all '
- 'input and output to have a maximum length.',
- )
- agent.add_argument(
- '-k',
- '--neg-samples',
- type=int,
- default=10,
- help='number k of negative samples per example',
- )
- agent.add_argument(
- '--parrot-neg', type=int, default=0, help='include query as a negative'
- )
- agent.add_argument(
- '--take-next-utt', type='bool', default=False, help='take next utt'
- )
- agent.add_argument(
- '--twohop-range',
- type=int,
- default=100,
- help='2 hop range constraint for num rescored utterances',
- )
- agent.add_argument(
- '--twohop-blend',
- type=float,
- default=0,
- help='2 hop blend in the first hop scores if > 0',
- )
- agent.add_argument(
- '--kvmemnn-debug',
- type='bool',
- default=False,
- help='print debug information',
- )
- agent.add_argument(
- '--tfidf',
- type='bool',
- default=False,
- help='Use frequency based normalization for embeddings.',
- )
- agent.add_argument(
- '-cs',
- '--cache-size',
- type=int,
- default=1000,
- help='size of negative sample cache to draw from',
- )
- agent.add_argument(
- '-hist',
- '--history-length',
- default=100,
- type=int,
- help='Number of past tokens to remember. ',
- )
- agent.add_argument(
- '-histr',
- '--history-replies',
- default='label',
- type=str,
- choices=['none', 'model', 'label'],
- help='Keep replies in the history, or not.',
- )
- agent.add_argument(
- '--interactive-mode', default=False, type='bool', choices=[True, False]
- )
- agent.add_argument(
- '--loadcands',
- type='bool',
- default=True,
- help='Load candidates to rank from .candspair files, or not.',
- )
-
- def __init__(self, opt, shared=None):
- """
- Set up model if shared params not set, otherwise no work to do.
- """
- super().__init__(opt, shared)
- opt = self.opt
- if opt.get('batchsize', 1) > 1:
- raise RuntimeError(
- 'Kvmemnn model does not support batchsize > 1, '
- 'try training with numthreads > 1 instead.'
- )
- self.reset_metrics()
- # all instances needs truncate param
- self.id = 'Kvmemnn'
- self.NULL_IDX = 0
- self.start2 = 99
- # set up tensors once
- self.cands = torch.LongTensor(1, 1, 1)
- self.ys_cache = []
- self.ys_cache_sz = opt['cache_size']
- self.truncate = opt['truncate'] if opt['truncate'] > 0 else None
- self.history = {}
- if shared:
- torch.set_num_threads(1)
- if 'threadindex' in shared:
- self.threadindex = shared['threadindex']
- else:
- self.threadindex = 1
- # set up shared properties
- self.dict = shared['dict']
- # answers contains a batch_size list of the last answer produced
- self.model = shared['model'] # Kvmemnn(opt, len(self.dict))
- if 'fixedX' in shared:
- self.fixedX = shared['fixedX']
- self.fixedCands = shared['fixedCands']
- self.fixedCands_txt = shared['fixedCands_txt']
- self.fixedCands2 = shared['fixedCands2']
- self.fixedCands_txt2 = shared['fixedCands_txt2']
- else:
- print("[ creating KvmemnnAgent ]")
- # this is not a shared instance of this class, so do full init
- self.threadindex = -1
- torch.set_num_threads(1)
-
- if (opt['dict_file'] is None and opt.get('model_file')) or os.path.isfile(
- opt['model_file'] + '.dict'
- ):
- # set default dict-file if not set
- opt['dict_file'] = opt['model_file'] + '.dict'
- # load dictionary and basic tokens & vectors
- self.dict = DictionaryAgent(opt)
- if 'loss' not in opt:
- opt['loss'] = 'cosine'
- self.model = Kvmemnn(opt, len(self.dict), self.dict)
- if opt.get('model_file') and os.path.isfile(opt['model_file']):
- self.load(opt['model_file'])
- self.model.share_memory()
-
- self.fixedCands = False
- self.fixedX = None
- path = opt['model_file'] + '.candspair'
- if os.path.isfile(path) and opt.get('loadcands') is not False:
- print("[loading candidates: " + path + "*]")
- fc = load_cands(path)
- fcs = []
- for c in fc:
- fcs.append(Variable(torch.LongTensor(self.parse(c)).unsqueeze(0)))
- self.fixedCands = fcs
- self.fixedCands_txt = fc
- fc2 = load_cands(path + "2")
- fcs2 = []
- for c2 in fc2:
- fcs2.append(Variable(torch.LongTensor(self.parse(c2)).unsqueeze(0)))
- self.fixedCands2 = fcs2
- self.fixedCands_txt2 = fc2
- print("[caching..]")
- xsq = Variable(torch.LongTensor([self.parse('nothing')]))
- xe, ye = self.model(xsq, [], None, self.fixedCands)
- self.fixedX = ye
- print("=init done=")
-
- if self.opt['loss'] == 'cosine':
- self.criterion = torch.nn.CosineEmbeddingLoss(
- margin=opt['margin'], size_average=False
- )
- elif self.opt['loss'] == 'nll':
- self.criterion = nn.CrossEntropyLoss(ignore_index=-100)
- else:
- raise RuntimeError('unspecified loss')
- # self.criterion = torch.nn.MultiMarginLoss(p=1, margin=0.1)
- self.reset()
- # can be used to look at embeddings:
- # self.dict_neighbors('coffee')
- self.take_next_utt = True
- self.cands_done = []
- if 'interactive_mode' in opt:
- self.interactiveMode = self.opt['interactive_mode']
- else:
- self.interactiveMode = False
- if self.interactiveMode:
- print("[ Interactive mode ]")
-
- def override_opt(self, new_opt):
- """
- Set overridable opts from loaded opt file.
-
-        Print out each added key and each overridden key. Only override args specific to
- the model.
- """
- model_args = {
- 'hiddensize',
- 'embeddingsize',
- 'numlayers',
- 'optimizer',
- 'encoder',
- 'decoder',
- 'lookuptable',
- 'attention',
- 'attention_length',
- 'fixed_candidates_file',
- }
- for k, v in new_opt.items():
- if k not in model_args:
- # skip non-model args
- continue
- if k not in self.opt:
- print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))
- elif self.opt[k] != v:
- print(
- 'Overriding option [ {k}: {old} => {v}]'.format(
- k=k, old=self.opt[k], v=v
- )
- )
- self.opt[k] = v
- return self.opt
-
- def parse(self, text):
- """
- Convert string to token indices.
- """
- text = text.lower()
- text = text.replace("n't", " not")
- vec = self.dict.txt2vec(text)
- if vec == []:
- vec = [self.dict[self.dict.null_token]]
- return vec
-
- def t2v(self, text):
- p = self.dict.txt2vec(text)
- return Variable(torch.LongTensor(p).unsqueeze(1))
-
- def v2t(self, vec):
- """
- Convert token indices to string of tokens.
- """
- if type(vec) == Variable:
- vec = vec.data
- if type(vec) == torch.LongTensor and vec.dim() == 2:
- vec = vec.squeeze(0)
- if type(vec) == torch.Tensor and vec.dim() == 2:
- vec = vec.squeeze(0)
- new_vec = []
- for i in vec:
- new_vec.append(i)
- return self.dict.vec2txt(new_vec)
-
- def zero_grad(self):
- """
- Zero out optimizer.
- """
- self.optimizer.zero_grad()
-
- def update_params(self):
- """
- Do one optimization step.
- """
- self.optimizer.step()
-
- def reset(self):
- """
- Reset observation and episode_done.
- """
- self.observation = None
- self.episode_done = True
- self.cands_done = []
- self.history = {}
- # set up optimizer
- lr = self.opt['learningrate']
- optim_class = KvmemnnAgent.OPTIM_OPTS[self.opt['optimizer']]
- kwargs = {'lr': lr}
- self.optimizer = optim_class(self.model.parameters(), **kwargs)
-
- def share(self):
- """
- Share internal states between parent and child instances.
- """
- shared = super().share()
- shared['dict'] = self.dict
- shared['model'] = self.model
- if self.fixedX is not None:
- shared['fixedX'] = self.fixedX
- shared['fixedCands'] = self.fixedCands
- shared['fixedCands_txt'] = self.fixedCands_txt
- shared['fixedCands2'] = self.fixedCands2
- shared['fixedCands_txt2'] = self.fixedCands_txt2
- return shared
-
- def observe(self, observation):
- self.episode_done = observation['episode_done']
- # shallow copy observation (deep copy can be expensive)
- obs = observation.copy()
- obs['query'], obs['mem'] = maintain_dialog_history(
- self.history,
- obs,
- historyLength=self.opt['history_length'],
- useReplies=self.opt['history_replies'],
- dict=self.dict,
- useStartEndIndices=False,
- )
- self.observation = obs
- return obs
-
- def report2(self):
- def clip(f):
- return round_sigfigs(f)
-
- metrics = self.metrics
- if metrics['exs'] == 0:
- report = {'mean_rank': self.opt['neg_samples']}
- else:
- maxn = 0
- for _ in range(100):
- n = self.model.lt.weight[5].norm(2)[0].item()
- if n > maxn:
- maxn = n
-
- report = {
- 'exs': clip(metrics['total_total']),
- 'loss': clip(metrics['loss'] / metrics['exs']),
- 'mean_rank': clip(metrics['mean_rank'] / metrics['exs']),
- 'mlp_time': clip(metrics['mlp_time'] / metrics['exs']),
- 'tot_time': clip(metrics['tot_time'] / metrics['exs']),
-                'max_norm': clip(maxn),
- }
- return report
-
- def reset_metrics(self, keep_total=False):
- if keep_total:
- self.metrics = {
- 'exs': 0,
- 'mean_rank': 0,
- 'loss': 0,
- 'total_total': self.metrics['total_total'],
- 'mlp_time': 0,
- 'tot_time': 0,
- 'max_weight': 0,
- 'mean_weight': 0,
- }
- else:
- self.metrics = {
- 'total_total': 0,
- 'mean_rank': 0,
- 'exs': 0,
- 'mlp_time': 0,
- 'tot_time': 0,
- 'loss': 0,
- 'max_weight': 0,
- 'mean_weight': 0,
- }
-
- def compute_metrics(self, loss, scores, mlp_time, non_mlp_time):
- metrics = {}
- pos = scores[0]
- cnt = 0
- for i in range(1, len(scores)):
- if scores[i] >= pos:
- cnt += 1
- metrics['mean_rank'] = cnt
- metrics['loss'] = loss
- metrics['tot_time'] = mlp_time + non_mlp_time
- metrics['mlp_time'] = mlp_time
- return metrics
-
- def same(self, y1, y2):
- """
- Check if two tensors are the same, within small margin of error.
- """
- if len(y1) != len(y2):
- return False
- if abs((y1 - y2).sum().data.sum()) > 0.00001:
- return False
- return True
-
- def get_negs(self, xs, ys):
- negs = []
- # for neg in self.ys_cache:
- cache_sz = len(self.ys_cache) - 1
- if cache_sz < 1:
- return negs
- k = self.opt['neg_samples']
- for _ in range(1, k * 3):
- index = random.randint(0, cache_sz)
- neg = self.ys_cache[index]
- if not self.same(ys.squeeze(0), neg.squeeze(0)):
- negs.append(neg)
- if len(negs) >= k:
- break
- if self.opt['parrot_neg'] > 0:
- utt = self.history['last_utterance']
- if len(utt) > 2:
- query = Variable(torch.LongTensor(utt).unsqueeze(0))
- negs.append(query)
- return negs
-
- def dict_neighbors(self, word, useRHS=False):
- input = self.t2v(word)
- W = self.model.encoder.lt.weight
- q = W[input[0].item()]
- if useRHS:
- W = self.model.encoder2.lt.weight
- score = torch.Tensor(W.size(0))
- for i in range(W.size(0)):
- score[i] = torch.nn.functional.cosine_similarity(q, W[i], dim=0)[0].item()
- val, ind = score.sort(descending=True)
- for i in range(20):
- print(
- str(ind[i])
- + " ["
- + str(val[i])
- + "]: "
- + self.v2t(torch.Tensor([ind[i]]))
- )
-
- def predict(self, xs, ys=None, cands=None, cands_txt=None, obs=None):
- """
- Produce a prediction from our model.
-
- Update the model using the targets if available, otherwise rank candidates as
- well if they are available and param is set.
- """
- self.start = time.time()
- if xs is None:
- return [{}]
- is_training = ys is not None
- if is_training: #
- negs = self.get_negs(xs, ys)
- if len(negs) > 0:
- self.model.train()
- self.zero_grad()
- if self.opt['loss'] == 'cosine':
- xe, ye = self.model(xs, obs[0]['mem'], ys, negs)
- y = Variable(-torch.ones(xe.size(0)))
- y[0] = 1
- loss = self.criterion(xe, ye, y)
- else:
- x = self.model(xs, obs[0]['mem'], ys, negs)
- y = Variable(torch.LongTensor([0]))
- loss = self.criterion(x.unsqueeze(0), y)
- loss.backward()
- self.update_params()
- rest = 0
- if self.start2 != 99:
- rest = self.start - self.start2
- self.start2 = time.time()
- if self.opt['loss'] == 'cosine':
- pred = nn.CosineSimilarity().forward(xe, ye)
- else:
- pred = x
- metrics = self.compute_metrics(
- loss.item(), pred.squeeze(0), self.start2 - self.start, rest
- )
- return [{'metrics': metrics}]
- else:
- fixed = False
- if hasattr(self, 'fixedCands') and self.fixedCands:
- self.take_next_utt = True
- self.twohoputt = True
- self.tricks = True
- else:
- self.take_next_utt = False
- self.twohoputt = False
- self.tricks = False
- if cands is None or cands[0] is None or self.take_next_utt:
- # cannot predict without candidates.
- if self.fixedCands or self.take_next_utt:
- cands_txt2 = [self.fixedCands_txt2]
- fixed = True
- else:
- return [{}]
- # test set prediction uses candidates
- self.model.eval()
- if fixed:
- if obs[0]['episode_done']:
- self.cands_done = []
-
- if xs is None:
- xs = Variable(torch.LongTensor([self.parse('nothing')]))
- xs = xs.clone()
- if self.tricks:
- vv = self.history['last_utterance']
- if len(vv) == 0:
- xsq = Variable(torch.LongTensor([self.parse('nothing')]))
- else:
- xsq = Variable(torch.LongTensor([vv]))
- else:
- xsq = xs
- mems = obs[0]['mem']
- if self.tricks:
- mems = []
- if self.fixedX is None:
- xe, ye = self.model(xsq, mems, ys, self.fixedCands)
- self.fixedX = ye
- else:
-                # fixed cand embed vectors are cached, don't recompute them
- blah = Variable(torch.LongTensor([1]))
- xe, ye = self.model(xsq, mems, ys, [blah])
- ye = self.fixedX
- pred = nn.CosineSimilarity().forward(xe, ye)
- origxe = xe
- origpred = pred
- val, ind = pred.sort(descending=True)
- ypred = cands_txt2[0][ind[0].item()] # reply to match
- if self.opt.get('kvmemnn_debug', False):
- print("twohop-range:", self.opt.get('twohop_range', 100))
- for i in range(10):
- txt1 = self.fixedCands_txt[ind[i].item()]
- txt2 = cands_txt2[0][ind[i].item()]
- print(i, txt1, '\n ', txt2)
- tc = [ypred]
- if self.twohoputt:
- # now we rerank original cands against this prediction
- zq = []
- z = []
- ztxt = []
- newwords = {}
- r = self.opt.get('twohop_range', 100)
- for i in range(r):
- c = self.fixedCands2[ind[i].item()]
- ctxt = self.fixedCands_txt2[ind[i].item()]
- if i < 10:
- zq.append(c)
- z.append(c)
- ztxt.append(ctxt)
- for w in c[0]:
- newwords[w.item()] = True
- xs2 = torch.cat(zq, 1)
-
- if (self.interactiveMode and self.twohoputt) or cands[0] is None:
- # used for nextutt alg in demo mode, get 2nd hop
- blah = Variable(torch.LongTensor([1]))
- if self.tricks:
- xe, ye = self.model(xs2, obs[0]['mem'], ys, z)
- else:
- xe, ye = self.model(xs2, obs[0]['mem'], ys, [blah])
- ye = self.fixedX
- blend = self.opt.get('twohop_blend', 0)
- if blend > 0:
- xe = (1 - blend) * xe + blend * origxe
- pred = nn.CosineSimilarity().forward(xe, ye)
- for c in self.cands_done:
- for i in range(len(ztxt)):
- if ztxt[i] == c:
- # interactive heuristic: don't repeat yourself
- pred[i] = -1000
- val, ind = pred.sort(descending=True)
- # predict the highest scoring candidate, and return it.
- # print(" [query: " + self.v2t(xsq) + "]")
- ps = []
- for c in obs[0]['mem']:
- ps.append(self.v2t(c))
- # print(" [persona: " + '|'.join(ps) + "]")
- # print(" [1st hop qmatch: " + ypredorig + "]")
- # print(" [1st hop nextut: " + ypred + "]")
- if self.tricks:
- ypred = ztxt[ind[0].item()] # match
- self.cands_done.append(ypred)
- else:
- ypred = self.fixedCands_txt[ind[0].item()] # match
- self.cands_done.append(ind[0].item())
- # print(" [2nd hop nextut: " + ypred2 + "]")
- tc = [ypred]
- self.history['labels'] = [ypred]
- # print(" [final pred: " + ypred + "]")
- ret = [{'text': ypred, 'text_candidates': tc}]
- return ret
- elif self.take_next_utt and not self.interactiveMode:
- xe, ye = self.model(xs2, obs[0]['mem'], ys, cands[0])
- pred = nn.CosineSimilarity().forward(xe, ye)
- xe, ye = self.model(xs, obs[0]['mem'], ys, cands[0])
- origpred = nn.CosineSimilarity().forward(xe, ye)
- if 'alpha' not in self.opt:
- alpha = 0.1
- else:
- alpha = self.opt['alpha']
- pred = alpha * pred + 1 * origpred
- val, ind = pred.sort(descending=True)
- # predict the highest scoring candidate, and return it.
- ypred = cands_txt[0][ind[0].item()] # match
- tc = []
- for i in range(len(ind)):
- tc.append(cands_txt[0][ind[i].item()])
- else:
- if self.opt['loss'] == 'cosine':
- xe, ye = self.model(xs, obs[0]['mem'], ys, cands[0])
- pred = nn.CosineSimilarity().forward(xe, ye)
- else:
- x = self.model(xs, obs[0]['mem'], ys, cands[0])
- pred = x # .squeeze()
- val, ind = pred.sort(descending=True)
- ypred = cands_txt[0][ind[0].item()] # match
- tc = []
- for i in range(min(100, ind.size(0))):
- tc.append(cands_txt[0][ind[i].item()])
- ret = [{'text': ypred, 'text_candidates': tc}]
- return ret
- return [{}] * xs.size(0)
-
- def batchify(self, observations):
- """
- Convert a list of observations into input & target tensors.
- """
-
- def valid(obs):
- # check if this is an example our model should actually process
- return 'query' in obs and len(obs['query']) > 0
-
- try:
- # valid examples and their indices
- valid_inds, exs = zip(
- *[(i, ex) for i, ex in enumerate(observations) if valid(ex)]
- )
- except ValueError:
- # zero examples to process in this batch, so zip failed to unpack
- return None, None, None, None
-
- # `x` text is already tokenized and truncated
- # sort by length so we can use pack_padded
- parsed_x = [ex['query'] for ex in exs]
- x_lens = [len(x) for x in parsed_x]
- ind_sorted = sorted(range(len(x_lens)), key=lambda k: -x_lens[k])
-
- exs = [exs[k] for k in ind_sorted]
- valid_inds = [valid_inds[k] for k in ind_sorted]
- parsed_x = [parsed_x[k] for k in ind_sorted]
-
- labels_avail = any(['labels' in ex for ex in exs])
-
- max_x_len = max([len(x) for x in parsed_x])
- for x in parsed_x:
- x += [self.NULL_IDX] * (max_x_len - len(x))
- xs = torch.LongTensor(parsed_x)
- xs = Variable(xs)
-
- # set up the target tensors
- ys = None
- labels = None
- if labels_avail:
- # randomly select one of the labels to update on, if multiple
- labels = [random.choice(ex.get('labels', [''])) for ex in exs]
- # parse each label and append END
- parsed_y = [deque(maxlen=self.truncate) for _ in labels]
- for dq, y in zip(parsed_y, labels):
- dq.extendleft(reversed(self.parse(y)))
- max_y_len = max(len(y) for y in parsed_y)
- for y in parsed_y:
- y += [self.NULL_IDX] * (max_y_len - len(y))
- if len(parsed_y[0]) == 0:
- return None, None, None, None
- else:
- ys = torch.LongTensor(parsed_y)
- ys = Variable(ys)
-
- cands = []
- cands_txt = []
- if ys is None:
- # only build candidates in eval mode.
- for o in observations:
- if 'label_candidates' in o and o['label_candidates'] is not None:
- cs = []
- ct = []
- for c in o['label_candidates']:
- cs.append(
- Variable(torch.LongTensor(self.parse(c)).unsqueeze(0))
- )
- ct.append(c)
- cands.append(cs)
- cands_txt.append(ct)
- else:
- cands.append(None)
- cands_txt.append(None)
- return xs, ys, cands, cands_txt
-
- def add_to_ys_cache(self, ys):
- if ys is None or len(ys) == 0:
- return
- if len(self.ys_cache) < self.ys_cache_sz:
- self.ys_cache.append(copy.deepcopy(ys))
- else:
- ind = random.randint(0, self.ys_cache_sz - 1)
- self.ys_cache[ind] = copy.deepcopy(ys)
-
- def batch_act(self, observations):
- batchsize = len(observations)
- # initialize a table of replies with this agent's id
- batch_reply = [{'id': self.getID()} for _ in range(batchsize)]
-
- if batchsize == 0 or 'text' not in observations[0]:
- return [{'text': 'dunno'}]
-
- # convert the observations into batches of inputs and targets
- # valid_inds tells us the indices of all valid examples
- # e.g. for input [{}, {'text': 'hello'}, {}, {}], valid_inds is [1]
- # since the other three elements had no 'text' field
- xs, ys, cands, cands_txt = self.batchify(observations)
- batch_reply = self.predict(xs, ys, cands, cands_txt, observations)
- self.add_to_ys_cache(ys)
- return batch_reply
-
- def act(self):
- # call batch_act with this batch of one
- return self.batch_act([self.observation])[0]
-
- def shutdown(self):
- # """Save the state of the model when shutdown."""
- super().shutdown()
-
- def save(self, path=None):
- """
- Save model parameters if model_file is set.
- """
- path = self.opt.get('model_file', None) if path is None else path
- if path and hasattr(self, 'model'):
- data = {}
- data['model'] = self.model.state_dict()
- data['optimizer'] = self.optimizer.state_dict()
- data['opt'] = self.opt
- with open(path, 'wb') as handle:
- torch.save(data, handle)
- with open(path + ".opt", 'wb') as handle:
- pickle.dump(self.opt, handle, protocol=pickle.HIGHEST_PROTOCOL)
-
- def load(self, path):
- """
- Return opt and model states.
- """
- with open(path, 'rb') as read:
- print('Loading existing model params from ' + path)
- data = torch.load(read)
- self.model.load_state_dict(data['model'])
- self.reset()
- self.optimizer.load_state_dict(data['optimizer'])
- self.opt = self.override_opt(data['opt'])
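The agent above trains with a cosine embedding loss over one positive label and k cached negatives, and ranks candidates by cosine similarity at prediction time. Below is a minimal, self-contained PyTorch sketch of that score-and-rank pattern; the random tensors stand in for the model's actual query and candidate embeddings.

import torch
import torch.nn as nn

emb_dim, num_cands = 128, 11              # one positive plus 10 negatives, as with -k 10
query = torch.randn(num_cands, emb_dim)   # query embedding, repeated once per candidate
cands = torch.randn(num_cands, emb_dim)   # row 0 is the positive, the rest are negatives

# Training: cosine embedding loss with target +1 for the positive, -1 for each negative.
target = -torch.ones(num_cands)
target[0] = 1
loss = nn.CosineEmbeddingLoss(margin=0.3)(query, cands, target)

# Prediction: rank candidates by cosine similarity and pick the top one.
scores = nn.CosineSimilarity(dim=1)(query, cands)
values, indices = scores.sort(descending=True)
print(loss.item(), indices[0].item())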
diff --git a/projects/personachat/kvmemnn/modules.py b/projects/personachat/kvmemnn/modules.py
deleted file mode 100644
--- a/projects/personachat/kvmemnn/modules.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import torch
-import torch.nn as nn
-from torch.autograd import Variable
-
-
-class Kvmemnn(nn.Module):
- def __init__(self, opt, num_features, dict):
- super().__init__()
- self.lt = nn.Embedding(
- num_features,
- opt['embeddingsize'],
- 0,
- sparse=True,
- max_norm=opt['embeddingnorm'],
- )
- if not opt['tfidf']:
- dict = None
- self.encoder = Encoder(self.lt, dict)
- if not opt['share_embeddings']:
- self.lt2 = nn.Embedding(
- num_features,
- opt['embeddingsize'],
- 0,
- sparse=True,
- max_norm=opt['embeddingnorm'],
- )
- self.encoder2 = Encoder(self.lt2, dict)
- else:
- self.encoder2 = self.encoder
- self.opt = opt
- self.softmax = nn.Softmax(dim=1)
- self.cosine = nn.CosineSimilarity()
-
- self.lin1 = nn.Linear(opt['embeddingsize'], opt['embeddingsize'], bias=False)
- self.lin2 = nn.Linear(opt['embeddingsize'], opt['embeddingsize'], bias=False)
- self.hops = 1
- self.lins = 0
- if 'hops' in opt:
- self.hops = opt['hops']
- if 'lins' in opt:
- self.lins = opt['lins']
- self.cosineEmbedding = True
- if opt['loss'] == 'nll':
- self.cosineEmbedding = False
-
- def forward(self, xs, mems, ys=None, cands=None):
- xs_enc = []
- xs_emb = self.encoder(xs)
-
- if len(mems) > 0 and self.hops > 0:
- mem_enc = []
- for m in mems:
- mem_enc.append(self.encoder(m))
- mem_enc.append(xs_emb)
- mems_enc = torch.cat(mem_enc)
- self.layer_mems = mems
- layer2 = self.cosine(xs_emb, mems_enc).unsqueeze(0)
- self.layer2 = layer2
- layer3 = self.softmax(layer2)
- self.layer3 = layer3
- lhs_emb = torch.mm(layer3, mems_enc)
-
- if self.lins > 0:
- lhs_emb = self.lin1(lhs_emb)
- if self.hops > 1:
- layer4 = self.cosine(lhs_emb, mems_enc).unsqueeze(0)
- layer5 = self.softmax(layer4)
- self.layer5 = layer5
- lhs_emb = torch.mm(layer5, mems_enc)
- if self.lins > 1:
- lhs_emb = self.lin2(lhs_emb)
- else:
- if self.lins > 0:
- lhs_emb = self.lin1(xs_emb)
- else:
- lhs_emb = xs_emb
- if ys is not None:
- # training
- if self.cosineEmbedding:
- ys_enc = []
- xs_enc.append(lhs_emb)
- ys_enc.append(self.encoder2(ys))
- for c in cands:
- xs_enc.append(lhs_emb)
- c_emb = self.encoder2(c)
- ys_enc.append(c_emb)
- else:
- xs_enc.append(lhs_emb.dot(self.encoder2(ys)))
- for c in cands:
- c_emb = self.encoder2(c)
- xs_enc.append(lhs_emb.dot(c_emb))
- else:
- # test
- if self.cosineEmbedding:
- ys_enc = []
- for c in cands:
- xs_enc.append(lhs_emb)
- c_emb = self.encoder2(c)
- ys_enc.append(c_emb)
- else:
- for c in cands:
- c_emb = self.encoder2(c)
- xs_enc.append(lhs_emb.dot(c_emb))
- if self.cosineEmbedding:
- return torch.cat(xs_enc), torch.cat(ys_enc)
- else:
- return torch.cat(xs_enc)
-
-
-class Encoder(nn.Module):
- def __init__(self, shared_lt, dict):
- super().__init__()
- self.lt = shared_lt
- if dict is not None:
- l = len(dict)
- freqs = torch.Tensor(l)
- for i in range(l):
- ind = dict.ind2tok[i]
- freq = dict.freq[ind]
- freqs[i] = 1.0 / (1.0 + math.log(1.0 + freq))
- self.freqs = freqs
- else:
- self.freqs = None
-
- def forward(self, xs):
- xs_emb = self.lt(xs)
- if self.freqs is not None:
- # tfidf embeddings
- l = xs.size(1)
- w = Variable(torch.Tensor(l))
- for i in range(l):
- w[i] = self.freqs[xs.data[0][i]]
- w = w.mul(1 / w.norm())
- xs_emb = xs_emb.squeeze(0).t().matmul(w.unsqueeze(1)).t()
- else:
- # basic embeddings (faster)
- xs_emb = xs_emb.mean(1)
- return xs_emb
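When --tfidf is set, the Encoder above reweights token embeddings by inverse log frequency before pooling them into a single sentence vector. The sketch below shows that weighting in vectorized form; the frequency counts are made up for illustration.

import torch

freqs = torch.tensor([120.0, 5.0, 1.0, 30.0])    # per-token corpus counts (made up)
weights = 1.0 / (1.0 + torch.log(1.0 + freqs))   # 1 / (1 + log(1 + freq)), as in Encoder
weights = weights / weights.norm()               # normalize, as in forward()

token_embs = torch.randn(4, 128)                 # one row per token in the sentence
tfidf_emb = (weights.unsqueeze(1) * token_embs).sum(dim=0)   # weighted pooling
mean_emb = token_embs.mean(dim=0)                # the plain (non-tfidf) path
print(tfidf_emb.shape, mean_emb.shape)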
diff --git a/projects/personachat/mturk/personachat_eval/__init__.py b/projects/personachat/mturk/personachat_eval/__init__.py
deleted file mode 100644
--- a/projects/personachat/mturk/personachat_eval/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/personachat/mturk/personachat_eval/extract_and_save_personas.py b/projects/personachat/mturk/personachat_eval/extract_and_save_personas.py
deleted file mode 100644
--- a/projects/personachat/mturk/personachat_eval/extract_and_save_personas.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import pickle
-import os
-from parlai.core.params import ParlaiParser
-from parlai.agents.repeat_label.repeat_label import RepeatLabelAgent
-from parlai.core.worlds import create_task
-
-
-def extract_and_save(opt):
- agent = RepeatLabelAgent(opt)
- world = create_task(opt, agent)
- teacher = world.agents[0]
-
- personas_path = opt.get('personas_path')
- if not os.path.exists(personas_path):
- os.makedirs(personas_path)
-
- new_episode = True
- personas = []
- while not teacher.epoch_done():
- act = teacher.act()
- if new_episode:
- persona_text = act['text'].split('\n')[:-1]
- if opt.get('persona_type') == 'both':
- persona_1 = [p for p in persona_text if 'your persona:' in p]
- persona_2 = [p for p in persona_text if 'partner\'s persona:' in p]
- persona_1 = [p[p.find(':') + 1 :] for p in persona_1]
- persona_2 = [p[p.find(':') + 1 :] for p in persona_2]
- personas += [persona_1, persona_2]
- else:
- persona = [p for p in persona_text if 'persona:' in p]
- persona = [p[p.find(':') + 1 :] for p in persona]
- personas.append(persona)
- new_episode = act.get('episode_done')
- else:
- new_episode = act.get('episode_done')
-
- for idx, persona in enumerate(personas):
- with open('{}/{}.pkl'.format(personas_path, idx), 'wb') as f:
- pickle.dump(persona, f)
-    print('---Finished extracting and saving personas to {}'.format(personas_path))
-
-
-def main(opt):
- print('---Extracting and saving personas---')
- teacher_name = 'personachat:{}'.format(opt.get('persona_type'))
- teacher_name += 'Revised' if opt.get('revised') else 'Original'
- opt['task'] = teacher_name
- opt['datatype'] = 'train:ordered:stream'
- opt['numthreads'] = 1
- opt['batchsize'] = 1
- extract_and_save(opt)
-
-
-if __name__ == '__main__':
- parser = ParlaiParser()
- parser.add_argument(
- '--persona-type',
- default='both',
- type=str,
- choices=['both', 'self', 'other'],
- help='Which personas to load from personachat',
- )
- parser.add_argument(
- '--revised', default=False, type='bool', help='Whether to use revised personas'
- )
- opt = parser.parse_args()
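The extraction above splits the first turn's text into persona lines ("your persona: ..." / "partner's persona: ...") and keeps everything after the colon. A standalone sketch of that splitting step, on a hypothetical first-turn string:

text = (
    "your persona: i am a vegetarian.\n"
    "your persona: i like swimming.\n"
    "partner's persona: my father worked for ford.\n"
    "hello , how are you today ?"
)

persona_text = text.split('\n')[:-1]             # drop the actual utterance
persona_1 = [p[p.find(':') + 1:].strip() for p in persona_text if 'your persona:' in p]
persona_2 = [p[p.find(':') + 1:].strip() for p in persona_text if "partner's persona:" in p]
print(persona_1)   # ['i am a vegetarian.', 'i like swimming.']
print(persona_2)   # ['my father worked for ford.']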
diff --git a/projects/personachat/mturk/personachat_eval/init.py b/projects/personachat/mturk/personachat_eval/init.py
deleted file mode 100644
--- a/projects/personachat/mturk/personachat_eval/init.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/personachat/mturk/personachat_eval/run.py b/projects/personachat/mturk/personachat_eval/run.py
deleted file mode 100644
--- a/projects/personachat/mturk/personachat_eval/run.py
+++ /dev/null
@@ -1,139 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.core.params import ParlaiParser
-from parlai.mturk.core.mturk_manager import MTurkManager
-from worlds import PersonaChatEvalWorld, PersonaProfileWorld, PersonasGenerator
-from task_config import task_config
-
-import os
-
-
-def main():
- """
-    This task consists of an MTurk worker chatting with a model agent, with both
-    playing assigned personas, and the worker then evaluating the conversation.
- """
- argparser = ParlaiParser(False, False)
- argparser.add_parlai_data_path()
- argparser.add_mturk_args()
- argparser.add_argument(
- '-min_t', '--min_turns', default=5, type=int, help='minimum number of turns'
- )
- argparser.add_argument(
- '-mt', '--max_turns', default=10, type=int, help='maximal number of chat turns'
- )
- argparser.add_argument(
- '-mx_rsp_time',
- '--max_resp_time',
- default=150,
- type=int,
- help='time limit for entering a dialog message',
- )
- argparser.add_argument(
- '-mx_psn_time',
- '--max_persona_time',
- type=int,
- default=300,
-        help='time limit for turker entering the persona',
- )
- argparser.add_argument(
- '--ag_shutdown_time',
- default=120,
- type=int,
- help='time limit for entering a dialog message',
- )
- argparser.add_argument(
- '--persona-type',
- default='both',
- type=str,
- choices=['both', 'self', 'other'],
- help='Which personas to load from personachat',
- )
- argparser.add_argument(
- '--revised', default=True, type='bool', help='Whether to use revised personas'
- )
- argparser.add_argument(
- '-rt', '--range_turn', default='5,7', help='sample range of number of turns'
- )
- opt = argparser.parse_args()
- opt['task'] = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
- if 'data_path' not in opt:
- opt['data_path'] = os.getcwd() + '/data/' + opt['task']
- opt.update(task_config)
-
- mturk_agent_ids = ['PERSON_1']
-
- mturk_manager = MTurkManager(opt=opt, mturk_agent_ids=mturk_agent_ids)
-
- persona_generator = PersonasGenerator(opt)
- mturk_manager.setup_server()
-
- # SET MODEL AGENT OPT HERE
- model_agent_opt = {}
-
- try:
- mturk_manager.start_new_run()
- mturk_manager.create_hits()
-
- if not opt['is_sandbox']:
- blocked_worker_list = []
- for w in blocked_worker_list:
- mturk_manager.block_worker(
- w,
- 'We found that you have unexpected behaviors in our previous HITs. For more questions please email us.',
- )
-
- def run_onboard(worker):
- worker.persona_generator = persona_generator
- world = PersonaProfileWorld(opt, worker)
- world.parley()
- world.shutdown()
-
- mturk_manager.set_onboard_function(onboard_function=run_onboard)
- mturk_manager.ready_to_accept_workers()
-
- def check_worker_eligibility(worker):
- return True
-
- def assign_worker_roles(workers):
- for index, worker in enumerate(workers):
- worker.id = mturk_agent_ids[index % len(mturk_agent_ids)]
-
- def run_conversation(mturk_manager, opt, workers):
- agents = workers[0]
- conv_idx = mturk_manager.conversation_index
- world = PersonaChatEvalWorld(
- opt=opt,
- agents=[agents],
- range_turn=[int(s) for s in opt['range_turn'].split(',')],
- max_turn=opt['max_turns'],
- max_resp_time=opt['max_resp_time'],
- model_agent_opt=model_agent_opt,
- world_tag='conversation t_{}'.format(conv_idx),
- )
- world.reset_random()
- while not world.episode_done():
- world.parley()
- world.save_data()
-
- world.shutdown()
- world.review_work()
-
- mturk_manager.start_task(
- eligibility_function=check_worker_eligibility,
- assign_role_function=assign_worker_roles,
- task_function=run_conversation,
- )
-
- except BaseException:
- raise
- finally:
- mturk_manager.expire_all_unassigned_hits()
- mturk_manager.shutdown()
-
-
-if __name__ == '__main__':
- main()
diff --git a/projects/personachat/mturk/personachat_eval/task_config.py b/projects/personachat/mturk/personachat_eval/task_config.py
deleted file mode 100644
--- a/projects/personachat/mturk/personachat_eval/task_config.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-task_config = {}
-
-"""A short and descriptive title about the kind of task the HIT contains.
-On the Amazon Mechanical Turk web site, the HIT title appears in search results,
-and everywhere the HIT is mentioned.
-"""
-task_config[
- 'hit_title'
-] = 'Play a character and chat! [You can keep accepting new HITs]'
-
-
-"""A description includes detailed information about the kind of task the HIT contains.
-On the Amazon Mechanical Turk web site, the HIT description appears in the expanded
-view of search results, and in the HIT and assignment screens.
-"""
-task_config[
- 'hit_description'
-] = 'You will chat to another person while adopting a specific persona.'
-
-
-"""One or more words or phrases that describe the HIT, separated by commas.
-On MTurk website, these words are used in searches to find HITs.
-"""
-task_config['hit_keywords'] = 'chat,dialog'
-
-
-"""A detailed task description that will be shown on the HIT task preview page
-and on the left side of the chat page. Supports HTML formatting.
-"""
-task_config[
- 'task_description'
-] = '''
-<br>
-<b><h4>Task Description</h4></b>
-<br>
-(You can keep accepting new HITs after you finish your current one, so keep working on it if you like the task!)
-<br>
-<b>In this task you will chitchat with another worker, playing the part of a given character.</b>
-For example, your given character could be: <br><br> I am a vegetarian. I like swimming. My father used to work for Ford. My favorite band is Maroon5. I got a new job last month, which is about advertising design.
-<br>
-<br>
-Chat with the other person naturally and <b><span style="color:blue">try to get to know each other, i.e.
-both ask questions and answer questions of your chat partner
-at the same time sticking to your own characters</span></b>.
-<br>
-<br>
-<b><span style="color:blue">You will get bonus for high quality dialogs.</span></b>
-<b>Send short messages, <span style="color:red">max 15 words</span>.</b>
-<b>Do not trivially copy the character descriptions into the message.</b>
-After a given number of turns, click “DONE" to finish the chat.
-There is a <b>2 min</b> time limit for each turn.
-<br>
-<br>
-- Do not reference the task or MTurk itself during the conversation.
-<br>
-<b><span style="color:red">- No racism, sexism or otherwise offensive comments, or the submission will be rejected and we will report to Amazon.</b></span>
-'''
diff --git a/projects/personachat/mturk/personachat_eval/worlds.py b/projects/personachat/mturk/personachat_eval/worlds.py
deleted file mode 100644
--- a/projects/personachat/mturk/personachat_eval/worlds.py
+++ /dev/null
@@ -1,621 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.core.agents import create_agent
-from parlai.mturk.core.worlds import MTurkOnboardWorld
-from parlai.mturk.core.agents import TIMEOUT_MESSAGE
-from parlai.core.worlds import validate, MultiAgentDialogWorld
-from joblib import Parallel, delayed
-from extract_and_save_personas import main as main_extract
-import numpy as np
-import time
-import os
-import pickle
-import random
-
-ONBOARD_MSG = '\nWelcome! Below is your persona \
- (you can find it on the left side of the chat)\n \
- When you are ready to start your conversation, \
- click the "I am ready, continue" button below\n'
-START_MSG = '\nSuccessfully matched. \
- Now let\'s get to know each other through the chat! \n\
- You need to finish at least <b>{} chat turns</b>, \
- after which you can click the "Done" button to end the chat. \n \
- <b>You can track your character description on the left.</b> {} \n\
- <span style="color:blue"><b>Please try to speak to the other person \
- as if you are the character assigned.</b></span> \n \
- <span style="color:blue"><b>Do not trivially copy \
- the character descriptions into the message.</b></span>'
-CHAT_NOT_DONE_MSG = 'Sorry, we need at least <b>{} more turn(s)</b> to finish. \
- Please send a new message:'
-TIMEOUT_MSG = '<b> The other person has timed out. \
- Please click the "Done with this HIT" button below to finish this HIT.\
- </b>'
-EXCEED_MIN_TURNS_MSG = '\n {} chat turns finished! \n \
- You can click the "Done" button to end the chat if it\'s your turn \
- or keep chatting.'
-UNEXPECTED_DISCONNECTION_MSG = 'The other worker unexpectedly disconnected. \n \
- Please click <span style="color:blue"><b>Done with this HIT</b>\
- </span> button below to finish this HIT.'
-CHAT_ENDED_MSG = 'One of you ended the chat. Thanks for your time! \n\
- Please click <span style="color:blue"><b>Done with this HIT</b>\
- </span> button below to finish this HIT.'
-RETRIEVED_PASSAGES_INST_MSG = 'Please take a look at the relevant passages \
- to your left before answering'
-RETRIEVED_PASSAGES_MSG = '<span style="color:blue"> - {}\
- </span>'
-WAITING_MSG = 'Please wait while we match you with another worker...'
-
-
-class PersonasGenerator(object):
- def __init__(self, opt):
- self.personas_idx_stack_path = os.path.join(
- os.getcwd(), './personas_idx_stack.pkl'
- )
-
- self.personas_path = '{}/data/personas-{}'.format(
- os.getcwd(),
- opt['persona_type'] + 'Revised' if opt['revised'] else 'Original',
- )
- if not os.path.exists(self.personas_path):
- opt['personas_path'] = self.personas_path
- main_extract(opt)
- self.personas_name_list = []
-
- for f_name in os.listdir(self.personas_path):
- if f_name.endswith('.pkl'):
- self.personas_name_list.append(f_name)
-
- if os.path.exists(self.personas_idx_stack_path):
- with open(self.personas_idx_stack_path, 'rb') as handle:
- self.idx_stack = pickle.load(handle)
- else:
- self.idx_stack = []
- self.add_idx_stack()
- self.save_idx_stack()
- pass
-
- def add_idx_stack(self):
- stack = [i for i in range(len(self.personas_name_list))]
- random.seed()
- random.shuffle(stack)
- self.idx_stack = stack + self.idx_stack
-
- def pop_persona(self):
- if len(self.idx_stack) == 0:
- self.add_idx_stack()
- idx = self.idx_stack.pop()
- print("\n******* Pop persona {} from stack. *******\n".format(idx))
- data = np.load(
- os.path.join(self.personas_path, self.personas_name_list[int(idx)])
- )
- return (idx, data)
-
- def push_persona(self, idx):
- self.idx_stack.append(idx)
-
- def save_idx_stack(self):
- with open(self.personas_idx_stack_path, 'wb') as handle:
- pickle.dump(self.idx_stack, handle)
-
-
-class PersonaProfileWorld(MTurkOnboardWorld):
- """
- A world that provides a persona to the MTurkAgent.
- """
-
- def __init__(self, opt, mturk_agent):
- self.task_type = 'sandbox' if opt['is_sandbox'] else 'live'
- self.max_persona_time = opt['max_persona_time']
- super().__init__(opt, mturk_agent)
-
- def parley(self):
- persona_idx, data = self.mturk_agent.persona_generator.pop_persona()
- model_persona_idx, model_data = self.mturk_agent.persona_generator.pop_persona()
- self.mturk_agent.persona_idx = persona_idx
- self.mturk_agent.persona_data = data
- self.mturk_agent.model_persona = [model_persona_idx, model_data]
- self.mturk_agent.persona_pair = [
- (persona_idx, data),
- (model_persona_idx, model_data),
- ]
- persona_text = ''
- for s in data:
- persona_text += '<b><span style="color:blue">' '{}\n</span></b>'.format(
- s.strip()
- )
-
- self.mturk_agent.observe(
- {
- 'id': 'SYSTEM',
- 'show_persona': True,
- 'text': ONBOARD_MSG + '<br>' + persona_text + '<br>',
- }
- )
-
- act = self.mturk_agent.act(timeout=self.max_persona_time)
-
- # timeout
- if act['episode_done'] or (('text' in act and act['text'] == TIMEOUT_MESSAGE)):
-
- self.mturk_agent.persona_generator.push_persona(
- self.mturk_agent.persona_idx
- )
- self.mturk_agent.persona_generator.save_idx_stack()
- self.episodeDone = True
- return
-
- if 'text' not in act:
- control_msg = {'id': 'SYSTEM', 'text': WAITING_MSG}
- self.mturk_agent.observe(validate(control_msg))
- self.episodeDone = True
-
-
-class PersonaChatEvalWorld(MultiAgentDialogWorld):
- def __init__(
- self,
- opt,
- agents=None,
- shared=None,
- range_turn=(4, 7),
- max_turn=10,
- max_resp_time=120,
- model_agent_opt=None,
- world_tag='NONE',
- agent_timeout_shutdown=120,
- ):
- self.turn_idx = 0
- self.range_turn = range_turn
- self.max_turn = max_turn
- self.n_turn = np.random.randint(self.range_turn[0], self.range_turn[1]) + 1
- self.dialog = []
- self.task_type = 'sandbox' if opt['is_sandbox'] else 'live'
- self.chat_done = False
- self.n_personas = []
- self.fluency_score = len(agents) * [-1]
- self.eng_score = len(agents) * [-1]
- self.consistent_score = len(agents) * [-1]
- self.persona_picked = len(agents) * [None]
- self.world_tag = world_tag
-
- # set up model agent
- self.model_agent = create_agent(model_agent_opt)
-
- # below are timeout protocols
- self.max_resp_time = max_resp_time # in secs
- self.agent_timeout_shutdown = agent_timeout_shutdown
- super().__init__(opt, agents, shared)
- self.personas = [
- (ag.persona_data if hasattr(ag, 'persona_data') else None)
- for ag in self.agents
- ]
-
- def parley(self):
- self.turn_idx += 1
-
- control_msg = {'episode_done': False}
- control_msg['id'] = 'SYSTEM'
-
- print(self.world_tag + ' is at turn {}...'.format(self.turn_idx))
-
- '''If at first turn, we need to give each agent their persona'''
- if self.turn_idx == 1:
- for idx, agent in enumerate(self.agents):
- persona_text = ''
- for s in self.personas[idx]:
- persona_text += (
- '<b><span style="color:blue">'
- '{}\n</span></b>'.format(s.strip())
- )
- control_msg['persona_text'] = persona_text
- control_msg['text'] = self.get_instruction(
- tag='start', agent_id=agent.id
- )
- agent.observe(validate(control_msg))
- if idx == 0:
- time.sleep(3)
-
- '''If we get to the min turns, inform turker that they can end if they
- want
- '''
- if self.turn_idx == self.n_turn + 1:
- for idx, agent in enumerate(self.agents):
- control_msg['text'] = self.get_instruction(idx, tag='exceed_min_turns')
- control_msg['exceed_min_turns'] = True
- agent.observe(validate(control_msg))
-
- '''Otherwise, we proceed accordingly'''
- acts = []
- # MTurk agent turn
- idx = 0
- agent = self.agents[0] # Mturk agent
- acts.append(agent.act(timeout=self.max_resp_time))
- if acts[idx] is not None:
- if acts[idx]['text'] == 'PERSONA':
- _text = ''
- for s in agent.model_persona[1]['persona']:
- _text += (
- '<b><span style="color:blue">' + s.strip() + '</span></b><br>'
- )
- control_msg['text'] = 'The model persona is: \n' + _text
- agent.observe(control_msg)
- return
- while self.is_msg_tooshortlong(acts[idx], agent) or self.is_exact_match(
- acts[idx], agent
- ):
- acts[idx] = agent.act()
-
- if acts[idx]['episode_done']:
- self.chat_done = True
- self.check_timeout(acts[idx])
- for ag in self.agents:
- if ag != agent and ag.some_agent_disconnected:
- control_msg[
- 'text'
-                ] = 'The other worker unexpectedly disconnected. \n \
- Please click <span style="color:blue"><b>Done with this HIT</b></span> button below to exit this HIT. No rejections.'
- ag.observe(validate(control_msg))
- return
- if self.turn_idx > self.n_turn:
- acts = [None]
-
- # Fluency Check
- for idx, agent in enumerate(self.agents):
- control_msg[
- 'text'
-            ] = 'Now the conversation is complete! \n Please evaluate the other person\'s \
-                <span style="color:blue"><b>fluency</b></span> during this conversation by \
-                <b>entering a score from [1, 2, 3, 4, 5]</b> below, \
-                <span style="color:blue">fluency reflects whether the other person\'s words are accurate, and whether you can read them quickly and with ease.</span>\
- (1 means "not fluent at all" and 5 means "extremely fluent", e.g., You can enter 3 for an OK fluency)'
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- while acts[idx]['text'] not in ['1', '2', '3', '4', '5']:
- control_msg[
- 'text'
- ] = "The score you entered must be in [1, 2, 3, 4, 5]. Remember to click the SEND button and not the DONE button. Please try again:"
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- if 'text' in acts[idx] and acts[idx]['text'] in [
- '1',
- '2',
- '3',
- '4',
- '5',
- ]:
- self.fluency_score[idx] = int(acts[idx]['text'])
-
- # Engagingness Check
- for idx, agent in enumerate(self.agents):
- control_msg[
- 'text'
-            ] = 'Now please evaluate the other person\'s \
- <span style="color:blue"><b>engagingness DISREGARDING the fluency</b></span> \
- during this conversation by <b>entering a score from [1, 2, 3, 4, 5]</b> below: \
- (1 means "not engaging at all" and 5 means "extremely engaging", e.g., You can enter 3 for an OK dialog)'
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- while acts[idx]['text'] not in ['1', '2', '3', '4', '5']:
- control_msg[
- 'text'
- ] = "The score you entered must be in [1, 2, 3, 4, 5]. Remember to click the SEND button and not the DONE button. Please try again:"
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- if 'text' in acts[idx] and acts[idx]['text'] in [
- '1',
- '2',
- '3',
- '4',
- '5',
- ]:
- self.eng_score[idx] = int(acts[idx]['text'])
-
- # Check Consistency
- for idx, agent in enumerate(self.agents):
- control_msg[
- 'text'
-            ] = 'Now please evaluate the other person\'s \
- <span style="color:blue"><b>consistency of persona</b></span> \
- (e.g., "I have a dog" followed by "I have no pets" is not consistent)\
- during this conversation by <b>entering a score from [1, 2, 3, 4, 5]</b> below: \
- (1 means "not consistent at all" and 5 means "extremely consistent", e.g., You can enter 3 for an OK consistency)'
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- while acts[idx]['text'] not in ['1', '2', '3', '4', '5']:
- control_msg[
- 'text'
- ] = "The score you entered must be in [1, 2, 3, 4, 5]. Remember to click the SEND button and not the DONE button. Please try again:"
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- if 'text' in acts[idx] and acts[idx]['text'] in [
- '1',
- '2',
- '3',
- '4',
- '5',
- ]:
- self.consistent_score[idx] = int(acts[idx]['text'])
-
- # Persona Selection
- for idx, agent in enumerate(self.agents):
- model_idx = agent.model_persona[0]
- self_idx = agent.persona_idx
- false_idx_list = [
- x
- for x in range(
- len(agent.persona_generator.personas_name_list)
- )
- ]
- false_idx_list.remove(self_idx)
- false_idx_list.remove(model_idx)
- false_idx = random.choice(false_idx_list)
- false_data = np.load(
- os.path.join(
- agent.persona_generator.personas_path,
- agent.persona_generator.personas_name_list[false_idx],
- )
- )
- cand_text = []
- for dt in [agent.model_persona[1], false_data]:
- if dt == agent.model_persona[1]:
- is_correct = True
- else:
- is_correct = False
- _text = ''
- for s in dt:
- _text += (
- '<b><span style="color:blue">'
- + s.strip()
- + '</span></b><br>'
- )
- cand_text.append((is_correct, _text))
- random.shuffle(cand_text)
-
- control_msg['text'] = (
- 'Now we show you two personas below, please select the one that is more likely to match \
- with the person you just talked to, by entering 1 or 2: \n'
- + '1.<br>'
- + cand_text[0][1]
- + '<br>'
- + '2.<br>'
- + cand_text[1][1]
- )
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
- while acts[idx]['text'] not in ['1', '2']:
- control_msg[
- 'text'
- ] = "The Persona index you entered must be 1 or 2. Remember to click the SEND button and not the DONE button. Please try again:"
- agent.observe(validate(control_msg))
- acts[idx] = agent.act(timeout=self.max_resp_time)
-
- if 'text' in acts[idx] and acts[idx]['text'] in ['1', '2']:
- self.persona_picked[idx] = cand_text[
- int(acts[idx]['text']) - 1
- ][0]
-
- for ag in self.agents:
- ag.observe(validate(acts[idx]))
- control_msg[
- 'text'
- ] = 'One of you ended the chat. Thanks for your time! \nPlease click <span style="color:blue"><b>Done with this HIT</b></span> button below to finish this HIT.'
- ag.observe(validate(control_msg))
- return
-
- self.dialog.append((idx, acts[idx]['text']))
- acts[idx]['eval_labels'] = ['__NULL__']
- self.model_agent.observe(acts[idx])
-
- # Model_agent turn
- idx = 1
- acts.append(self.model_agent.act())
-
- for (sb_0, sb_1) in [
- (' .', '.'),
- (' ,', ','),
- (' ?', '?'),
- (' !', '!'),
- ('i ', 'I '),
- ]:
- acts[idx]['text'] = acts[idx]['text'].replace(sb_0, sb_1)
-        acts[idx]['text'] = acts[idx]['text'].capitalize()
- acts[idx]['id'] = 'PERSON_2'
- acts[idx]['message_id'] = (
- acts[0]['message_id'][:-1] + '0'
- if acts[0]['message_id'][-1] != '0'
- else acts[0]['message_id'][:-1] + '1'
- )
- self.dialog.append((idx, acts[idx]['text']))
- time.sleep(len(acts[idx]['text'].split(' ')) * 0.5)
- agent.observe(acts[idx])
-
- def episode_done(self):
- return self.chat_done
-
- def get_instruction(self, agent_id=None, tag='first'):
- if tag == 'start':
- return (
- '\nSuccessfully matched. Now let\'s get to know each other through the chat! \n\
- You need to finish at least <b>'
- + str(self.n_turn)
- + ' chat turns</b>, \
- after that you can click the "Done" button to end the chat. \n \
- <b>You can track your character description on the left.</b> \
- \n <span style="color:blue"><b>Please try to speak to the other person as if you are the character assigned.</b></span> \n \
- <span style="color:blue"><b>Do not trivially copy the character descriptions into the message.</b></span>'
- )
-
- if tag == 'chat_not_done':
- return (
- 'Sorry, we need at least <b>'
- + str(self.n_turn + 1 - self.turn_idx)
- + ' more turn(s)</b> to finish. '
- + 'Please send a new message:'
- )
-
- if tag == 'timeout':
- return '<b>{}</b> is timeout. \
- Please click the "Done with this HIT" button below to exit this HIT. No rejections.'.format(
- agent_id
- )
-
- if tag == 'exceed_min_turns':
- return '\n {} chat turns finished! \n Keep chatting or you can click the "Done" button to end the chat if it\'s your turn.'.format(
- self.n_turn
- )
-
- def save_data(self):
- # save persona_idx_stack
- convo_finished = True
- bad_workers = []
- for ag in self.agents:
- if (
- ag.hit_is_abandoned
- or ag.hit_is_returned
- or ag.disconnected
- or ag.hit_is_expired
- ):
- bad_workers.append(ag.worker_id)
- convo_finished = False
- if (
- not convo_finished
- or self.dialog == []
- or self.eng_score[0] == -1
- or self.fluency_score[0] == -1
- or self.consistent_score[0] == -1
- ):
- for ag in self.agents:
- ag.not_approve = True
- ag.persona_generator.push_persona(ag.persona_idx)
- print(
- "\n******* Push persona {} back to stack. *******\n".format(
- ag.persona_idx
- )
- )
-
- data_path = self.opt['data_path']
- if not os.path.exists(data_path):
- os.makedirs(data_path)
- if convo_finished:
- filename = os.path.join(
- data_path,
- '{}_{}_{}.pkl'.format(
- time.strftime("%Y%m%d-%H%M%S"),
- np.random.randint(0, 1000),
- self.task_type,
- ),
- )
- else:
- filename = os.path.join(
- data_path,
- '{}_{}_{}_incomplete.pkl'.format(
- time.strftime("%Y%m%d-%H%M%S"),
- np.random.randint(0, 1000),
- self.task_type,
- ),
- )
- print(self.world_tag + ': Data successfully saved at {}.'.format(filename))
- self.personas.append(self.agents[0].model_persona[1])
- pickle.dump(
- {
- 'personas': self.personas,
- 'dialog': self.dialog,
- 'workers': [ag.worker_id for ag in self.agents],
- 'bad_workers': bad_workers,
- 'n_turn': self.n_turn,
- 'fluency_score': self.fluency_score,
- 'eng_score': self.eng_score,
- 'consistent_score': self.consistent_score,
- 'persona_picked': self.persona_picked,
- 'n_personas': self.n_personas,
- },
- open(filename, 'wb'),
- )
-
- def is_exact_match(self, act, ag, tolerance=0):
- if act['episode_done']:
- return False
-
- control_msg = {'episode_done': False}
- control_msg['id'] = 'SYSTEM'
-
- text = act['text']
- if text not in ['', ' ', ' ', ' ']:
- n_word_match = 0
- for per in ag.persona_data:
- per_parse = per.split(' ')
- regular_words = ['', ' ', 'I', 'I\'m', 'My', 'i']
- for r_w in regular_words:
- if r_w in per_parse:
- per_parse.remove(r_w)
- per_subseq = [
- ' '.join(per_parse[i : i + len(per_parse) - tolerance])
- for i in range(tolerance + 1)
- ]
- for pp in per_subseq:
- if pp in ['', ' ', ' ', ' ']:
- per_subseq.remove(pp)
- n_word_match += sum([(paa in text) for paa in per_subseq])
- if n_word_match > 0:
- control_msg[
- 'text'
- ] = 'We found that you <b><span style="color:red">trivially copied character descriptions</span></b>. Please rephrase your message again.'
- ag.observe(validate(control_msg))
- return True
- else:
- return False
-
- def is_msg_tooshortlong(self, act, ag, th_min=5, th_max=17):
- if act['episode_done']:
- return False
-
- control_msg = {'episode_done': False}
- control_msg['id'] = 'SYSTEM'
-
- msg_len = len(act['text'].split(' '))
- if msg_len < th_min:
- control_msg[
- 'text'
- ] = 'Your message is too short, please make it more than <b><span style="color:red">5 words</span></b>.'
- ag.observe(validate(control_msg))
- return True
- if msg_len > th_max:
- control_msg[
- 'text'
- ] = 'Your message is too long, please make it less than <b><span style="color:red">15 words</span></b>.'
- ag.observe(validate(control_msg))
- return True
- return False
-
- def reset_random(self):
- self.n_turn = np.random.randint(self.range_turn[0], self.range_turn[1]) + 1
-
- def check_timeout(self, act):
- if act['text'] == '[TIMEOUT]' and act['episode_done']:
- control_msg = {'episode_done': True}
- control_msg['id'] = 'SYSTEM'
- control_msg['text'] = self.get_instruction(
- agent_id=act['id'], tag='timeout'
- )
- for ag in self.agents:
- if ag.id != act['id']:
- ag.observe(validate(control_msg))
- self.chat_done = True
- return True
- else:
- return False
-
- def review_work(self):
- global review_agent
-
- def review_agent(ag):
- if hasattr(ag, 'not_approve'):
- pass
- else:
- ag.approve_work()
-
- Parallel(n_jobs=len(self.agents), backend='threading')(
- delayed(review_agent)(agent) for agent in self.agents
- )
diff --git a/projects/personachat/scripts/__init__.py b/projects/personachat/scripts/__init__.py
deleted file mode 100644
--- a/projects/personachat/scripts/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/personachat/scripts/kvmemnn_eval.py b/projects/personachat/scripts/kvmemnn_eval.py
deleted file mode 100644
--- a/projects/personachat/scripts/kvmemnn_eval.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.core.build_data import download_models
-from parlai.core.params import ParlaiParser
-from parlai.scripts.eval_model import eval_model
-
-'''Evaluate pre-trained model trained for hits@1 metric
-Key-Value Memory Net model trained on personachat using persona 'self'
-'''
-
-if __name__ == '__main__':
- parser = ParlaiParser(add_model_args=True)
- parser.add_argument('-n', '--num-examples', default=100000000)
- parser.add_argument('-d', '--display-examples', type='bool', default=False)
- parser.add_argument('-ltim', '--log-every-n-secs', type=float, default=2)
- parser.set_defaults(
- task='personachat:self',
- model='projects.personachat.kvmemnn.kvmemnn:Kvmemnn',
- model_file='models:personachat/kvmemnn/kvmemnn/persona-self_rephraseTrn-True_rephraseTst-False_lr-0.1_esz-500_margin-0.1_tfidf-False_shareEmb-True_hops1_lins0_model',
- datatype='test',
- numthreads=8,
- )
- opt = parser.parse_args()
- # build all profile memory models
- fnames = ['kvmemnn.tgz']
- opt['model_type'] = 'kvmemnn' # for builder
- download_models(opt, fnames, 'personachat')
-
- # add additional model args
- opt['interactive_mode'] = False
-
- eval_model(parser)
diff --git a/projects/personachat/scripts/kvmemnn_interactive.py b/projects/personachat/scripts/kvmemnn_interactive.py
deleted file mode 100644
--- a/projects/personachat/scripts/kvmemnn_interactive.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.core.build_data import download_models
-from parlai.core.params import ParlaiParser
-from parlai.scripts.interactive import interactive
-
-'''Interact with pre-trained model
-Key-Value Memory Net model trained on personachat using persona 'self'
-[Note: no persona in this example code is actually given to the model.]
-'''
-
-if __name__ == '__main__':
- parser = ParlaiParser(add_model_args=True)
- parser.add_argument('-d', '--display-examples', type='bool', default=False)
- parser.set_params(
- task='parlai.agents.local_human.local_human:LocalHumanAgent',
- model='projects.personachat.kvmemnn.kvmemnn:KvmemnnAgent',
- model_file='models:personachat/kvmemnn/kvmemnn/persona-self_rephraseTrn-True_rephraseTst-False_lr-0.1_esz-500_margin-0.1_tfidf-False_shareEmb-True_hops1_lins0_model',
- interactive_mode=True,
- )
- opt = parser.parse_args()
- # build all profile memory models
- fnames = ['kvmemnn.tgz']
- opt['model_type'] = 'kvmemnn' # for builder
- download_models(opt, fnames, 'personachat')
- interactive(opt)
diff --git a/projects/personachat/scripts/languagemodel_opensub2018_interactive.py b/projects/personachat/scripts/languagemodel_opensub2018_interactive.py
deleted file mode 100644
--- a/projects/personachat/scripts/languagemodel_opensub2018_interactive.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.core.build_data import download_models
-from parlai.core.params import ParlaiParser
-from parlai.scripts.interactive import interactive
-from parlai.agents.language_model.language_model import LanguageModelAgent
-
-'''Interact with pre-trained model
-Language model trained on Opensubtitles 2018 dataset
-Run from ParlAI directory
-'''
-
-if __name__ == '__main__':
- parser = ParlaiParser(add_model_args=True)
- parser.add_argument('-d', '--display-examples', type='bool', default=False)
- LanguageModelAgent.add_cmdline_args(parser)
- parser.set_params(
- dict_file='models:personachat/language_model/languagemodel_esz512_hid1024_nl2.pt.dict',
- sampling_mode=True,
- task='parlai.agents.local_human.local_human:LocalHumanAgent',
- model='language_model',
- model_file='models:personachat/language_model/languagemodel_esz512_hid1024_nl2.pt',
- )
-
- opt = parser.parse_args()
- opt['model_type'] = 'language_model' # for builder
- # build all profile memory models
- fnames = [
- 'languagemodel_esz512_hid1024_nl2.pt',
- 'languagemodel_esz512_hid1024_nl2.pt.opt',
- 'languagemodel_esz512_hid1024_nl2.pt.dict',
- ]
- download_models(opt, fnames, 'personachat', version='v3.0')
-
- interactive(opt)
diff --git a/projects/personality_captions/transresnet/transresnet.py b/projects/personality_captions/transresnet/transresnet.py
--- a/projects/personality_captions/transresnet/transresnet.py
+++ b/projects/personality_captions/transresnet/transresnet.py
@@ -65,13 +65,6 @@ def add_cmdline_args(argparser):
return arg_group
def __init__(self, opt, shared=None):
- if opt.get('numthreads', 1) > 1:
- raise RuntimeError(
- 'Warning: You cannot use multithreading with '
- 'this agent, as the current metrics do not '
- 'support sharing of lists (for median rank '
- 'calculation). Please set --numthreads to 1'
- )
self.metrics = {
'hits@1/100': 0.0,
'loss': 0.0,
diff --git a/projects/polyencoder/__init__.py b/projects/polyencoder/__init__.py
deleted file mode 100644
--- a/projects/polyencoder/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-import sys
-
-if sys.version_info < (3, 0):
- raise RuntimeError('ParlAI requires Python 3.')
diff --git a/projects/taskntalk/__init__.py b/projects/taskntalk/__init__.py
deleted file mode 100644
--- a/projects/taskntalk/__init__.py
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
diff --git a/projects/taskntalk/coopgame_agent.py b/projects/taskntalk/coopgame_agent.py
deleted file mode 100644
--- a/projects/taskntalk/coopgame_agent.py
+++ /dev/null
@@ -1,382 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-from parlai.core.agents import Agent
-from parlai.core.dict import DictionaryAgent
-from .modules import ImgNet, ListenNet, StateNet, SpeakNet, PredictNet
-
-import torch
-from torch.autograd import Variable
-from torch import optim
-from torch.autograd import backward as autograd_backward
-
-
-class CooperativeGameAgent(Agent):
- """
- Base class for both, the questioner and answerer.
-
- It can be extended to create custom players of games, other than questioner and
- answerer. It has separate modules to listen (observe), speak (act) and update its
- internal state. Each module is a collection of one or more pytorch modules, and can
- be extended and replaced in the agent as per task requirements.
- """
-
- OPTIM_OPTS = {
- 'adadelta': optim.Adadelta, # type: ignore
- 'adagrad': optim.Adagrad, # type: ignore
- 'adam': optim.Adam,
- 'adamax': optim.Adamax, # type: ignore
- 'asgd': optim.ASGD, # type: ignore
- 'lbfgs': optim.LBFGS, # type: ignore
- 'rmsprop': optim.RMSprop, # type: ignore
- 'rprop': optim.Rprop, # type: ignore
- 'sgd': optim.SGD,
- }
-
- @staticmethod
- def dictionary_class():
- """
- If different strategy for tokenization and de-tokenization of actions is
- required, override this method to return custom subclass.
- """
- return DictionaryAgent
-
- @staticmethod
- def add_cmdline_args(argparser):
- """
- Add command-line arguments specifically for this agent.
- """
- group = argparser.add_argument_group('Cooperative Game Agent Arguments')
- group.add_argument(
- '--optimizer',
- default='adam',
- choices=CooperativeGameAgent.OPTIM_OPTS.keys(),
- help='Choose between pytorch optimizers. Any member of '
- 'torch.optim is valid and will be used with '
- 'default params except learning rate (as specified '
- 'by -lr).',
- )
- group.add_argument(
- '--learning-rate', default=1e-2, type=float, help='Initial learning rate'
- )
- group.add_argument(
- '--no-cuda',
- action='store_true',
- default=False,
- help='disable GPUs even if available',
- )
- group.add_argument(
- '--gpuid',
- type=int,
- default=-1,
- help='which GPU device to use (defaults to cpu)',
- )
- DictionaryAgent.add_cmdline_args(argparser)
-
- def __init__(self, opt, shared=None):
- super().__init__(opt, shared)
- self.id = 'CooperativeGameAgent'
- self.actions = []
-
- # initialize short (h) and long (c) term states
- self.reset()
-
- # basic modules for listening, state update and speaking
- # questioner will have `PredictNet`, answerer will have `ImgNet`
- self.listen_net = ListenNet(opt['in_vocab_size'], opt['embed_size'])
- self.state_net = StateNet(opt['embed_size'], opt['state_size'])
- self.speak_net = SpeakNet(opt['state_size'], opt['out_vocab_size'])
-
- # setup optimizer according to command-line arguments
- self.optimizer = self.setup_optimizer()
- # setup dictionary agent
- self.dict_agent = CooperativeGameAgent.dictionary_class()()
-
- # transfer agent to GPU if applicable
- self.use_cuda = not opt.get('no_cuda') and torch.cuda.is_available()
- if self.use_cuda:
- print('[ Using CUDA for %s ]' % self.id)
- torch.cuda.set_device(opt['gpuid'])
- for module in self.modules:
- module = module.cuda()
-
- @property
- def modules(self):
- """
- Property to return a list of pytorch modules.
-
- Override this method while subclassing, if extra modules are added (for example,
- image feature extractor in answerer).
- """
- return [self.listen_net, self.state_net, self.speak_net]
-
- def setup_optimizer(self):
- """
- Return a ``torch.nn.optim.optimizer`` according to command-line argument
- ``--optimizer``.
-
- Override this method to setup optimizer with non-default parameters or use
- custom optimizer not available as choice.
- """
- optim_class = CooperativeGameAgent.OPTIM_OPTS[self.opt['optimizer']]
- kwargs = {'lr': self.opt['learning_rate']}
- if self.opt['optimizer'] == 'sgd':
- kwargs['momentum'] = 0.95
- kwargs['nesterov'] = True
- return optim_class([module.parameters() for module in self.modules], **kwargs)
-
- def tokenize(self, text):
- """
- Convert text observaton (string) to a ``torch.autograd.Variable`` of tokens
- using ``DictionaryAgent``.
- """
- text_tokens = self.dict.txt2vec(text)
- token_vars = Variable(torch.Tensor(text_tokens))
- if self.use_cuda:
- token_vars = token_vars.cuda()
- return token_vars
-
- def detokenize(self, vec):
- """
- Convert a ``torch.autograd.Variable`` of tokens into a string.
- """
- text_tokens = vec
- if isinstance(text_tokens, Variable):
- text_tokens = list(text_tokens.data)
- return self.dict.vec2txt(text_tokens)
-
- def observe(self, observation):
- """
- Update state, given a previous reply by other agent.
-
- In case of questioner, it can be goal description at start of episode.
- """
- self.observation = observation
-
- # if episode not done, tokenize, embed and update state
- # at the end of dialog episode, perform backward pass and step
- if not observation.get('episode_done', False):
- text_tokens = self.tokenize(observation['text'])
- token_embeds = self.listen_net(text_tokens)
- if 'image' in observation:
- token_embeds = torch.cat((token_embeds, observation['image']), 1)
- token_embeds = token_embeds.squeeze(1)
- self.h_state, self.c_state = self.state_net(
- token_embeds, (self.h_state, self.c_state)
- )
- else:
- if observation.get('reward', None):
- for action in self.actions:
- action.reinforce(observation['reward'])
- autograd_backward(
- self.actions, [None for _ in self.actions], retain_graph=True
- )
- # clamp all gradients between (-5, 5)
- for module in self.modules:
- for parameter in module.parameters():
- parameter.grad.data.clamp_(min=-5, max=5)
- self.optimizer.step()
- else:
- # start of dialog episode
- self.optimizer.zero_grad()
- self.reset()
-
- def act(self):
- """
- Based on current state, utter a reply (string) for next round.
- """
- out_distr = self.speak_net(self.h_state)
- if self.opt['datatype'] == 'train':
- action = out_distr.multinomial()
- else:
- _, action = out_distr.max(1)
- action = action.unsqueeze(1)
- self.actions.append(action)
- action_text = self.detokenize(action.squeeze(1))
- return {'text': action_text, 'id': self.id}
-
- def reset(self, retain_actions=False):
- """
- Reset internal state (and actions, if specified).
- """
- # TODO(kd): share state across other instances during batch training
- self.h_state = Variable(torch.zeros(1, self.opt['hidden_size']))
- self.c_state = Variable(torch.zeros(1, self.opt['hidden_size']))
- if self.use_cuda:
- self.h_state, self.c_state = self.h_state.cuda(), self.c_state.cuda()
-
- if not retain_actions:
- self.actions = []
-
-
-class QuestionerAgent(CooperativeGameAgent):
- """
- Base class for questioner agent.
-
- It is blindfolded, and has an extra ``predict`` method, which performs action at the
- end of dialog episode to accomplish the goal.
- """
-
- @staticmethod
- def add_cmdline_args(argparser):
- """
- Add command-line arguments specifically for this agent.
-
- Default values at according to (Kottur et al. 2017).
- """
- DictionaryAgent.add_cmdline_args(argparser)
- group = argparser.add_argument_group('Questioner Agent Arguments')
- group.add_argument(
- '--q-in-vocab',
- default=13,
- type=int,
- help='Input vocabulary for questioner. Usually includes '
- 'total distinct words spoken by answerer, '
- 'questioner itself, and words by which the '
- 'goal is described.',
- )
- group.add_argument(
- '--q-embed-size',
- default=20,
- type=int,
- help='Size of word embeddings for questioner',
- )
- group.add_argument(
- '--q-state-size',
- default=100,
- type=int,
- help='Size of hidden state of questioner',
- )
- group.add_argument(
- '--q-out-vocab',
- default=3,
- type=int,
- help='Output vocabulary for questioner',
- )
- group.add_argument(
- '--q-num-pred',
- default=12,
- type=int,
- help='Size of output to be predicted (for goal).',
- )
- super().add_cmdline_args(argparser)
-
- def __init__(self, opt, shared=None):
- # transfer opt for super class to use
- opt['in_vocab_size'] = opt['q_in_vocab']
- opt['embed_size'] = opt['q_embed_size']
- opt['state_size'] = opt['q_state_size']
- opt['out_vocab_size'] = opt['q_out_vocab']
-
- # add a module for prediction (override self.modules later)
- self.predict_net = PredictNet(
- opt['embed_size'], opt['state_size'], opt['num_pred']
- )
- super().__init__(opt, shared)
- self.id = 'QuestionerAgent'
-
- @property
- def modules(self):
- # override and include predict_net as well
- return [self.listen_net, self.state_net, self.speak_net, self.predict_net]
-
- def predict(self, tasks, num_tokens):
- """
- Extra method to be executed at the end of episode to carry out goal and decide
- reward on the basis of prediction.
- """
- guess_tokens = []
- for _ in range(num_tokens):
- # explicit task dependence
- task_embeds = self.listen_net(tasks)
- prediction = self.predict_net(task_embeds, (self.h_state, self.c_state))
- guess_tokens.append(prediction)
- return guess_tokens
-
-
-class AnswererAgent(CooperativeGameAgent):
- """
- Base class for answerer agent.
-
- It holds visual information, and has an extra ``img_embed`` method, which extracts
- features from visual content.
- """
-
- @staticmethod
- def add_cmdline_args(argparser):
- """
- Add command-line arguments specifically for this agent.
-
- Default values at according to (Kottur et al. 2017).
- """
- DictionaryAgent.add_cmdline_args(argparser)
- group = argparser.add_argument_group('Questioner Agent Arguments')
- group.add_argument(
- '--a-in-vocab',
- default=13,
- type=int,
- help='Input vocabulary for questioner. Usually includes '
- 'total distinct words spoken by answerer, questioner '
- 'itself, and words by which the goal is described.',
- )
- group.add_argument(
- '--a-embed-size',
- default=20,
- type=int,
- help='Size of word embeddings for questioner',
- )
- group.add_argument(
- '--a-state-size',
- default=100,
- type=int,
- help='Size of hidden state of questioner',
- )
- group.add_argument(
- '--a-out-vocab',
- default=3,
- type=int,
- help='Output vocabulary for questioner',
- )
- group.add_argument(
- '--a-img-feat-size',
- default=12,
- type=int,
- help='Size of output to be predicted (for goal).',
- )
- group.add_argument(
- '--a-memoryless',
- default=False,
- action='store_true',
- help='Whether to remember previous questions/answers ' 'encountered.',
- )
- super().add_cmdline_args(argparser)
-
- def __init__(self, opt, shared=None):
- # transfer opt for super class to use
- opt['in_vocab_size'] = opt['a_in_vocab']
- opt['embed_size'] = opt['a_embed_size']
- opt['state_size'] = opt['a_state_size']
- opt['out_vocab_size'] = opt['a_out_vocab']
-
- # add a module for grounding visual content
- # opt['a_img_input_size'] should be specified through custom arg or
- # subclass, if needed
- self.img_net = ImgNet(opt['a_img_feat_size'], opt.get('a_img_input_size', None))
- super().__init__(opt, shared)
- self.id = 'AnswererAgent'
-
- @property
- def modules(self):
- # override and include img_net as well
- return [self.img_net, self.listen_net, self.state_net, self.speak_net]
-
- def img_embed(self, image):
- """
- Extra method to be executed at the end of episode to carry out goal and decide
- reward on the basis of prediction.
- """
- features = self.img_net(image)
- return features
diff --git a/projects/taskntalk/modules.py b/projects/taskntalk/modules.py
deleted file mode 100644
--- a/projects/taskntalk/modules.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import math
-import torch
-from torch import nn
-
-
-def xavier_init(module):
- """
- Xavier initializer for module parameters.
- """
- for parameter in module.parameters():
- if len(parameter.data.shape) == 1:
- # 1D vector means bias
- parameter.data.fill_(0)
- else:
- fan_in = parameter.data.size(0)
- fan_out = parameter.data.size(1)
- parameter.data.normal_(0, math.sqrt(2 / (fan_in + fan_out)))
-
-
-class ImgNet(nn.Module):
- """
- Module to embed the visual information. Used by answerer agent. In ``forward``:
- Embed image attributes and concatenate them together.
-
- **Note:** ``parlai.core.image_featurizers.ImageLoader`` can also be
- used instead.
- """
-
- def __init__(self, feature_size, input_size=None):
- super().__init__()
- # input_size is needed for modules which require input_size specification
- # nn.Embedding requires input size to be specified, while nn.Conv2d doesn't
- self.net = nn.Embedding(input_size, feature_size)
- xavier_init(self)
-
- def forward(self, image):
- embeds = self.net(image)
- features = torch.cat(embeds.transpose(0, 1), 1)
- return features
-
-
-class ListenNet(nn.Module):
- """
- Module for listening the sequence spoken by other agent.
-
- In ``forward``: Generate token embeddings.
- """
-
- def __init__(self, in_size, embed_size):
- super().__init__()
- self.net = nn.Embedding(in_size, embed_size)
- xavier_init(self)
-
- def forward(self, text_tokens):
- embeds = self.net(text_tokens)
- return embeds
-
-
-class StateNet(nn.Module):
- """
- Module for containing the state update mechanism for an agent.
-
- In
- ``forward``: Update states by passing the embeddings through LSTMCell.
- """
-
- def __init__(self, embed_size, state_size):
- super().__init__()
- self.net = nn.LSTMCell(embed_size, state_size)
- xavier_init(self)
-
- def forward(self, states, embeds):
- states = self.net(embeds, states)
- return states
-
-
-class SpeakNet(nn.Module):
- """
- Module for speaking a token based on current state.
-
- In ``forward``: Return a probability distribution of utterances of tokens.
- """
-
- def __init__(self, state_size, out_size):
- super().__init__()
- self.net = nn.Linear(state_size, out_size)
- self.softmax = nn.Softmax()
- xavier_init(self)
-
- def forward(self, state):
- out_distr = self.softmax(self.net(state))
- return out_distr
-
-
-class PredictNet(nn.Module):
- """
- Module to make a prediction as per goal.
-
- Used by questioner agent. In
- ``forward``: Return a probability distribution of utterances of tokens.
- """
-
- def __init__(self, embed_size, state_size, out_size):
- super().__init__()
- self.net_lstm = nn.LSTMCell(embed_size, state_size)
- self.net_mlp = nn.Linear(state_size, out_size)
- self.softmax = nn.Softmax()
- xavier_init(self)
-
- def forward(self, task_embeds, states):
- states = self.net_lstm(task_embeds, states)
- out_distr = self.softmax(self.predict_net(states[1]))
- _, prediction = out_distr.max(1)
- return prediction
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,7 @@
url='http://parl.ai/',
python_requires='>=3.6',
packages=find_packages(
- exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal',)
+ exclude=('data', 'docs', 'examples', 'tests', 'parlai_internal*',)
),
install_requires=reqs,
include_package_data=True,
| diff --git a/tests/test_examples.py b/tests/test_examples.py
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -26,7 +26,6 @@ def test_generation(self):
learningrate=LR,
batchsize=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
- numthreads=1,
hidden_size=16,
gradient_clip=1.0,
skip_generation=True,
diff --git a/tests/test_hogwild.py b/tests/test_hogwild.py
deleted file mode 100644
--- a/tests/test_hogwild.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-
-import unittest
-
-import parlai.utils.testing as testing_utils
-
-NUM_EXS = 100
-# ideally we want one choice which is a nice modulo with NUM_EXS, and one that isn't
-NUM_THREADS_CHOICES = [2, 8]
-BATCHSIZE_CHOICES = [1, 8]
-
-
-@testing_utils.skipIfGPU
-class TestHogwild(unittest.TestCase):
- """
- Check that hogwild is doing the right number of examples.
- """
-
- def test_hogwild_train(self):
- """
- Test the trainer eval with numthreads > 1 and batchsize in [1,2,3].
- """
- opt = dict(
- task='integration_tests:repeat:{}'.format(1),
- evaltask='integration_tests:repeat:{}'.format(NUM_EXS),
- model='repeat_label',
- display_examples=False,
- num_epochs=10,
- )
- for nt in NUM_THREADS_CHOICES:
- for bs in BATCHSIZE_CHOICES:
- opt['numthreads'] = nt
- opt['batchsize'] = bs
-
- valid, test = testing_utils.train_model(opt)
- self.assertEqual(valid['exs'], NUM_EXS)
- self.assertEqual(test['exs'], NUM_EXS)
-
- def test_hogwild_eval(self):
- """
- Test eval with numthreads > 1 and batchsize in [1,2,3].
- """
- opt = dict(
- task='integration_tests:repeat:{}'.format(NUM_EXS), model='repeat_label'
- )
- for nt in NUM_THREADS_CHOICES:
- for bs in BATCHSIZE_CHOICES:
- opt['numthreads'] = nt
- opt['batchsize'] = bs
-
- valid, test = testing_utils.eval_model(opt)
- self.assertEqual(valid['exs'], NUM_EXS)
- self.assertEqual(test['exs'], NUM_EXS)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/tests/test_import.py b/tests/test_import.py
--- a/tests/test_import.py
+++ b/tests/test_import.py
@@ -26,11 +26,6 @@ def test_import_world(self):
assert World
- def test_import_threadutils(self):
- from parlai.utils.thread import SharedTable
-
- assert SharedTable
-
def test_import_dialog(self):
from parlai.core.teachers import DialogTeacher
diff --git a/tests/test_memnn.py b/tests/test_memnn.py
--- a/tests/test_memnn.py
+++ b/tests/test_memnn.py
@@ -4,7 +4,6 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-import os
import unittest
import parlai.utils.testing as testing_utils
@@ -31,7 +30,6 @@ def test_labelcands_nomemnn(self):
lr=LR,
batchsize=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
- numthreads=1,
no_cuda=True,
embedding_size=32,
gradient_clip=1.0,
@@ -46,7 +44,6 @@ def test_labelcands_nomemnn(self):
self.assertGreater(valid['hits@1'], 0.95)
self.assertGreater(test['hits@1'], 0.95)
- @testing_utils.skipIfGPU
@testing_utils.retry()
def test_labelcands_multi(self):
"""
@@ -59,7 +56,6 @@ def test_labelcands_multi(self):
lr=LR,
batchsize=BATCH_SIZE,
num_epochs=NUM_EPOCHS,
- numthreads=min(4, os.cpu_count()),
no_cuda=True,
embedding_size=32,
gradient_clip=1.0,
diff --git a/tests/test_metrics.py b/tests/test_metrics.py
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -121,7 +121,7 @@ class TestMetrics(unittest.TestCase):
"""
def test_simpleadd(self):
- m = Metrics(threadsafe=False)
+ m = Metrics()
m.add('key', SumMetric(1))
m.add('key', SumMetric(2))
assert m.report()['key'] == 3
@@ -133,62 +133,50 @@ def test_simpleadd(self):
m.add('key', SumMetric(2.5))
assert m.report()['key'] == 4.0
- # shouldn't throw exception
- m.flush()
-
def test_shared(self):
- m = Metrics(threadsafe=False)
- m2 = Metrics(threadsafe=False, shared=m.share())
- m3 = Metrics(threadsafe=False, shared=m.share())
+ m = Metrics()
+ m2 = Metrics(shared=m.share())
+ m3 = Metrics(shared=m.share())
m2.add('key', SumMetric(1))
m3.add('key', SumMetric(2))
- m2.flush() # just make sure this doesn't throw exception, it's a no-op
m.add('key', SumMetric(3))
assert m.report()['key'] == 6
- # shouldn't throw exception
- m.flush()
- m2.flush()
- m3.flush()
-
def test_multithreaded(self):
- m = Metrics(threadsafe=True)
- m2 = Metrics(threadsafe=True, shared=m.share())
- m3 = Metrics(threadsafe=True, shared=m.share())
+ # legacy test, but left because it's just another test
+ m = Metrics()
+ m2 = Metrics(shared=m.share())
+ m3 = Metrics(shared=m.share())
m2.add('key', SumMetric(1))
- m2.flush()
m3.add('key', SumMetric(2))
- m3.flush()
m.add('key', SumMetric(3))
- m.flush()
m.report()['key'] == 6
def test_verymultithreaded(self):
- m = Metrics(threadsafe=True)
+ # legacy test, but useful all the same, for ensuring
+ # metrics doesn't care about the order things are done
+ m = Metrics()
nt = 128
- ms = [Metrics(threadsafe=True, shared=m.share()) for _ in range(nt)]
+ ms = [Metrics(shared=m.share()) for _ in range(nt)]
# intentionally just over the int overflow
for _ in range(32768 + 1):
ms[random.randint(0, nt - 1)].add('key', SumMetric(1))
thread_ids = list(range(nt))
random.shuffle(thread_ids)
- for tid in thread_ids:
- ms[tid].flush()
-
assert m.report()['key'] == 32768 + 1
def test_largebuffer(self):
- m = Metrics(threadsafe=True)
- m2 = Metrics(threadsafe=True, shared=m.share())
+ # legacy test. left as just another test
+ m = Metrics()
+ m2 = Metrics(shared=m.share())
# intentionally just over the int overflow
for _ in range(32768 + 1):
m2.add('key', SumMetric(1))
- m2.flush()
assert m.report()['key'] == 32768 + 1
diff --git a/tests/test_quickstart.sh b/tests/test_quickstart.sh
--- a/tests/test_quickstart.sh
+++ b/tests/test_quickstart.sh
@@ -14,7 +14,7 @@ parlai train --help | grep -- --task > /dev/null
# view a task & train a model
parlai display_data -t babi:task10k:1
-parlai train_model -t babi:task10k:1 -mf /tmp/babi_memnn -bs 1 -nt 4 -eps 5 -m memnn --no-cuda
+parlai train_model -t babi:task10k:1 -mf /tmp/babi_memnn -bs 1 -eps 2 -m memnn --no-cuda
parlai display_model -t babi:task10k:1 -mf /tmp/babi_memnn -ecands vocab
# train a transformer on twitter
diff --git a/tests/test_seq2seq.py b/tests/test_seq2seq.py
--- a/tests/test_seq2seq.py
+++ b/tests/test_seq2seq.py
@@ -26,7 +26,6 @@ def test_ranking(self):
learningrate=LR,
batchsize=BATCH_SIZE,
num_epochs=3,
- numthreads=1,
embeddingsize=16,
hiddensize=16,
rnn_class='gru',
@@ -90,7 +89,6 @@ def test_badinput(self):
batchsize=10,
datatype='train:ordered:stream',
num_epochs=1,
- numthreads=1,
embeddingsize=16,
hiddensize=16,
inference='greedy',
@@ -98,35 +96,6 @@ def test_badinput(self):
)
-class TestHogwildSeq2seq(unittest.TestCase):
- @testing_utils.skipIfGPU
- def test_generation_multi(self):
- """
- This test uses a multi-turn task and multithreading.
- """
- valid, test = testing_utils.train_model(
- dict(
- task='integration_tests:multiturn_nocandidate',
- model='seq2seq',
- learningrate=LR,
- batchsize=BATCH_SIZE,
- num_epochs=NUM_EPOCHS * 2,
- numthreads=2,
- no_cuda=True,
- embeddingsize=16,
- hiddensize=16,
- rnn_class='gru',
- attention='general',
- gradient_clip=1.0,
- dropout=0.0,
- lookuptable='all',
- )
- )
-
- self.assertLess(valid['ppl'], 1.2)
- self.assertLess(test['ppl'], 1.2)
-
-
class TestBackwardsCompatibility(unittest.TestCase):
"""
Tests that a binary file continues to work over time.
diff --git a/tests/test_threadutils.py b/tests/test_threadutils.py
deleted file mode 100644
--- a/tests/test_threadutils.py
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright (c) Facebook, Inc. and its affiliates.
-# This source code is licensed under the MIT license found in the
-# LICENSE file in the root directory of this source tree.
-from parlai.utils.thread import SharedTable
-from multiprocessing import Process
-import parlai.utils.testing as testing_utils
-import unittest
-import random
-import time
-
-
-@testing_utils.skipIfGPU
-class TestSharedTable(unittest.TestCase):
- """
- Make sure the package is alive.
- """
-
- def test_init_from_dict(self):
- d = {'a': 0, 'b': 1, 'c': 1.0, 'd': True, 1: False, 2: 2.0}
- st = SharedTable(d)
- for k, v in d.items():
- assert st[k] == v
-
- def test_get_set_del(self):
- st = SharedTable({'key': 0})
- try:
- st['none']
- self.fail('did not fail on nonexistent key')
- except KeyError:
- pass
-
- st['key'] = 1
- assert st['key'] == 1
-
- st['key'] += 1
- assert st['key'] == 2
-
- try:
- st['key'] = 2.1
- self.fail('cannot change type of value for set keys')
- except TypeError:
- pass
-
- del st['key']
- assert 'key' not in st, 'key should have been removed from table'
-
- try:
- st['key'] = True
- self.fail('cannot change removed key')
- except KeyError:
- pass
-
- def test_iter_keys(self):
- st = SharedTable({'key': 0, 'ctr': 0.0, 'val': False, 'other': 1})
- assert len(st) == 4
- del st['key']
- assert len(st) == 3, 'length should decrease after deleting key'
- keyset1 = set(iter(st))
- keyset2 = set(st.keys())
- assert keyset1 == keyset2, 'iterating should return keys'
- assert len(keyset1) == 3, ''
-
- def test_concurrent_access(self):
- st = SharedTable({'cnt': 0})
-
- def inc():
- for _ in range(50):
- with st.get_lock():
- st['cnt'] += 1
- time.sleep(random.randint(1, 5) / 10000)
-
- threads = []
- for _ in range(5): # numthreads
- threads.append(Process(target=inc))
- for t in threads:
- t.start()
- for t in threads:
- t.join()
- assert st['cnt'] == 250
-
- def test_torch(self):
- try:
- import torch
- except ImportError:
- # pass by default if no torch available
- return
-
- st = SharedTable({'a': torch.FloatTensor([1]), 'b': torch.LongTensor(2)})
- assert st['a'][0] == 1.0
- assert len(st) == 2
- assert 'b' in st
- del st['b']
- assert 'b' not in st
- assert len(st) == 1
-
- if torch.cuda.is_available():
- st = SharedTable(
- {'a': torch.cuda.FloatTensor([1]), 'b': torch.cuda.LongTensor(2)}
- )
- assert st['a'][0] == 1.0
- assert len(st) == 2
- assert 'b' in st
- del st['b']
- assert 'b' not in st
- assert len(st) == 1
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/tests/test_transformers.py b/tests/test_transformers.py
--- a/tests/test_transformers.py
+++ b/tests/test_transformers.py
@@ -289,7 +289,6 @@ def test_beamsearch(self):
learningrate=7e-3,
batchsize=32,
num_epochs=10,
- numthreads=1,
n_layers=1,
n_heads=1,
ffn_size=32,
@@ -518,7 +517,6 @@ def test_badinput(self):
batchsize=10,
datatype='train:ordered:stream',
num_epochs=1,
- numthreads=1,
no_cuda=True,
embedding_size=16,
skip_generation=True,
| Retire hogwild
PyTorch 1.6 is coming out. Test it with the new hogwild. It will almost certainly fail.
Then delete all of the hogwild code. Everywhere. RIP.
| 2020-08-02T03:33:31Z | [] | [] |
|
facebookresearch/ParlAI | 2,941 | facebookresearch__ParlAI-2941 | [
"2912"
] | 0fe83071f796c73930c79761d14d7b27070baced | diff --git a/parlai/core/worlds.py b/parlai/core/worlds.py
--- a/parlai/core/worlds.py
+++ b/parlai/core/worlds.py
@@ -1110,8 +1110,10 @@ def __init__(self, opt: Opt, world: Union[DialogPartnerWorld, MultiWorld]):
def reset(self):
super().reset()
+ self._task_acts = [None for _ in range(self._BUFFER_SIZE)]
self._obs = [None for _ in range(self._BUFFER_SIZE)]
self._scores = [None for _ in range(self._BUFFER_SIZE)]
+ self.acts = [None, None]
self.number_parleys = 0
self.total_exs = 0
@@ -1183,6 +1185,12 @@ def parley(self):
self.worlds[i].parley_init()
act = self.worlds[i].get_task_agent().act()
+
+ # we log the task act and the index of the act
+ # in the buffer for world logging purposes
+ self._task_acts[i] = act # for world logging
+ self._task_acts[i]['dyn_batch_idx'] = i
+
obs = self.worlds[i].get_model_agent().observe(act)
self._obs[i] = obs
@@ -1245,15 +1253,20 @@ def parley(self):
# great, this batch is good to go! let's run it!
acts = self.world.get_model_agent().batch_act([self._obs[i] for i in batch])
+ self.acts = [[self._task_acts[i] for i in batch], acts]
# broadcast the results back to all the models
for i, act in zip(batch, acts):
# we need to make sure that the teachers saw the result
self.worlds[i].get_task_agent().observe(act)
# and that the agent copies saw their own voice
self.worlds[i].get_model_agent().self_observe(act)
-
# move these worlds forward
act = self.worlds[i].get_task_agent().act()
+ # we log the task act and the index of the act
+ # in the buffer for world logging purposes
+ self._task_acts[i] = act
+ self._task_acts[i]['dyn_batch_idx'] = i
+ # save the observations to form a batch
obs = self.worlds[i].get_model_agent().observe(act)
self._scores[i] = self._score(obs)
self._obs[i] = obs
diff --git a/parlai/utils/world_logging.py b/parlai/utils/world_logging.py
--- a/parlai/utils/world_logging.py
+++ b/parlai/utils/world_logging.py
@@ -8,7 +8,7 @@
Useful utilities for logging actions/observations in a world.
"""
-from parlai.core.worlds import BatchWorld
+from parlai.core.worlds import BatchWorld, DynamicBatchWorld
from parlai.utils.misc import msg_to_str
from parlai.utils.conversations import Conversations
import parlai.utils.logging as logging
@@ -81,15 +81,21 @@ def _add_episode(self, episode):
self._logs.append(episode)
def _is_batch_world(self, world):
- return isinstance(world, BatchWorld) and len(world.worlds) > 1
+ return (
+ isinstance(world, BatchWorld) or isinstance(world, DynamicBatchWorld)
+ ) and len(world.worlds) > 1
def _log_batch(self, world):
batch_act = world.get_acts()
parleys = zip(*batch_act)
for i, parley in enumerate(parleys):
- self._add_msgs(parley, idx=i)
- if world.worlds[i].episode_done():
- self.reset_world(idx=i)
+ # in dynamic batching, we only return `batchsize` acts, but the
+ # 'dyn_batch_idx' key in the task act corresponds the episode index
+ # in the buffer
+ idx = parley[0]['dyn_batch_idx'] if 'dyn_batch_idx' in parley[0] else i
+ self._add_msgs(parley, idx=idx)
+ if world.worlds[idx].episode_done():
+ self.reset_world(idx=idx)
def log(self, world):
"""
@@ -140,6 +146,7 @@ def write_parlai_format(self, outfile):
fw.write('\n')
def write_conversations_format(self, outfile, world):
+ logging.info(f'Saving log to {outfile} in Conversations format')
Conversations.save_conversations(
self._logs,
outfile,
| diff --git a/tests/test_dynamicbatching.py b/tests/test_dynamicbatching.py
--- a/tests/test_dynamicbatching.py
+++ b/tests/test_dynamicbatching.py
@@ -4,12 +4,14 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-from typing import Dict, Any
-
-import unittest
from parlai.core.opt import Opt
-import parlai.utils.testing as testing_utils
from parlai.tasks.integration_tests.agents import NUM_TEST, EXAMPLE_SIZE
+from parlai.utils.conversations import Conversations
+import parlai.utils.testing as testing_utils
+
+import os
+from typing import Dict, Any
+import unittest
_TASK = 'integration_tests:variable_length'
@@ -97,6 +99,34 @@ def test_multiworld_stream(self):
datatype='train:stream',
)
+ def test_world_logging(self):
+ with testing_utils.tempdir() as tmpdir:
+ save_report = os.path.join(tmpdir, 'report')
+ testing_utils.eval_model(
+ dict(
+ model_file='zoo:unittest/transformer_generator2/model',
+ task='integration_tests:multiturn_candidate',
+ save_world_logs=True,
+ report_filename=save_report,
+ truncate=1024,
+ dynamic_batching='full',
+ batchsize=4,
+ )
+ )
+ convo_fle = (
+ str(save_report)
+ + '_integration_tests:multiturn_candidate_replies.jsonl'
+ )
+ convos = Conversations(convo_fle)
+ for convo in convos:
+ self.assertEquals(len(convo), 2 * 4) # each episode is 4 turns
+ # now assert that they are all from the same dynamic batch index
+ dyn_batch_idx = convo[0]['dyn_batch_idx']
+ for i, turn in enumerate(convo):
+ if i % 2 == 0 and i > 0:
+ # we log the batch index in the teacher acts only
+ self.assertEquals(dyn_batch_idx, turn['dyn_batch_idx'])
+
def test_weird_batchsize(self):
# intentionally a difficult number
self._test_correct_processed(NUM_TEST, batchsize=7)
| World logging not working with eval_model.py with dynamic batching
When I tried `multiprocessing_eval.py --dynamic-batching full`, it gave me this error: `AttributeError: 'DynamicBatchWorld' object has no attribute 'acts'`. It seems `world_logging` does not support dynamic batching?
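For reference, the same failure can presumably be reproduced through `eval_model` directly. A sketch (the option values simply mirror the regression test added for this issue; `EvalModel.main` is assumed to forward them to the eval script unchanged):

```python
from parlai.scripts.eval_model import EvalModel

# Sketch of a minimal reproduction: save world logs while using dynamic batching.
# The zoo model and task below are the ones used in the regression test; any
# generative model/task pair should hit the same code path.
EvalModel.main(
    model_file='zoo:unittest/transformer_generator2/model',
    task='integration_tests:multiturn_candidate',
    save_world_logs=True,
    report_filename='/tmp/report',
    dynamic_batching='full',
    batchsize=4,
)
```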
| 2020-08-05T16:45:43Z | [] | [] |
|
facebookresearch/ParlAI | 3,140 | facebookresearch__ParlAI-3140 | [
"3139"
] | d2283004c72222392da404313eb0390fad9f90af | diff --git a/parlai/core/teachers.py b/parlai/core/teachers.py
--- a/parlai/core/teachers.py
+++ b/parlai/core/teachers.py
@@ -63,6 +63,15 @@
import argparse
+ERROR_MESSAGE_NO_DATAFILE = (
+ "{class_name} is expected to set self.opt['datafile'] inside `__init__` "
+ "before calling `super().__init__`. This will passed to setup_data, "
+ "indicating what data to load. If you don't know what to use, set "
+ "`opt['datafile'] = parlai.utils.data.DatatypeHelper.fold(opt['datatype'])` "
+ "to receive the fold name in setup_data."
+)
+
+
ChunkOutput = TypeVar('ChunkOutput')
@@ -539,6 +548,10 @@ def __init__(self, opt, shared=None):
if shared and shared.get('data'):
self.data = data_class(opt, shared=shared['data'], **kwargs)
else:
+ if 'datafile' not in self.opt:
+ raise KeyError(
+ ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
+ )
self.data = data_class(
opt,
data_loader=self.setup_data,
@@ -548,6 +561,26 @@ def __init__(self, opt, shared=None):
self.reset()
+ @abstractmethod
+ def setup_data(self, datafile: str):
+ """
+ The core method which the user should override.
+
+ Yields the data, one message at a time, as well as markers indicating
+ new episodes.
+
+ :param str datafile:
+ If the initializer set a 'datafile' field within the initalization,
+ this will be provided here. Otherwise, datafile will be the fold:
+ either "train", "valid", or "test".
+
+ :return:
+ Yields pairs (message, new_episode) containing a Message object
+ and whether the message marks the beginning of a totally new
+ episode.
+ """
+ pass
+
def reset(self):
"""
Reset the dialog to the start of the epoch, reset all metrics.
@@ -675,6 +708,12 @@ def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
else:
self.image_loader = ImageLoader(opt)
self.data = []
+
+ if 'datafile' not in opt:
+ raise KeyError(
+ ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
+ )
+
self._load(data_loader, opt['datafile'])
self.cands = None if cands is None else set(c for c in cands)
@@ -893,6 +932,10 @@ def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
else:
# main instance holds the stream and shares pointer to it
self.data_loader = data_loader
+ if 'datafile' not in opt:
+ raise KeyError(
+ ERROR_MESSAGE_NO_DATAFILE.format(class_name=self.__class__.__name__)
+ )
self.datafile = opt['datafile']
self.reset_data = None
self.is_reset = True
@@ -903,8 +946,8 @@ def __init__(self, opt, data_loader=None, cands=None, shared=None, **kwargs):
self.rank = get_rank()
self.num_workers = num_workers()
- self.is_distributed_and_is_eval = self.num_workers > 1 and any(
- x in opt['datatype'] for x in ('valid', 'test', 'train:evalmode')
+ self.is_distributed_and_is_eval = (
+ self.num_workers > 1 and not DatatypeHelper.is_training(opt['datatype'])
)
def share(self):
@@ -1578,7 +1621,7 @@ def __init__(self, opt, shared=None):
self.task = opt['task'].split(':')[1] if ':' in opt['task'] else opt['task']
self.data_path = self.get_data_path(opt)
self.data = self.load_data(self.data_path, self.opt)
- self.datatype = opt.get('datatype').split(':')[0]
+ self.datatype = DatatypeHelper.fold(opt['datatype'])
# Example of available models: 'resnet152', 'resnext101_32x48d_wsl',
# and ImageLoader supports other resnet and resnext models too
@@ -1758,7 +1801,7 @@ def load_data(self, data_path, opt):
Can be override by subclass.
"""
- dt = opt['datatype'].split(':')[0]
+ dt = DatatypeHelper.fold(opt['datatype'])
# Sometimes file is named "val" instead of "valid"
if dt not in ['train', 'valid', 'val', 'test']:
@@ -1956,7 +1999,7 @@ def __init__(self, opt: Opt, shared=None):
self.tasks.extend(create_task_agent_from_taskname(opt_singletask))
self.task_idx = -1
self.new_task = True
- self.random = opt.get('datatype') == 'train'
+ self.random = DatatypeHelper.should_shuffle(opt['datatype'])
# Make multi-task task probabilities.
self.cum_task_weights = [1] * len(self.tasks)
self.task_choices = range(len(self.tasks))
@@ -2135,7 +2178,7 @@ def __init__(self, opt, shared=None):
def _get_data_folder(self):
if not self.opt.get('datafile'):
raise RuntimeError(
- 'Must specify datafile or override this function '
+ 'Must specify datafile or override this function (_get_data_folder) '
'to return the data folder.'
)
diff --git a/parlai/utils/data.py b/parlai/utils/data.py
--- a/parlai/utils/data.py
+++ b/parlai/utils/data.py
@@ -13,6 +13,21 @@ class DatatypeHelper:
Helper class to determine properties from datatype strings.
"""
+ @classmethod
+ def fold(cls, datatype: str) -> str:
+ """
+ Extract the fold part of the datatype.
+
+ :param datatype:
+ parlai datatype
+
+ :return: the fold
+
+ >>> DatatypeHelper.fold("train:ordered")
+ ... "train"
+ """
+ return datatype.split(':')[0]
+
@classmethod
def should_cycle(cls, datatype: str) -> bool:
"""
| diff --git a/tests/test_teachers.py b/tests/test_teachers.py
--- a/tests/test_teachers.py
+++ b/tests/test_teachers.py
@@ -369,12 +369,30 @@ def setup_data(self, datafile):
yield Message({'text': str(j), 'label': str(j * 2)}), j == 1
+class NoDatafileTeacher(DialogTeacher):
+ def setup_data(self, datafile):
+ yield Message({'text': datafile, 'label': datafile}), True
+
+
class ViolationTeacher(_MockTeacher):
def setup_data(self, datafile):
yield {'text': 'foo', 'episode_done': True}, True
class TestDialogTeacher(unittest.TestCase):
+ def test_nodatafile(self):
+ for dt in [
+ 'train:ordered',
+ 'train:stream:ordered',
+ 'valid',
+ 'test',
+ 'valid:stream',
+ 'test:stream',
+ ]:
+ opt = Opt({'datatype': dt, 'datapath': '/tmp', 'task': 'test'})
+ with self.assertRaises(KeyError):
+ NoDatafileTeacher(opt)
+
def _verify_act(self, act, goal_text, goal_label, episode_done):
assert 'eval_labels' in act or 'labels' in act
labels = act.get('labels', act.get('eval_labels'))
diff --git a/tests/test_utils.py b/tests/test_utils.py
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -10,6 +10,7 @@
from copy import deepcopy
import time
import unittest
+from parlai.utils.data import DatatypeHelper
class TestUtils(unittest.TestCase):
@@ -139,5 +140,57 @@ def test_uppercase(self):
assert string_utils.uppercase("tEst") == "TEst"
+class TestDatatypeHelper(unittest.TestCase):
+ def test_fold(self):
+ assert DatatypeHelper.fold("train") == "train"
+ assert DatatypeHelper.fold("train:ordered") == "train"
+ assert DatatypeHelper.fold("train:stream") == "train"
+ assert DatatypeHelper.fold("train:stream:ordered") == "train"
+ assert DatatypeHelper.fold("train:evalmode") == "train"
+ assert DatatypeHelper.fold("train:stream:evalmode") == "train"
+
+ assert DatatypeHelper.fold("valid") == "valid"
+ assert DatatypeHelper.fold("valid:stream") == "valid"
+
+ assert DatatypeHelper.fold("test") == "test"
+ assert DatatypeHelper.fold("test:stream") == "test"
+
+ def test_should_cycle(self):
+ assert DatatypeHelper.should_cycle("train") is True
+ assert DatatypeHelper.should_cycle("train:evalmode") is False
+ assert DatatypeHelper.should_cycle("train:ordered") is False
+ assert DatatypeHelper.should_cycle("train:stream") is True
+
+ assert DatatypeHelper.should_cycle("valid") is False
+ assert DatatypeHelper.should_cycle("valid:stream") is False
+
+ assert DatatypeHelper.should_cycle("test") is False
+ assert DatatypeHelper.should_cycle("test:stream") is False
+
+ def test_should_shuffle(self):
+ assert DatatypeHelper.should_shuffle("train") is True
+ assert DatatypeHelper.should_shuffle("train:evalmode") is False
+ assert DatatypeHelper.should_shuffle("train:ordered") is False
+ assert DatatypeHelper.should_shuffle("train:stream") is False
+
+ assert DatatypeHelper.should_shuffle("valid") is False
+ assert DatatypeHelper.should_shuffle("valid:stream") is False
+
+ assert DatatypeHelper.should_shuffle("test") is False
+ assert DatatypeHelper.should_shuffle("test:stream") is False
+
+ def test_is_training(self):
+ assert DatatypeHelper.is_training("train") is True
+ assert DatatypeHelper.is_training("train:evalmode") is False
+ assert DatatypeHelper.is_training("train:ordered") is True
+ assert DatatypeHelper.is_training("train:stream") is True
+
+ assert DatatypeHelper.is_training("valid") is False
+ assert DatatypeHelper.is_training("valid:stream") is False
+
+ assert DatatypeHelper.is_training("test") is False
+ assert DatatypeHelper.is_training("test:stream") is False
+
+
if __name__ == '__main__':
unittest.main()
| Document that when subclassing DialogTeacher `opt["datafile"]` needs to be populated
Documentation should indicate that `opt["datafile"]` needs to be populated (if `shared` is not) before calling `super().__init__()` when subclassing DialogTeacher, since the current error/traceback (example below) doesn't clearly indicate what the issue is without diving deeply into the code.
Alternatively, catch this in DialogTeacher and produce a more meaningful error, or provide a default in DialogTeacher if `opt["datafile"]` isn't set.
```
Traceback (most recent call last):
File ".../parlai/core/worlds.py", line 1211, in _create_task_agents
task_agents = my_module.create_agents(opt) # type: ignore
AttributeError: module 'parlai_fb.tasks.work_in_progress.agents' has no attribute 'create_agents'
During handling of the above exception, another exception occurred:
...
<snip>
...
super().__init__(opt, shared)
File ".../parlai/core/teachers.py", line 546, in __init__
**kwargs,
File ".../parlai/core/teachers.py", line 678, in __init__
self._load(data_loader, opt['datafile'])
KeyError: 'datafile'
```
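For reference, a minimal sketch of the pattern the documentation could show (the teacher name is hypothetical; `DatatypeHelper.fold` is the helper the improved error message in this PR recommends):

```python
from parlai.core.teachers import DialogTeacher
from parlai.utils.data import DatatypeHelper


class MyTeacher(DialogTeacher):  # hypothetical example teacher
    def __init__(self, opt, shared=None):
        # DialogTeacher reads opt['datafile'] in its __init__, so it must be
        # set *before* calling super().__init__ (unless shared is provided).
        opt['datafile'] = DatatypeHelper.fold(opt['datatype'])  # e.g. 'train'
        super().__init__(opt, shared)

    def setup_data(self, datafile):
        # datafile receives whatever was stored in opt['datafile'] above
        yield {'text': 'hello', 'label': 'world'}, True
```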
| 2020-10-01T04:04:44Z | [] | [] |
|
facebookresearch/ParlAI | 3,196 | facebookresearch__ParlAI-3196 | [
"3182"
] | 080b30068fff4e02fd73f4d2a8dbbef24a3575f1 | diff --git a/parlai/core/script.py b/parlai/core/script.py
--- a/parlai/core/script.py
+++ b/parlai/core/script.py
@@ -126,6 +126,9 @@ class _SupercommandParser(ParlaiParser):
"""
def __init__(self, *args, **kwargs):
+ # used to target help messages more correctly, see GH #3182
+ self._help_subparser = None
+
from parlai.utils.strings import colorize
logo = ""
@@ -147,6 +150,22 @@ def add_extra_args(self, args):
for _, v in sa.choices.items():
v.add_extra_args(args)
+ def parse_known_args(self, args=None, namespace=None, nohelp=False):
+ known, unused = super().parse_known_args(args, namespace, nohelp)
+ if hasattr(known, '_subparser'):
+ # keep this around to keep the print message more in tune
+ self._help_subparser = known._subparser
+ return known, unused
+
+ def print_help(self):
+ """
+ Print help, possibly deferring to the appropriate subcommand.
+ """
+ if self._help_subparser:
+ self._help_subparser.print_help()
+ else:
+ return super().print_help()
+
def add_subparsers(self, **kwargs):
return super().add_subparsers(**kwargs)
| diff --git a/tests/test_script.py b/tests/test_script.py
--- a/tests/test_script.py
+++ b/tests/test_script.py
@@ -92,8 +92,17 @@ def test_help(self):
script.superscript_main(args=['help'])
assert 'test_script' in output.getvalue()
assert 'hidden_script' not in output.getvalue()
+ # showing help for the super command, not the subcommand
+ assert '--foo' not in output.getvalue()
with testing_utils.capture_output() as output:
script.superscript_main(args=['helpall'])
assert 'test_script' in output.getvalue()
assert 'hidden_script' in output.getvalue()
+
+ def test_subcommand_help(self):
+ with testing_utils.capture_output() as output:
+ with self.assertRaises(SystemExit):
+ script.superscript_main(args=['test_script', 'foo'])
+ assert 'parlai test_script' in output.getvalue()
+ assert '--foo' in output.getvalue()
| Help incorrectly shows when flag isn't recognized
**Bug description**
```
$ parlai i -mf /checkpoint/parlai/zoo/q_function/generative2.7B_bst_0331/model --display-fi 1
usage: parlai [-h] [--helpall] [--version] COMMAND ...
_
/")
//)
==//'=== ParlAI
/
optional arguments:
-h, --help show this help message and exit
--helpall List all commands, including advanced ones.
--version Prints version info and exit.
Commands:
display_data (dd) Display data from a task
display_model (dm) Display model predictions.
eval_model (em, eval) Evaluate a model
train_model (tm, train) Train a model
interactive (i) Interactive chat with a model on the command line
safe_interactive Like interactive, but adds a safety filter
self_chat Generate self-chats of a model
Parse Error: unrecognized arguments: --display-fi 1
```
Note that the help for the `parlai` supercommand is shown, rather than the help for `interactive`.
| 2020-10-14T04:20:45Z | [] | [] |
|
facebookresearch/ParlAI | 3,347 | facebookresearch__ParlAI-3347 | [
"3255"
] | 23595e563a5271c52b60c32d24936e80391a2523 | diff --git a/parlai/core/metrics.py b/parlai/core/metrics.py
--- a/parlai/core/metrics.py
+++ b/parlai/core/metrics.py
@@ -12,7 +12,7 @@
from collections import Counter
import functools
import datetime
-from typing import Union, List, Optional, Tuple, Set, Any, Dict
+from typing import Union, List, Optional, Tuple, Set, Any, Dict, Counter as TCounter
import torch
@@ -23,7 +23,13 @@
DEFAULT_METRICS = {'bleu-4', 'accuracy', 'f1'}
ROUGE_METRICS = {'rouge-1', 'rouge-2', 'rouge-L'}
BLEU_METRICS = {'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4'}
-ALL_METRICS = DEFAULT_METRICS | ROUGE_METRICS | BLEU_METRICS
+DISTINCT_METRICS = {
+ 'interdistinct-1',
+ 'interdistinct-2',
+ 'intradistinct-1',
+ 'intradistinct-2',
+}
+ALL_METRICS = DEFAULT_METRICS | ROUGE_METRICS | BLEU_METRICS | DISTINCT_METRICS
try:
@@ -523,6 +529,60 @@ def compute_many(
)
+class IntraDistinctMetric(AverageMetric):
+ """
+ Compute intra-distinct (per-utterance).
+ """
+
+ @classmethod
+ def _ngram(cls, seq, n: int):
+ for i in range(len(seq) - n + 1):
+ yield tuple(seq[i : i + n])
+
+ @classmethod
+ def compute(cls, text: str, ngram: int = 1):
+ """
+ :param text:
+ The text to compute metric over
+ :param ngram:
+ n-gram length
+ """
+ tokens = normalize_answer(text).split()
+ counts = Counter(cls._ngram(tokens, ngram))
+ # computed per-example, macro averaged across examples
+ intra = max(len(counts), 1e-12) / max(sum(counts.values()), 1e-5)
+ return IntraDistinctMetric(intra, 1.0)
+
+
+class InterDistinctMetric(Metric):
+ """
+ Compute inter-distinct metric over corpus-level.
+ """
+
+ def __init__(self, counts: TCounter[Tuple]):
+ """
+ :param counts:
+ collections.Counter of ngram -> frequency
+ """
+ self._counts = counts
+
+ def __add__(self, other):
+ return InterDistinctMetric(self._counts + other._counts)
+
+ def value(self):
+ return max(len(self._counts), 1e-12) / max(sum(self._counts.values()), 1e-5)
+
+ @classmethod
+ def _ngram(cls, seq, n):
+ for i in range(len(seq) - n + 1):
+ yield tuple(seq[i : i + n])
+
+ @classmethod
+ def compute(cls, text, ngram=1):
+ tokens = normalize_answer(text).split()
+ return InterDistinctMetric(Counter(cls._ngram(tokens, ngram)))
+
+
def normalize_answer(s):
"""
Lower text and remove punctuation, articles and extra whitespace.
@@ -679,6 +739,8 @@ def _infer_metrics(cli_arg: str) -> Set[str]:
col |= ROUGE_METRICS
elif n == 'bleu':
col |= BLEU_METRICS
+ elif n == 'distinct':
+ col |= DISTINCT_METRICS
elif n == 'all':
col |= ALL_METRICS
else:
@@ -732,6 +794,16 @@ def evaluate_response(self, observation: Message, labels: List[str]) -> None:
self.add('rouge_2', r2)
if 'rouge-L' in self._metrics_list and rL:
self.add('rouge_L', rL)
+ # compute distinct-k
+ for k in [1, 2]:
+ if f'interdistinct-{k}' in self._metrics_list:
+ self.add(
+ f'interdistinct-{k}', InterDistinctMetric.compute(prediction, k)
+ )
+ if f'intradistinct-{k}' in self._metrics_list:
+ self.add(
+ f'intradistinct-{k}', IntraDistinctMetric.compute(prediction, k)
+ )
# Ranking metrics.
self._update_ranking_metrics(observation, labels)
| diff --git a/tests/test_metrics.py b/tests/test_metrics.py
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -18,6 +18,8 @@
TimerMetric,
aggregate_unnamed_reports,
aggregate_named_reports,
+ InterDistinctMetric,
+ IntraDistinctMetric,
)
from parlai.core.torch_classifier_agent import ConfusionMatrixMetric, WeightedF1Metric
@@ -150,7 +152,7 @@ def test_multithreaded(self):
m2.add('key', SumMetric(1))
m3.add('key', SumMetric(2))
m.add('key', SumMetric(3))
- m.report()['key'] == 6
+ assert m.report()['key'] == 6
def test_verymultithreaded(self):
# legacy test, but useful all the same, for ensuring
@@ -385,5 +387,30 @@ def test_classifier_metrics(self):
assert agg['weighted_f1'] == (0.5 + (2 / 3) * 0.5 + (4 / 7) * 0.5) / 2
+class TestDistinct(unittest.TestCase):
+ def test_inter_distinct(self):
+ # 3 n-grams, all appearing once
+ m = InterDistinctMetric.compute("this is some test", 2)
+ self.assertAlmostEqual(m, 1.0)
+ # 3-grams, each appearing twice
+ self.assertAlmostEqual(m + m, 0.5)
+
+ def test_inter_distinct_unigram(self):
+ m1 = InterDistinctMetric.compute("this test", 1)
+ self.assertAlmostEqual(m1, 1.0, delta=0.001)
+ m2 = InterDistinctMetric.compute("another test", 1)
+ self.assertAlmostEqual(m2, 1.0, delta=0.001)
+ # we now have 4 tokens, 3 words
+ self.assertAlmostEqual(m1 + m2, 3 / 4)
+
+ def test_intra_distinct(self):
+ # 4/5 are unique
+ m1 = IntraDistinctMetric.compute("this is some test test", 1)
+ self.assertAlmostEqual(m1, 4 / 5)
+ m2 = IntraDistinctMetric.compute("this test test test test", 1)
+ self.assertAlmostEqual(m2, 2 / 5)
+ self.assertAlmostEqual(m1 + m2, 3 / 5)
+
+
if __name__ == '__main__':
unittest.main()
| Could eval_model compute the distinct metrics?
Hi, I would like to compute **the distinct metrics** using the `eval_model` command.
But judging from the CLI arguments (as follows), it does not seem to support computing **the distinct metrics**. Is that right?
```
-mcs, --metrics | List of metrics to show/compute, e.g. all, default, or give a list split by, like ppl,f1, accuracy, hits@1, rouge, bleu. the rouge metrics will be computed as rouge-1, rouge-2, and rouge-l
```
| We don't have any implementation of that right now. I would welcome a PR implementing it.
Implementation would need to go here:
https://github.com/facebookresearch/ParlAI/blob/fb44a721cd5288fa097622e3d0b95e3379f29e74/parlai/core/metrics.py#L719-L735
What's the definition of distinct? Unique n-grams over generations from the entire corpus, or unique n-grams per utterance?
The distinct metric was first introduced in [A Diversity-Promoting Objective Function for Neural Conversation Models](https://arxiv.org/pdf/1510.03055.pdf); the original paper describes it as follows:
> We report the degree of diversity by calculating the number of distinct unigrams and bigrams in generated responses. The value is scaled by the total number of generated tokens to avoid favoring long sentences (shown as distinct-1 and distinct-2 in Tables 2 and 3).
But they didn't release their implementation; we usually compute the n-grams over generations from the entire corpus (the value would be much higher than in the original paper if computed per utterance).
[Here](https://github.com/PaddlePaddle/models/blob/release/1.6/PaddleNLP/Research/Dialogue-PLATO/plato/metrics/metrics.py) is an implementation. I will add it to ParlAI later :(
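To make the corpus-level convention concrete, here is a tiny illustrative sketch (the responses are made up; it divides by the total number of pooled n-grams, so for unigrams this matches scaling by the total number of generated tokens):
```python
from collections import Counter


def corpus_distinct(responses, n=1):
    # Pool n-grams from every generated response, then divide the number of
    # unique n-grams by the total number of n-grams across the whole corpus.
    grams = Counter(
        tuple(tokens[i : i + n])
        for tokens in (r.split() for r in responses)
        for i in range(len(tokens) - n + 1)
    )
    return len(grams) / max(sum(grams.values()), 1)


responses = ["i am fine", "i am good", "i am fine thanks"]
print(corpus_distinct(responses, 1))  # 5 unique unigrams / 10 unigrams = 0.5
print(corpus_distinct(responses, 2))  # 4 unique bigrams / 7 bigrams ~= 0.57
```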
Hm, the method I linked you to assumes metrics are computed per-utterance, independently across the corpus. Computing IntraDistinct should be relatively straightforward. InterDistinct is a bit trickier, as we need to keep the ngrams across the corpus. Something like this should work, at least when not in distributed mode.
```python
class InterDistinctMetric(Metric):
    def __init__(self, counts):
        self._counts = counts

    def __add__(self, other):
        return InterDistinctMetric(self._counts + other._counts)

    def value(self):
        return (len(self._counts) + 1e-12) / (sum(self._counts.values()) + 1e-5)

    @classmethod
    def _ngram(cls, seq, n):
        for i in range(len(seq) - n + 1):
            # yield tuples so the n-grams are hashable and can be counted
            yield tuple(seq[i : i + n])

    @classmethod
    def compute(cls, text, ngram=1):
        tokens = normalize_answer(text).split()
        return InterDistinctMetric(Counter(cls._ngram(tokens, ngram)))
```
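And for the per-utterance case, a minimal sketch along the same lines: it mirrors the `IntraDistinctMetric` added in the patch above and assumes `AverageMetric`, `Counter`, and `normalize_answer` from the same module as the snippet above.
```python
class IntraDistinctMetric(AverageMetric):
    """
    Per-utterance distinct-n: unique n-grams / total n-grams of one response.
    """

    @classmethod
    def _ngram(cls, seq, n):
        for i in range(len(seq) - n + 1):
            # tuples are hashable, so they can be counted
            yield tuple(seq[i : i + n])

    @classmethod
    def compute(cls, text, ngram=1):
        tokens = normalize_answer(text).split()
        counts = Counter(cls._ngram(tokens, ngram))
        intra = max(len(counts), 1e-12) / max(sum(counts.values()), 1e-5)
        # numerator/denominator form: per-example ratios get macro-averaged
        return IntraDistinctMetric(intra, 1.0)
```
A response like "this test test" then scores 2/3 for unigrams, and these per-example ratios are averaged over the evaluation run.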
OK, thanks! I'll test this. | 2021-01-05T23:26:44Z | [] | [] |
facebookresearch/ParlAI | 3,627 | facebookresearch__ParlAI-3627 | [
"3626"
] | aa71230f36597771bff316d39e9b65da82747512 | diff --git a/parlai/core/torch_agent.py b/parlai/core/torch_agent.py
--- a/parlai/core/torch_agent.py
+++ b/parlai/core/torch_agent.py
@@ -1687,7 +1687,7 @@ def batchify(self, obs_batch, sort=False):
)
if any('label_truncated_length' in ex for ex in exs):
label_truncated_lengths = torch.LongTensor(
- [ex.get('label_truncated_length') for ex in exs]
+ [ex.get('label_truncated_length', 0) for ex in exs]
)
field = 'labels' if labels_avail else 'eval_labels'
diff --git a/parlai/tasks/cornell_movie/agents.py b/parlai/tasks/cornell_movie/agents.py
--- a/parlai/tasks/cornell_movie/agents.py
+++ b/parlai/tasks/cornell_movie/agents.py
@@ -4,44 +4,67 @@
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
-from parlai.core.teachers import FbDeprecatedDialogTeacher
+from parlai.core.teachers import DialogTeacher
from .build import build
from parlai.utils.data import DatatypeHelper
import copy
import os
+import codecs
-def _path(opt, filtered):
- # Build the data if it doesn't exist.
- build(opt)
- dt = opt['datatype'].split(':')[0]
- return os.path.join(opt['datapath'], 'CornellMovie', dt + filtered + '.txt')
+def _path(opt, *additions):
+ return os.path.join(opt['datapath'], 'CornellMovie', *additions)
-class DefaultTeacher(FbDeprecatedDialogTeacher):
+class DefaultTeacher(DialogTeacher):
+ DOUBLE = False
+
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
- opt['datafile'] = _path(opt, '')
- opt['cands_datafile'] = opt['datafile']
self.fold = DatatypeHelper.fold(opt['datatype'])
+ build(opt)
+ opt['datafile'] = _path(opt, self.fold + '.txt')
super().__init__(opt, shared)
- def num_examples(self):
- if self.fold == 'train':
- return 133125
- elif self.fold == 'valid':
- return 16759
- elif self.fold == 'test':
- return 16611
+ def setup_data(self, datafile):
+ lines_file = _path(self.opt, 'cornell movie-dialogs corpus', 'movie_lines.txt')
+ convo_file = _path(
+ self.opt, 'cornell movie-dialogs corpus', 'movie_conversations.txt'
+ )
+
+ lines = {}
+
+ codecs.register_error('strict', codecs.ignore_errors)
+ with codecs.open(lines_file, 'r') as f:
+ for line in f:
+ l = line.split(' +++$+++ ')
+ lines[l[0]] = ' '.join(l[4:]).strip('\n').replace('\t', ' ')
+
+ cnt = 0
+ with codecs.open(convo_file, 'r') as f:
+ for cnt, line in enumerate(f, 1):
+ l = line.split(' ')
+ convo = ' '.join(l[6:]).strip('\n').strip('[').strip(']')
+ c = convo.replace("'", '').replace(' ', '').split(',')
+
+ texts = [lines[l] for l in c]
+
+ if (cnt % 10 == 0) and self.fold != 'test':
+ continue
+ elif (cnt % 10 == 1) and self.fold != 'valid':
+ continue
+ elif (cnt % 10 > 1) and self.fold != 'train':
+ continue
+
+ for i, (prompt, response) in enumerate(zip(texts[::2], texts[1::2])):
+ yield {'text': prompt, 'label': response}, i == 0
- def num_episodes(self):
- if self.fold == 'train':
- return 66478
- elif self.fold == 'valid':
- return 8310
- elif self.fold == 'test':
- return 8309
+ if self.DOUBLE:
+ for i, (prompt, response) in enumerate(
+ zip(texts[1::2], texts[2::2])
+ ):
+ yield {'text': prompt, 'label': response}, i == 0
class DoubleTeacher(DefaultTeacher):
@@ -49,64 +72,4 @@ class DoubleTeacher(DefaultTeacher):
This version creates text-label pairs from the perspective of both speakers.
"""
- def num_examples(self):
- if self.fold == 'train':
- return 176975
- elif self.fold == 'valid':
- return 22349
- elif self.fold == 'test':
- return 22013
-
- def num_episodes(self):
- if self.fold == 'train':
- return 102401
- elif self.fold == 'valid':
- return 12806
- elif self.fold == 'test':
- return 12790
-
- def _rebuild(self, entries):
- new_list = []
- if len(entries) > 0:
- # add all ( y_t => x_(t+1) ) pairs
- new_list.extend(
- [
- (entries[i][1][0], [entries[i + 1][0]])
- for i in range(len(entries) - 1)
- ]
- )
- return new_list
-
- def _is_valid(self, entry):
- if entry[0] == '' or entry[1] is None:
- return False
- return True
-
- def setup_data(self, path):
- """
- Adds additional perspectives. For example, in the conversation:
-
- x1 y1
- x2 y2
- x3
-
- Creates the additional dialog:
-
- y1 x2
- y2 x3
- """
- # this shows conversations in both directions
- alternate = []
- for entry, new in super().setup_data(path):
- if new:
- for i, e in enumerate(self._rebuild(alternate)):
- if self._is_valid(e):
- yield e, i == 0
- alternate.clear()
- alternate.append(entry)
- if self._is_valid(entry):
- yield entry, new
- if alternate:
- for i, e in enumerate(self._rebuild(alternate)):
- if self._is_valid(e):
- yield e, i == 0
+ DOUBLE = True
diff --git a/parlai/tasks/cornell_movie/build.py b/parlai/tasks/cornell_movie/build.py
--- a/parlai/tasks/cornell_movie/build.py
+++ b/parlai/tasks/cornell_movie/build.py
@@ -20,55 +20,11 @@
]
-def create_fb_format(lines_file, convo_file, outpath):
- print('[building fbformat]')
- with PathManager.open(
- os.path.join(outpath, 'train.txt'), 'w'
- ) as ftrain, PathManager.open(
- os.path.join(outpath, 'valid.txt'), 'w'
- ) as fvalid, PathManager.open(
- os.path.join(outpath, 'test.txt'), 'w'
- ) as ftest:
- lines = {}
-
- codecs.register_error('strict', codecs.ignore_errors)
- with codecs.open(lines_file, 'r') as f:
- for line in f:
- l = line.split(' +++$+++ ')
- lines[l[0]] = ' '.join(l[4:]).strip('\n').replace('\t', ' ')
-
- cnt = 0
- with codecs.open(convo_file, 'r') as f:
- for line in f:
- l = line.split(' ')
- convo = ' '.join(l[6:]).strip('\n').strip('[').strip(']')
- c = convo.replace("'", '').replace(' ', '').split(',')
-
- # forward conversation
- s = ''
- index = 0
- for i in range(0, len(c), 2):
- index += 1
- s += str(index) + ' ' + lines[c[i]]
- if len(c) > i + 1:
- s += '\t' + lines[c[i + 1]]
- s += '\n'
-
- cnt = cnt + 1
- handle = ftrain
- if (cnt % 10) == 0:
- handle = ftest
- if (cnt % 10) == 1:
- handle = fvalid
- handle.write(s + '\n')
-
-
def build(opt):
dpath = os.path.join(opt['datapath'], 'CornellMovie')
- version = None
+ version = 'v1.01'
if not build_data.built(dpath, version_string=version):
- print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
@@ -78,12 +34,5 @@ def build(opt):
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
- dpext = os.path.join(dpath, 'cornell movie-dialogs corpus')
- create_fb_format(
- os.path.join(dpext, 'movie_lines.txt'),
- os.path.join(dpext, 'movie_conversations.txt'),
- dpath,
- )
-
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
| diff --git a/parlai/tasks/cornell_movie/test/cornell_movie_double_test.yml b/parlai/tasks/cornell_movie/test/cornell_movie_double_test.yml
--- a/parlai/tasks/cornell_movie/test/cornell_movie_double_test.yml
+++ b/parlai/tasks/cornell_movie/test/cornell_movie_double_test.yml
@@ -3,152 +3,28 @@ acts:
eval_labels:
- You're sweet.
id: cornell_movie:double
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: You have my word. As a gentleman
- - episode_done: false
eval_labels:
- What crap?
id: cornell_movie:double
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: do you listen to this crap?
- - episode_done: true
eval_labels:
- Thank God! If I had to hear one more story about your coiffure...
id: cornell_movie:double
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: Me. This endless ...blonde babble. I'm like, boring myself.
- - episode_done: true
eval_labels:
- Me. This endless ...blonde babble. I'm like, boring myself.
id: cornell_movie:double
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
text: What crap?
- - episode_done: true
eval_labels:
- Sometimes I wonder if the guys we're supposed to want to go out with are the
ones we actually want to go out with, you know?
id: cornell_movie:double
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: Bianca, I don't think the highlights of dating Joey Dorsey are going to
include door-opening and coat-holding.
-num_episodes: 12790
-num_examples: 22013
+num_episodes: 8309
+num_examples: 13725
diff --git a/parlai/tasks/cornell_movie/test/cornell_movie_double_train.yml b/parlai/tasks/cornell_movie/test/cornell_movie_double_train.yml
--- a/parlai/tasks/cornell_movie/test/cornell_movie_double_train.yml
+++ b/parlai/tasks/cornell_movie/test/cornell_movie_double_train.yml
@@ -1,145 +1,31 @@
acts:
- - episode_done: true
id: cornell_movie:double
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Forget it.
- reward: 0
text: You're asking me out. That's so cute. What's your name again?
- - episode_done: false
id: cornell_movie:double
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Cameron.
- reward: 0
text: No, no, it's my fault -- we didn't have a proper introduction ---
- - episode_done: true
id: cornell_movie:double
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Seems like she could get a date easy enough...
- reward: 0
text: The thing is, Cameron -- I'm at the mercy of a particularly hideous breed
of loser. My sister. I can't date until she does.
- - episode_done: true
id: cornell_movie:double
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- The thing is, Cameron -- I'm at the mercy of a particularly hideous breed of
loser. My sister. I can't date until she does.
text: Cameron.
- - episode_done: true
id: cornell_movie:double
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Unsolved mystery. She used to be really popular when she started high school,
then it was just like she got sick of it or something.
- reward: 0
text: Why?
-num_episodes: 102401
-num_examples: 176975
+num_episodes: 66478
+num_examples: 110496
diff --git a/parlai/tasks/cornell_movie/test/cornell_movie_double_valid.yml b/parlai/tasks/cornell_movie/test/cornell_movie_double_valid.yml
--- a/parlai/tasks/cornell_movie/test/cornell_movie_double_valid.yml
+++ b/parlai/tasks/cornell_movie/test/cornell_movie_double_valid.yml
@@ -3,167 +3,28 @@ acts:
eval_labels:
- Well, I thought we'd start with pronunciation, if that's okay with you.
id: cornell_movie:double
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: Can we make this quick? Roxanne Korrine and Andrew Barrett are having an
incredibly horrendous public break- up on the quad. Again.
- - episode_done: true
eval_labels:
- Okay... then how 'bout we try out some French cuisine. Saturday? Night?
id: cornell_movie:double
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: Not the hacking and gagging and spitting part. Please.
- - episode_done: true
eval_labels:
- Not the hacking and gagging and spitting part. Please.
id: cornell_movie:double
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
text: Well, I thought we'd start with pronunciation, if that's okay with you.
- - episode_done: true
eval_labels:
- Eber's Deep Conditioner every two days. And I never, ever use a blowdryer without
the diffuser attachment.
id: cornell_movie:double
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: How do you get your hair to look like that?
- - episode_done: false
eval_labels:
- What good stuff?
id: cornell_movie:double
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: I figured you'd get to the good stuff eventually.
-num_episodes: 12806
-num_examples: 22349
+num_episodes: 8310
+num_examples: 13914
diff --git a/parlai/tasks/cornell_movie/test/cornell_movie_test.yml b/parlai/tasks/cornell_movie/test/cornell_movie_test.yml
--- a/parlai/tasks/cornell_movie/test/cornell_movie_test.yml
+++ b/parlai/tasks/cornell_movie/test/cornell_movie_test.yml
@@ -3,127 +3,29 @@ acts:
eval_labels:
- You're sweet.
id: cornell_movie
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: You have my word. As a gentleman
- - episode_done: false
eval_labels:
- What crap?
id: cornell_movie
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: do you listen to this crap?
- - episode_done: true
eval_labels:
- Thank God! If I had to hear one more story about your coiffure...
id: cornell_movie
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: Me. This endless ...blonde babble. I'm like, boring myself.
-- - episode_done: false
+- - episode_done: true
eval_labels:
- Sometimes I wonder if the guys we're supposed to want to go out with are the
ones we actually want to go out with, you know?
id: cornell_movie
- label_candidates:
- - ' And he agreed?'
- - ' The only one who understands what this me-...'
- - '"...unsure whether or not Enemy Action..."'
- - '"A Reason to Love."'
- - '"A people is a detour of nature to get 6 or 7 great men - Yes, and then to
- get around them..." Nietzsche said that.'
- - '"Cora" is my part. You''ve got to tell Lloyd it''s for me.'
- - '"Debbie Does Dallas"... Hell, it''s in Russian. I can''t read it...'
- - '"Deserve" don''t mean shit, Little Bill.'
- - '"Deutchland, Deutchland ... "'
- - '"Do chickens give milk?"'
- - '[Your brother wants to talk to you.]'
- - '`Cause you know I can do other stuff. I mean, if you wanted me to talk or...'
- - and HERBERT exchange a glance. HONORA smiles at Juliet.
- - frowns at Henry.
- - how much weight have you lost?
- - kissed a lotta tadpoles. Listen, I been thinking about your problem. I'm not
- the guy to sponsor you. It would be unethical. But, there is something I could
- do for you. Putt-putt golf.
- - ooohhhhhh no.
- - xxxxxx
- - yeah, I had a bad night.
- - yeah.
- reward: 0
text: Bianca, I don't think the highlights of dating Joey Dorsey are going to
include door-opening and coat-holding.
- - episode_done: true
+ eval_labels:
+ - Can you at least start wearing a bra?
id: cornell_movie
- reward: 0
- text: All I know is -- I'd give up my private line to go out with a guy like Joey.
+ text: I have the potential to smack the crap out of you if you don't get out of
+ my way.
num_episodes: 8309
-num_examples: 16611
+num_examples: 13725
diff --git a/parlai/tasks/cornell_movie/test/cornell_movie_train.yml b/parlai/tasks/cornell_movie/test/cornell_movie_train.yml
--- a/parlai/tasks/cornell_movie/test/cornell_movie_train.yml
+++ b/parlai/tasks/cornell_movie/test/cornell_movie_train.yml
@@ -1,121 +1,30 @@
acts:
- - episode_done: true
id: cornell_movie
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Forget it.
- reward: 0
text: You're asking me out. That's so cute. What's your name again?
- - episode_done: false
id: cornell_movie
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Cameron.
- reward: 0
text: No, no, it's my fault -- we didn't have a proper introduction ---
- - episode_done: true
id: cornell_movie
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Seems like she could get a date easy enough...
- reward: 0
text: The thing is, Cameron -- I'm at the mercy of a particularly hideous breed
of loser. My sister. I can't date until she does.
-- - episode_done: false
+- - episode_done: true
id: cornell_movie
- label_candidates:
- - ' We have twenty-one minutes. Captain, we can still save V''ger... and
- ourselves.'
- - ' DELETE IN CUTTING'
- - ' I''ve got the real thing!'
- - ' Oh, we''re screwed, Night...'
- - ' ...Mason. I can barely hear you.'
- - ' ...checked.'
- - ' ...or it''ll ignite.'
- - ' ...shit!'
- - ' ...the night is a tunnel... a hole into tomorrow... if we''re to have a tomorrow...'
- - ' All right. Hey.'
- - xxxxxx
- - y-y-yes--
- - yeah, yes, hi, hello.
- - yeah.
- - yeah?
- - yet cannot love nor write it.
- - you'll need one more hit.
- - '{suddenly unctious) Oh, but of course ...'
- - '| It''s not working. You''re not doing it properly. Ni!'
- - ~Don't shoot, G-Men..."
labels:
- Unsolved mystery. She used to be really popular when she started high school,
then it was just like she got sick of it or something.
- reward: 0
text: Why?
- - episode_done: true
id: cornell_movie
- reward: 0
- text: That's a shame.
+ labels:
+ - Let me see what I can do.
+ text: Gosh, if only we could find Kat a boyfriend...
num_episodes: 66478
-num_examples: 133125
+num_examples: 110496
diff --git a/parlai/tasks/cornell_movie/test/cornell_movie_valid.yml b/parlai/tasks/cornell_movie/test/cornell_movie_valid.yml
--- a/parlai/tasks/cornell_movie/test/cornell_movie_valid.yml
+++ b/parlai/tasks/cornell_movie/test/cornell_movie_valid.yml
@@ -3,168 +3,28 @@ acts:
eval_labels:
- Well, I thought we'd start with pronunciation, if that's okay with you.
id: cornell_movie
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: Can we make this quick? Roxanne Korrine and Andrew Barrett are having an
incredibly horrendous public break- up on the quad. Again.
- - episode_done: true
eval_labels:
- Okay... then how 'bout we try out some French cuisine. Saturday? Night?
id: cornell_movie
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: Not the hacking and gagging and spitting part. Please.
- - episode_done: true
eval_labels:
- Eber's Deep Conditioner every two days. And I never, ever use a blowdryer without
the diffuser attachment.
id: cornell_movie
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: How do you get your hair to look like that?
- - episode_done: false
eval_labels:
- What good stuff?
id: cornell_movie
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: I figured you'd get to the good stuff eventually.
- - episode_done: true
eval_labels:
- Like my fear of wearing pastels?
id: cornell_movie
- label_candidates:
- - ' Hey, actually...'
- - ' ...-onds...with just a fraction of what''s in...'
- - ' No...'
- - ' That''s why I didn''t give it...'
- - ' Yeah...'
- - '"A faithful heart makes wishes come true."'
- - '"Age of Enlightenment". Shit. Like some waitress in a Las Vegas coffee shop
- is going to get an obscure French philosophical reference. How demeaning. I
- may as well have just said "Let me jump your ignorant bones."...'
- - '"Are not capable of love" is what you mean.'
- - '"At first I thought it was infatuation ...But oh it''s lasted so long..."'
- - '"At the center of the investigation are well-known Washington-area attorneys
- Robert Dean and Rachel Banks."'
- - think that's, uh-
- - uh uh.
- - wHAT'S My status?! I've lost three men and your worthless fuck! After I kill
- this asshole I'm coming your Yllo!
- - was fascinated with you.... a real live detective.... You used to tell me
- the most wonderful stories. Were they true?
- - we've got kidnapping, grand theft auto, burglary, and two counts of murder on
- you, and I'm gonna see to it personally that it sticks!
- - xxxxxx
- - yeah, good, ok.
- - you woke me up to tell me that?
- - you're gonna like night clubs, they're really a lotta fun.
- - '{baffled) I don''t follow you.'
- reward: 0
text: The "real you".
num_episodes: 8310
-num_examples: 16759
+num_examples: 13914
| Fail to train any agent with the cornell_movie dataset
For example, when I train transformer/generator on the cornell_movie dataset with the command:
```
parlai train_model -t cornell_movie -m transformer/generator -mf ./data/models/transformer_generator/model_cornell_movie_lr1 -gpu 0 -bs 64 -eps 11 -sval True -veps 1 -lr 1
```
The following error occurred:
```
17:54:42 | Current ParlAI commit: 2426d74b93184689be5067bdbf99f1ba96748f7b
17:54:43 | creating task(s): cornell_movie
17:54:43 | loading fbdialog data: /data4/lulc/ben/util/ParlAI/data/CornellMovie/train.txt
17:54:44 | training...
Traceback (most recent call last):
File "/data4/lulc/anaconda3/envs/BEN/bin/parlai", line 33, in <module>
sys.exit(load_entry_point('parlai', 'console_scripts', 'parlai')())
File "/data4/lulc/ben/util/ParlAI/parlai/__main__.py", line 14, in main
superscript_main()
File "/data4/lulc/ben/util/ParlAI/parlai/core/script.py", line 306, in superscript_main
return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)
File "/data4/lulc/ben/util/ParlAI/parlai/core/script.py", line 89, in _run_from_parser_and_opt
return script.run()
File "/data4/lulc/ben/util/ParlAI/parlai/scripts/train_model.py", line 935, in run
return self.train_loop.train()
File "/data4/lulc/ben/util/ParlAI/parlai/scripts/train_model.py", line 899, in train
for _train_log in self.train_steps():
File "/data4/lulc/ben/util/ParlAI/parlai/scripts/train_model.py", line 802, in train_steps
world.parley()
File "/data4/lulc/ben/util/ParlAI/parlai/core/worlds.py", line 865, in parley
batch_act = self.batch_act(agent_idx, batch_observations[agent_idx])
File "/data4/lulc/ben/util/ParlAI/parlai/core/worlds.py", line 833, in batch_act
batch_actions = a.batch_act(batch_observation)
File "/data4/lulc/ben/util/ParlAI/parlai/core/torch_agent.py", line 2131, in batch_act
batch = self.batchify(observations)
File "/data4/lulc/ben/util/ParlAI/parlai/core/torch_generator_agent.py", line 658, in batchify
batch = super().batchify(obs_batch, sort=sort)
File "/data4/lulc/ben/util/ParlAI/parlai/core/torch_agent.py", line 1690, in batchify
[ex.get('label_truncated_length') for ex in exs]
TypeError: an integer is required (got type NoneType)
```
I have also tried adjusting command-line parameters such as '--truncate', '--text-truncate', and '--label-truncate', but every attempt failed.
Training any other agent on this dataset fails in the same way.
So, what is the correct way to train on the cornell_movie dataset?
Thanks~
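For reference, the crash reduces to building a `LongTensor` from a list that contains `None`: `dict.get('label_truncated_length')` returns `None` for any example in the batch that lacks the key, and the fix in the patch above supplies a default of 0. A minimal illustrative sketch of the failure and the fix (the toy observation dicts are made up):
```python
import torch

# Two toy observations: the second one has no 'label_truncated_length' key.
exs = [{'label_truncated_length': 3}, {}]

try:
    # Without a default, the missing key yields None and tensor construction
    # raises a TypeError like the one in the traceback above.
    torch.LongTensor([ex.get('label_truncated_length') for ex in exs])
except TypeError as err:
    print(err)

# With a default of 0, as in the patch, the batch can be built.
lengths = torch.LongTensor([ex.get('label_truncated_length', 0) for ex in exs])
print(lengths)  # tensor([3, 0])
```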
| 2021-04-28T12:48:14Z | [] | [] |
|
facebookresearch/ParlAI | 3,718 | facebookresearch__ParlAI-3718 | [
"3648"
] | 3cd86465cf73bb9e90a8f9cfbb3e46fcd525b366 | diff --git a/parlai/scripts/eval_model.py b/parlai/scripts/eval_model.py
--- a/parlai/scripts/eval_model.py
+++ b/parlai/scripts/eval_model.py
@@ -118,13 +118,27 @@ def _save_eval_stats(opt, report):
f.write("\n") # for jq
+def get_task_world_logs(task, world_logs, is_multitask=False):
+ if not is_multitask:
+ return world_logs
+ else:
+ base_outfile, extension = os.path.splitext(world_logs)
+ return f'{base_outfile}_{task}{extension}'
+
+
def _eval_single_world(opt, agent, task):
logging.info(f'Evaluating task {task} using datatype {opt.get("datatype")}.')
# set up world logger
- world_logger = WorldLogger(opt) if opt['world_logs'] else None
-
task_opt = opt.copy() # copy opt since we're editing the task
task_opt['task'] = task
+ # add task suffix in case of multi-tasking
+ if opt['world_logs']:
+ task_opt['world_logs'] = get_task_world_logs(
+ task, task_opt['world_logs'], is_multitask=len(opt['task'].split(',')) > 1
+ )
+
+ world_logger = WorldLogger(task_opt) if task_opt['world_logs'] else None
+
world = create_task(task_opt, agent) # create worlds for tasks
# set up logging
@@ -161,10 +175,10 @@ def _eval_single_world(opt, agent, task):
world_logger.reset() # add final acts to logs
if is_distributed():
rank = get_rank()
- base_outfile, extension = os.path.splitext(opt['world_logs'])
+ base_outfile, extension = os.path.splitext(task_opt['world_logs'])
outfile = base_outfile + f'_{rank}' + extension
else:
- outfile = opt['world_logs']
+ outfile = task_opt['world_logs']
world_logger.write(outfile, world, file_format=opt['save_format'])
report = aggregate_unnamed_reports(all_gather_list(world.report()))
| diff --git a/tests/test_eval_model.py b/tests/test_eval_model.py
--- a/tests/test_eval_model.py
+++ b/tests/test_eval_model.py
@@ -8,6 +8,7 @@
import pytest
import unittest
import parlai.utils.testing as testing_utils
+from parlai.scripts.eval_model import get_task_world_logs
class TestEvalModel(unittest.TestCase):
@@ -227,6 +228,34 @@ def test_save_report(self):
json_lines = f.readlines()
assert len(json_lines) == 100
+ def test_save_multiple_logs(self):
+ """
+ Test that we can save multiple world_logs from eval model on multiple tasks.
+ """
+ with testing_utils.tempdir() as tmpdir:
+ log_report = os.path.join(tmpdir, 'world_logs.jsonl')
+ save_report = os.path.join(tmpdir, 'report')
+ multitask = 'integration_tests,blended_skill_talk'
+ opt = dict(
+ task=multitask,
+ model='repeat_label',
+ datatype='valid',
+ batchsize=97,
+ num_examples=100,
+ display_examples=False,
+ world_logs=log_report,
+ report_filename=save_report,
+ )
+ valid, test = testing_utils.eval_model(opt)
+
+ for task in multitask.split(','):
+ task_log_report = get_task_world_logs(
+ task, log_report, is_multitask=True
+ )
+ with PathManager.open(task_log_report) as f:
+ json_lines = f.readlines()
+ assert len(json_lines) == 100
+
if __name__ == '__main__':
unittest.main()
| Using world-logs while evaluating a model with more than one task only keeps the world log for the last task.
Trying to evaluate one model on two tasks, while keeping the model outputs in world logs, I noticed that there was no result from one of the tasks in the world log. To reproduce this, one may try running
```
parlai eval_model -t wizard_of_wikipedia,babi \
--world-logs /some/path/world-log \
--num-examples 1 --model repeat_label
```
After running this, there is only a single line in the `world-log.json` file. Checking the file, you can see `"id": "babi:Task1k:1"`, which may mean that parlai generates a separate world log for each task but assumes the same file name for all of them and writes over the previous ones.
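For reference, the fix in the patch above avoids the overwriting by deriving one log file per task, suffixing the task name onto the requested path. A small illustrative sketch of that naming scheme (the path and task list here are just examples):
```python
import os


def per_task_world_logs(task: str, world_logs: str, is_multitask: bool) -> str:
    # Mirrors get_task_world_logs from the patch: each task in a multitask
    # evaluation writes to its own file instead of sharing one name.
    if not is_multitask:
        return world_logs
    base, ext = os.path.splitext(world_logs)
    return f'{base}_{task}{ext}'


tasks = ['wizard_of_wikipedia', 'babi']
for task in tasks:
    print(per_task_world_logs(task, '/some/path/world-log.jsonl', len(tasks) > 1))
# -> /some/path/world-log_wizard_of_wikipedia.jsonl
# -> /some/path/world-log_babi.jsonl
```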
| This issue has not had activity in 30 days. Please feel free to reopen if you have more issues. You may apply the "never-stale" tag to prevent this from happening. | 2021-06-14T22:41:04Z | [] | [] |
facebookresearch/ParlAI | 4,140 | facebookresearch__ParlAI-4140 | [
"4121"
] | 06ac02db712b93aa7a86cd5bb5d5b48db2b8c1dd | diff --git a/parlai/scripts/interactive_web.py b/parlai/scripts/interactive_web.py
--- a/parlai/scripts/interactive_web.py
+++ b/parlai/scripts/interactive_web.py
@@ -25,6 +25,8 @@
import json
import time
+from parlai.agents.local_human.local_human import LocalHumanAgent
+
HOST_NAME = 'localhost'
PORT = 8080
@@ -266,14 +268,14 @@ def wait():
def interactive_web(opt):
global SHARED
- opt['task'] = 'parlai.agents.local_human.local_human:LocalHumanAgent'
+ human_agent = LocalHumanAgent(opt)
# Create model and assign it to the specified task
agent = create_agent(opt, requireModelExists=True)
agent.opt.log()
SHARED['opt'] = agent.opt
SHARED['agent'] = agent
- SHARED['world'] = create_task(SHARED.get('opt'), SHARED['agent'])
+ SHARED['world'] = create_task(SHARED.get('opt'), [human_agent, SHARED['agent']])
MyHandler.protocol_version = 'HTTP/1.0'
httpd = HTTPServer((opt['host'], opt['port']), MyHandler)
| diff --git a/tests/test_interactive.py b/tests/test_interactive.py
--- a/tests/test_interactive.py
+++ b/tests/test_interactive.py
@@ -85,7 +85,7 @@ def _run_test_repeat(self, tmpdir: str, fake_input: FakeInput):
class TestInteractiveWeb(unittest.TestCase):
- def test_iweb(self):
+ def test_iweb(self, task: str = None):
import threading
import random
import requests
@@ -93,10 +93,11 @@ def test_iweb(self):
import parlai.scripts.interactive_web as iweb
port = random.randint(30000, 40000)
+ kwargs = {'model': 'repeat_query', 'port': port}
+ if task:
+ kwargs['task'] = task
thread = threading.Thread(
- target=iweb.InteractiveWeb.main,
- kwargs={'model': 'repeat_query', 'port': port},
- daemon=True,
+ target=iweb.InteractiveWeb.main, kwargs=kwargs, daemon=True
)
thread.start()
iweb.wait()
@@ -123,6 +124,9 @@ def test_iweb(self):
iweb.shutdown()
+ def test_iweb_task(self):
+ self.test_iweb(task='convai2')
+
class TestProfileInteractive(unittest.TestCase):
def test_profile_interactive(self):
| RuntimeError on BB2 during interactive_web mode with blended_skill_talk task
**Bug description**
RuntimeError: There must be exactly two agents for this world.
Getting a RuntimeError when trying to run **interactive_web** with BB2 and --task blended_skill_talk.
**Reproduction steps**
```
python -m parlai.scripts.interactive_web --host 0.0.0.0 --task blended_skill_talk --model-file zoo:blenderbot2/blenderbot2_400M/model --search_server 'http://36.135.119.125:1111'
```
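For context, the "exactly two agents" error comes from the dialog world that gets created for the task, and the patch above addresses it by explicitly instantiating a local human agent to pair with the model agent. A minimal sketch of that corrected setup, mirroring the diff (option construction and the HTTP server are elided):
```python
from parlai.agents.local_human.local_human import LocalHumanAgent
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task


def build_interactive_world(opt):
    # One human agent plus the model agent gives the task world the two
    # agents it requires.
    human_agent = LocalHumanAgent(opt)
    model_agent = create_agent(opt, requireModelExists=True)
    return create_task(opt, [human_agent, model_agent])
```
Passing only the model agent to `create_task` for a dialog task like blended_skill_talk is what surfaces as the RuntimeError above.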
**Logs**
```
4:14:22 WARN | Overriding opt["task"] to blended_skill_talk (previously: None)
14:14:22 WARN | Overriding opt["model_file"] to /home/sam/ParlAI/data/models/blenderbot2/blenderbot2_400M/model (previously: /checkpoint/kshust
er/projects/knowledge_bot/kbot_memfix_sweep25_Fri_Jul__9/338/model.oss)
14:14:22 WARN | Overriding opt["search_server"] to http://36.135.119.125:1111 (previously: None)
14:14:22 INFO | loading dictionary from /home/sam/ParlAI/data/models/blenderbot2/blenderbot2_400M/model.dict
14:14:22 INFO | num words = 50264
14:14:22 INFO | BlenderBot2Fid: full interactive mode on.
14:14:34 INFO | Creating the search engine retriever.
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertModel: ['cls.predictions.transform.dense.weight',
'cls.seq_relationship.bias', 'cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transfo
rm.LayerNorm.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight']
- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g.
initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a
BertForSequenceClassification model from a BertForSequenceClassification model).
14:14:44 INFO | Building Query Generator from file: /home/sam/ParlAI/data/models/blenderbot2/query_generator/model
14:14:50 INFO | Building Memory Decoder from file: /home/sam/ParlAI/data/models/blenderbot2/memory_decoder/model
14:14:57 INFO | Total parameters: 732,961,280 (731,781,632 trainable)
14:14:57 INFO | Loading existing model params from /home/sam/ParlAI/data/models/blenderbot2/blenderbot2_400M/model
14:14:58 INFO | Opt:
14:14:58 INFO | activation: gelu
14:14:58 INFO | adafactor_eps: '[1e-30, 0.001]'
14:14:58 INFO | adam_eps: 1e-08
14:14:58 INFO | add_p1_after_newln: False
14:14:58 INFO | allow_missing_init_opts: False
14:14:58 INFO | attention_dropout: 0.1
14:14:58 INFO | batchsize: 12
14:14:58 INFO | beam_block_full_context: False
14:14:58 INFO | beam_block_list_filename: None
14:14:58 INFO | beam_block_ngram: 3
14:14:58 INFO | beam_context_block_ngram: 3
14:14:58 INFO | beam_delay: 30
14:14:58 INFO | beam_length_penalty: 0.65
14:14:58 INFO | beam_min_length: 20
14:14:58 INFO | beam_size: 10
14:14:58 INFO | betas: '[0.9, 0.999]'
14:14:58 INFO | bpe_add_prefix_space: None
14:14:58 INFO | bpe_debug: False
14:14:58 INFO | bpe_dropout: None
14:14:58 INFO | bpe_merge: None
14:14:58 INFO | bpe_vocab: None
14:14:58 INFO | candidates: inline
14:14:58 INFO | cap_num_predictions: 100
14:14:58 INFO | checkpoint_activations: False
14:14:58 INFO | codes_attention_num_heads: 4
14:14:58 INFO | codes_attention_type: basic
14:14:58 INFO | compressed_indexer_factory: IVF4096_HNSW128,PQ128
14:14:58 INFO | compressed_indexer_gpu_train: False
14:14:58 INFO | compressed_indexer_nprobe: 64
14:14:58 INFO | compute_tokenized_bleu: False
14:14:58 INFO | converting: False
14:14:58 INFO | data_parallel: False
14:14:58 INFO | datapath: /home/sam/ParlAI/data
14:14:58 INFO | datatype: train:stream
14:14:58 INFO | delimiter: '\n'
14:14:58 INFO | dict_class: parlai.core.dict:DictionaryAgent
14:14:58 INFO | dict_endtoken: __end__
14:14:58 INFO | dict_file: /home/sam/ParlAI/data/models/blenderbot2/blenderbot2_400M/model.dict
14:14:58 INFO | dict_initpath: None
14:14:58 INFO | dict_language: english
14:14:58 INFO | dict_loaded: True
14:14:58 INFO | dict_lower: False
14:14:58 INFO | dict_max_ngram_size: -1
14:14:58 INFO | dict_maxtokens: -1
14:14:58 INFO | dict_minfreq: 0
14:14:58 INFO | dict_nulltoken: __null__
14:14:58 INFO | dict_starttoken: __start__
14:14:58 INFO | dict_textfields: text,labels
14:14:58 INFO | embedding_projection: random
14:14:58 INFO | embedding_size: 1024
14:14:58 INFO | embedding_type: random
14:14:58 INFO | embeddings_scale: True
14:14:58 INFO | encode_candidate_vecs: True
14:14:58 INFO | encode_candidate_vecs_batchsize: 256
14:14:58 INFO | eval_candidates: inline
14:14:58 INFO | ffn_size: 4096
14:14:58 INFO | fixed_candidate_vecs: reuse
14:14:58 INFO | fixed_candidates_path: None
14:14:58 INFO | force_fp16_tokens: True
14:14:58 INFO | fp16: False
14:14:58 INFO | fp16_impl: safe
14:14:58 INFO | generation_model: bart
14:14:58 INFO | gold_document_key: __selected-docs__
14:14:58 INFO | gold_document_titles_key: select-docs-titles
14:14:58 INFO | gold_knowledge_passage_key: checked_sentence
14:14:58 INFO | gold_knowledge_title_key: title
14:14:58 INFO | gold_sentence_key: __selected-sentences__
14:14:58 INFO | gpu: -1
14:14:58 INFO | gradient_clip: 0.1
14:14:58 INFO | hide_labels: False
14:14:58 INFO | history_add_global_end_token: None
14:14:58 INFO | history_reversed: False
14:14:58 INFO | history_size: -1
14:14:58 INFO | hnsw_ef_construction: 200
14:14:58 INFO | hnsw_ef_search: 128
14:14:58 INFO | hnsw_indexer_store_n: 128
14:14:58 INFO | host: 0.0.0.0
14:14:58 INFO | ignore_bad_candidates: False
14:14:58 INFO | image_cropsize: 224
14:14:58 INFO | image_mode: raw
14:14:58 INFO | image_size: 256
14:14:58 INFO | include_initial_utterances: False
14:14:58 INFO | include_personas: True
14:14:58 INFO | memory_decoder_delimiter: '\n'
14:14:58 INFO | memory_decoder_ignore_phrase: persona:
14:14:58 INFO | memory_decoder_key: full_text
14:14:58 INFO | memory_decoder_model_file: zoo:blenderbot2/memory_decoder/model
14:14:58 INFO | memory_decoder_one_line_memories: False
14:14:58 INFO | memory_decoder_truncate: -1
14:14:58 INFO | memory_doc_delimiter: :
14:14:58 INFO | memory_doc_title_delimiter: ' / '
14:14:58 INFO | memory_extractor_phrase: persona:
14:14:58 INFO | memory_key: personas
14:14:58 INFO | memory_reader_model: None
14:14:58 INFO | memory_retriever_truncate: -1
14:14:58 INFO | memory_writer_model: bert
14:14:58 INFO | memory_writer_model_file: zoo:hallucination/multiset_dpr/hf_bert_base.cp
14:14:58 INFO | min_doc_token_length: 64
14:14:58 INFO | model: projects.blenderbot2.agents.blenderbot2:BlenderBot2FidAgent
14:14:58 INFO | model_file: /home/sam/ParlAI/data/models/blenderbot2/blenderbot2_400M/model
14:14:58 INFO | model_parallel: True
14:14:58 INFO | momentum: 0
14:14:58 INFO | multitask_weights: '[3.0, 1.0, 1.0, 1.0, 3.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]'
14:14:58 INFO | mutators: None
14:14:58 INFO | n_decoder_layers: 12
14:14:58 INFO | n_docs: 5
14:14:58 INFO | n_encoder_layers: 12
14:14:58 INFO | n_extra_positions: 0
14:14:58 INFO | n_heads: 16
14:14:58 INFO | n_layers: 12
14:14:58 INFO | n_positions: 1024
14:14:58 INFO | n_ranked_doc_chunks: 1
14:14:58 INFO | n_segments: 0
14:14:58 INFO | nesterov: True
14:14:58 INFO | no_cuda: False
14:14:58 INFO | normalize_sent_emb: False
14:14:58 INFO | nus: [0.7]
14:14:58 INFO | optimizer: adamax
14:14:58 INFO | outfile:
14:14:58 INFO | output_scaling: 1.0
14:14:58 INFO | override: "{'host': '0.0.0.0', 'task': 'blended_skill_talk', 'model_file': '/home/sam/ParlAI/data/models/blenderbot2/blende
rbot2_400M/model', 'search_server': 'http://36.135.119.125:1111'}"
14:14:58 INFO | parlai_home: /private/home/kshuster/ParlAI
14:14:58 INFO | path_to_dense_embeddings: None
14:14:58 INFO | path_to_dpr_passages: zoo:hallucination/wiki_passages/psgs_w100.tsv
14:14:58 INFO | path_to_index: zoo:hallucination/wiki_index_compressed/compressed_pq
14:14:58 INFO | person_tokens: False
14:14:58 INFO | poly_attention_num_heads: 4
14:14:58 INFO | poly_attention_type: basic
14:14:58 INFO | poly_faiss_model_file: None
14:14:58 INFO | poly_n_codes: 64
14:14:58 INFO | poly_score_initial_lambda: 0.5
14:14:58 INFO | polyencoder_init_model: wikito
14:14:58 INFO | polyencoder_type: codes
14:14:58 INFO | port: 8080
14:14:58 INFO | print_docs: False
14:14:58 INFO | query_generator_beam_min_length: 2
14:14:58 INFO | query_generator_beam_size: 1
14:14:58 INFO | query_generator_delimiter: '\n'
14:14:58 INFO | query_generator_ignore_phrase: persona:
14:14:58 INFO | query_generator_inference: beam
14:14:58 INFO | query_generator_key: full_text
14:14:58 INFO | query_generator_model_file: zoo:blenderbot2/query_generator/model
14:14:58 INFO | query_generator_truncate: -1
14:14:58 INFO | query_model: bert_from_parlai_rag
14:14:58 INFO | rag_model_type: token
14:14:58 INFO | rag_query_truncate: 512
14:14:58 INFO | rag_retriever_query: full_history
14:14:58 INFO | rag_retriever_type: search_engine
14:14:58 INFO | rag_turn_discount_factor: 1.0
14:14:58 INFO | rag_turn_marginalize: doc_then_turn
14:14:58 INFO | retriever_delimiter: '\n'
14:14:58 INFO | retriever_embedding_size: 768
14:14:58 INFO | retriever_ignore_phrase: persona:
14:14:58 INFO | return_cand_scores: False
14:14:58 INFO | safe_personas_only: True
14:14:58 INFO | save_format: conversations
14:14:58 INFO | search_query_generator_beam_min_length: 2
14:14:58 INFO | search_query_generator_beam_size: 1
14:14:58 INFO | search_query_generator_inference: greedy
14:14:58 INFO | search_query_generator_model_file: zoo:blenderbot2/query_generator/model
14:14:58 INFO | search_query_generator_text_truncate: 512
14:14:58 INFO | search_server: http://36.135.119.125:1111
14:14:58 INFO | share_encoders: True
14:14:58 INFO | share_search_and_memory_query_encoder: False
14:14:58 INFO | share_word_embeddings: True
14:14:58 INFO | single_turn: False
14:14:58 INFO | skip_generation: False
14:14:58 INFO | skip_retrieval_token: no_passages_used
14:14:58 INFO | special_tok_lst: None
14:14:58 INFO | split_lines: True
14:14:58 INFO | splitted_chunk_length: 256
14:14:58 INFO | starttime: Jul09_14-09
14:14:58 INFO | t5_dropout: 0.0
14:14:58 INFO | t5_generation_config: None
14:14:58 INFO | t5_model_arch: t5-base
14:14:58 INFO | t5_model_parallel: False
14:14:58 INFO | task: blended_skill_talk
14:14:58 INFO | temperature: 1.0
14:14:58 INFO | text_truncate: 512
14:14:58 INFO | tfidf_max_doc_paragraphs: -1
14:14:58 INFO | tfidf_model_path: zoo:wikipedia_full/tfidf_retriever/model
14:14:58 INFO | thorough: False
14:14:58 INFO | topk: 10
14:14:58 INFO | topp: 0.9
14:14:58 INFO | train_predict: False
14:14:58 INFO | Current ParlAI commit: 33ce8b5b4cd40d93f22da097d5eb6ce475c56ca8
14:14:58 INFO | creating task(s): blended_skill_talk
Traceback (most recent call last):
File "/opt/conda/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/opt/conda/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/home/sam/ParlAI/parlai/scripts/interactive_web.py", line 302, in <module>
InteractiveWeb.main()
File "/home/sam/ParlAI/parlai/core/script.py", line 129, in main
return cls._run_args(None)
File "/home/sam/ParlAI/parlai/core/script.py", line 101, in _run_args
return cls._run_from_parser_and_opt(opt, parser)
File "/home/sam/ParlAI/parlai/core/script.py", line 108, in _run_from_parser_and_opt
return script.run()
File "/home/sam/ParlAI/parlai/scripts/interactive_web.py", line 298, in run
return interactive_web(self.opt)
File "/home/sam/ParlAI/parlai/scripts/interactive_web.py", line 276, in interactive_web
SHARED['world'] = create_task(SHARED.get('opt'), SHARED['agent'])
File "/home/sam/ParlAI/parlai/core/worlds.py", line 1441, in create_task
world = create_task_world(opt, user_agents, default_world=default_world)
File "/home/sam/ParlAI/parlai/core/worlds.py", line 1414, in create_task_world
return world_class(opt, task_agents + user_agents)
File "/home/sam/ParlAI/parlai/tasks/blended_skill_talk/worlds.py", line 139, in __init__
super().__init__(opt, agents, shared)
File "/home/sam/ParlAI/parlai/tasks/interactive/worlds.py", line 34, in __init__
super().__init__(opt, agents, shared)
File "/home/sam/ParlAI/parlai/core/worlds.py", line 340, in __init__
raise RuntimeError('There must be exactly two agents for this world.')
RuntimeError: There must be exactly two agents for this world.
```
**Additional context**
Running in safe interactive mode works fine:
```
python -m parlai.scripts.safe_interactive --task blended_skill_talk --model-file zoo:blenderbot2/blenderbot2_400M/model --search_server 'http://36.135.119.125:1111'
```
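
For reference, here is a minimal sketch of the approach the patch above takes (the helper name `build_interactive_world` is invented for illustration, and it assumes a ParlAI install plus an already-parsed `opt` pointing at a valid model file): the human agent is built explicitly and both participants are handed to `create_task`, so task worlds that require exactly two agents can be constructed.

```python
# Sketch only: mirrors the change to parlai/scripts/interactive_web.py above.
from parlai.core.agents import create_agent
from parlai.core.worlds import create_task
from parlai.agents.local_human.local_human import LocalHumanAgent


def build_interactive_world(opt):
    # Create the model agent from the parsed options, as the script already does.
    model_agent = create_agent(opt, requireModelExists=True)
    # Previously opt['task'] was overridden with the LocalHumanAgent path and only
    # the model agent was passed in, so task worlds that check for exactly two
    # agents (e.g. blended_skill_talk's interactive world) raised the error above.
    human_agent = LocalHumanAgent(opt)
    # Passing both agents lets create_task build a proper two-agent task world.
    return create_task(opt, [human_agent, model_agent])
```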
| 2021-11-04T14:39:33Z | [] | [] |
|
facebookresearch/ParlAI | 4,634 | facebookresearch__ParlAI-4634 | [
"4629"
] | 556048d846272405f856108b35cb35ef9f0c7a4f | diff --git a/projects/seeker/agents/seeker.py b/projects/seeker/agents/seeker.py
--- a/projects/seeker/agents/seeker.py
+++ b/projects/seeker/agents/seeker.py
@@ -81,6 +81,12 @@ def add_cmdline_args(
default=False,
help='Whether to make model act output fully serializable.',
)
+ combo_fid.add_argument(
+ '--force-skip-retrieval',
+ type='bool',
+ default=False,
+ help='If True, we force skip retrieval on any/all incoming examples',
+ )
def build_model(self) -> ComboFidModel:
"""
@@ -107,9 +113,12 @@ def batchify(self, obs_batch: List[Message], sort: bool = False) -> Batch:
batch = super().batchify(obs_batch, sort)
valid_exs = [ex for ex in obs_batch if self.is_valid(ex)]
if valid_exs:
- skip_retrieval = [
- ex.get(self.opt['skip_retrieval_key'], False) for ex in valid_exs
- ]
+ if self.opt.get('force_skip_retrieval', False):
+ skip_retrieval = [True] * len(valid_exs)
+ else:
+ skip_retrieval = [
+ ex.get(self.opt['skip_retrieval_key'], False) for ex in valid_exs
+ ]
batch.skip_retrieval_vec = torch.BoolTensor(skip_retrieval)
if any(ex.get('prior_knowledge_responses') for ex in valid_exs):
vecs, _lens = self._pad_tensor(
@@ -563,7 +572,9 @@ def observe(self, observation: Message) -> Dict[str, Message]:
for key in ['label_candidates', 'knowledge']:
# Delete unnecessarily large keys
observation.pop(key, '')
- observation['knowledge_response'] = observation.get('checked_sentence', '')
+ observation.force_set(
+ 'knowledge_response', observation.get('checked_sentence', '')
+ )
raw_observation = copy.deepcopy(observation)
# This part is *specifically* for document chunking.
| diff --git a/tests/nightly/gpu/test_seeker.py b/tests/nightly/gpu/test_seeker.py
--- a/tests/nightly/gpu/test_seeker.py
+++ b/tests/nightly/gpu/test_seeker.py
@@ -6,6 +6,7 @@
import unittest
import parlai.scripts.eval_model as ems
+from parlai.scripts.self_chat import SelfChat
import parlai.utils.testing as testing_utils
R2C2_BASE_400M = 'zoo:seeker/r2c2_base_400M/model'
@@ -111,3 +112,15 @@ def test_blenderbot(self):
'datatype': 'valid',
}
ems.EvalModel.main(**opt)
+
+
+class TestSeekerSelfChat(unittest.TestCase):
+ def test_400m(self):
+ SelfChat.main(
+ model_file='zoo:seeker/seeker_dialogue_400M/model',
+ num_self_chats=1,
+ init_opt='gen/seeker_dialogue',
+ search_decision='never',
+ search_server='none',
+ krm_force_skip_retrieval=True,
+ )
| [Seeker self chat] RuntimeError: Message already contains key `knowledge_response`. If this was intentional, please use the function `force_set(key, value)`.
Hi, I'm still trying to figure out how to run Seeker in self chat with a search engine, and I'm facing the following issue.
```bash
parlai self_chat --model-file zoo:seeker/seeker_dialogue_400M/model --num-self-chats 200 --display-examples True --seed_messages_from_file="./seeds.txt" --outfile="./out" --init-opt gen/seeker_dialogue --search-server 0.0.0.0:8083
```
```
Current ParlAI commit: a84d28c4f9e9a56ddf9f8c3b124b61ac5ae991f5
Current internal commit: a84d28c4f9e9a56ddf9f8c3b124b61ac5ae991f5
Current fb commit: a84d28c4f9e9a56ddf9f8c3b124b61ac5ae991f5
```
```
...
20:40:13 | creating task(s): self_chat
[context]: Hi!
/home/gpu/ParlAI/parlai/core/torch_generator_agent.py:1728: UserWarning: __floordiv__ is deprecated, and its behavior will change in a future version of pytorch. It currently rounds toward 0 (like the 'trunc' function NOT 'floor'). This results in incorrect rounding for negative values. To keep the current behavior, use torch.div(a, b, rounding_mode='trunc'), or for actual floor division, use torch.div(a, b, rounding_mode='floor').
hyp_ids = best_idxs // voc_size
20:40:13 | Search Queries: ['conscious experiences']
20:40:13 | sending search request to http://0.0.0.0:8083
20:40:23 | URLS:
https://www.sciencedirect.com/topics/computer-science/conscious-experience
https://nobaproject.com/modules/consciousness
https://www.open.edu/openlearn/mod/resource/view.php?id=25779
https://www.verywellmind.com/what-is-consciousness-2795922
https://www.jstor.org/stable/2253868
20:40:23 | Generated knowledge: ['1. Browse Content\n2. ScienceDirect help']
Traceback (most recent call last):
File "/home/gpu/env/bin/parlai", line 33, in <module>
sys.exit(load_entry_point('parlai', 'console_scripts', 'parlai')())
File "/home/gpu/ParlAI/parlai/__main__.py", line 14, in main
superscript_main()
File "/home/gpu/ParlAI/parlai/core/script.py", line 325, in superscript_main
return SCRIPT_REGISTRY[cmd].klass._run_from_parser_and_opt(opt, parser)
File "/home/gpu/ParlAI/parlai/core/script.py", line 108, in _run_from_parser_and_opt
return script.run()
File "/home/gpu/ParlAI/parlai/scripts/self_chat.py", line 176, in run
return self_chat(self.opt)
File "/home/gpu/ParlAI/parlai/scripts/self_chat.py", line 146, in self_chat
_run_self_chat_episode(opt, world, logger)
File "/home/gpu/ParlAI/parlai/scripts/self_chat.py", line 93, in _run_self_chat_episode
world.parley()
File "/home/gpu/ParlAI/parlai/tasks/self_chat/worlds.py", line 178, in parley
self.agents[1 - i].observe(validate(self.acts[i]))
File "/home/gpu/ParlAI/projects/seeker/agents/seeker.py", line 566, in observe
observation['knowledge_response'] = observation.get('checked_sentence', '')
File "/home/gpu/ParlAI/parlai/core/message.py", line 30, in __setitem__
raise RuntimeError(
RuntimeError: Message already contains key `knowledge_response`. If this was intentional, please use the function `force_set(key, value)`.
```
Should I tweak the knowledge params? Or could you give me the CLI to run Seeker in self chat with a search engine please?
Thanks a lot 😇
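
For context, here is a minimal sketch of the `Message` behaviour behind the traceback and of the `force_set` call the patch above switches to. It assumes only a ParlAI install; the example values are invented.

```python
# Sketch only: parlai.core.message.Message forbids overwriting an existing key
# via plain item assignment; force_set is the explicit, intentional overwrite.
from parlai.core.message import Message

observation = Message(
    {'text': 'hi', 'checked_sentence': 'some knowledge', 'knowledge_response': 'stale value'}
)

# observation['knowledge_response'] = 'new'  # would raise the RuntimeError above

# What the patched seeker.py observe() does instead:
observation.force_set('knowledge_response', observation.get('checked_sentence', ''))
print(observation['knowledge_response'])  # -> 'some knowledge'
```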
| 2022-06-27T20:21:39Z | [] | [] |
|
VATSIM-UK/UK-Sector-File | 4,817 | VATSIM-UK__UK-Sector-File-4817 | [
"4816"
] | 6ab0ef63f3e03bb27433d1a7b9c1f6f35e537df9 | diff --git a/_data/Tools/src/api.py b/_data/Tools/src/api.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/src/api.py
@@ -0,0 +1,65 @@
+from src.util import airac, util
+
+import requests
+from bs4 import BeautifulSoup
+from loguru import logger
+import re
+
+class AipAPI:
+ def __init__(self):
+ self.airac = airac.Airac()
+ self.cycle = self.airac.cycle()
+ self.rootUrl = self.airac.url()
+
+ def parseENR4_1(self) -> dict[str,list]:
+ """Parse the AIP ENR4.1 page
+
+ Returns:
+ dict[str,list]: A dictionary containing the VOR identifier and some information about it (see example below)
+ {
+ "ADN": ["Aberdeen", "114.300", ("N057.18.37.620", "W002.16.01.950")], # name, frequency, coords
+ ...
+ }
+ """
+
+ url = self.rootUrl + "EG-ENR-4.1-en-GB.html"
+ text = requests.get(url).text
+ soup = BeautifulSoup(text, "html.parser")
+
+ # get table rows from heading
+
+ ad22 = soup.find("div", attrs={"id": "ENR-4.1"})
+ rows = list(list(ad22.children)[1].children)[1].children
+
+ outputs = {}
+
+ for row in rows:
+ name = list(list(list(row.children)[0].children)[1].children)[1].string
+ name = util.capitalise(name)
+
+ vorDmeNdb = str(list(list(row.children)[0].children)[3])
+ if re.search(r"NDB", vorDmeNdb):
+ continue # skip NDBs
+ elif re.search(r"DME", vorDmeNdb) and not re.search(r"VOR", vorDmeNdb):
+ name += " (DME)" # add DME to the name if it's only a DME
+
+ identifier = list(list(row.children)[1].children)[1].string
+
+ freq = list(list(list(row.children)[2].children)[1].children)[1].string
+ try:
+ float(freq)
+ except ValueError:
+ if identifier == "LON": # LON's frequency isn't on the AIP
+ freq = "113.600"
+ else:
+ freq = list(list(list(row.children)[2].children)[3].children)[1].string
+
+ coordA = list(list(list(row.children)[4].children)[0].children)[1].string
+ coordB = list(list(list(row.children)[4].children)[1].children)[1].string
+ coords = util.ukCoordsToSectorFile(coordA, coordB)
+
+ # logger.debug(f"{identifier} {freq} {' '.join(coords)} ; {name}")
+
+ outputs[identifier] = [name, freq, coords]
+
+ return outputs
diff --git a/_data/Tools/src/runner.py b/_data/Tools/src/runner.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/src/runner.py
@@ -0,0 +1,44 @@
+import api
+
+import argparse
+
+class Runner:
+ def __init__(self, args):
+ self.args = args
+ self.aipApi = api.AipAPI()
+
+ def readCurrentData(self, page):
+ with open(f"./../../{page}", "r") as f:
+ return f.read().split("\n")
+
+ def writeLines(self, page, data):
+ with open(f"./../../{page}", "w") as f:
+ f.write("\n".join(data))
+
+ def run(self):
+ if self.args["page"] == "ENR4.1" or self.args["page"] == "all": # ENR4.1
+ # get current data
+ currentData = self.readCurrentData("Navaids/VOR_UK.txt")
+
+ # get new data
+ newData = self.aipApi.parseENR4_1()
+
+ # compare
+ for i, line in enumerate(currentData):
+ vorID = line.split(" ")[0]
+ if vorID in newData.keys(): # only rewrite if the VOR/DME is in both the old data and the new data, otherwise existing data is kept
+ # if the VOR/DME is in both the old data and the new data, write the new data onto the old data (if the data is the same we still write, just no change will be visible because the written data is the same as the stored data)
+ dataAboutVORDME = newData[vorID]
+ currentData[i] = f"{vorID} {dataAboutVORDME[1]} {' '.join(dataAboutVORDME[2])} ; {dataAboutVORDME[0]}"
+
+ self.writeLines("Navaids/VOR_UK.txt", currentData)
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Parse parts of the UK eAIP, using our AIP API")
+ parser.add_argument("page", help="The part of the AIP to parse", choices=["all", "ENR4.1"])
+
+ args = vars(parser.parse_args())
+
+ runner = Runner(args)
+
+ runner.run()
diff --git a/_data/Tools/src/util/airac.py b/_data/Tools/src/util/airac.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/src/util/airac.py
@@ -0,0 +1,72 @@
+"""
+eAIP Parser
+Chris Parkinson (@chssn)
+"""
+
+# Standard Libraries
+import math
+from datetime import date, timedelta
+
+# Third Party Libraries
+from loguru import logger
+
+# Local Libraries
+
+class Airac:
+ """Class for general functions relating to AIRAC"""
+
+ def __init__(self):
+ # First AIRAC date following the last cycle length modification
+ start_date = "2020-01-02" # 2001
+ self.base_date = date.fromisoformat(str(start_date))
+ # Length of one AIRAC cycle
+ self.cycle_days = 28
+
+ self.cycles = -1
+
+ def initialise(self, date_in=0) -> int:
+ """Calculate the number of AIRAC cycles between any given date and the start date"""
+
+ if date_in:
+ input_date = date.fromisoformat(str(date_in))
+ else:
+ input_date = date.today()
+
+ # How many AIRAC cycles have occured since the start date
+ diff_cycles = (input_date - self.base_date) / timedelta(days=1)
+ # Round that number down to the nearest whole integer
+ self.cycles = math.floor(diff_cycles / self.cycle_days)
+
+ return self.cycles
+
+ def cycle(self, next_cycle:bool=False) -> str:
+ """Return the date of the current AIRAC cycle"""
+
+ if self.cycles == -1: # only initialise if not already done
+ self.cycles = self.initialise()
+
+ if next_cycle:
+ number_of_days = (self.cycles + 1) * self.cycle_days
+ else:
+ number_of_days = self.cycles * self.cycle_days
+ current_cycle = self.base_date + timedelta(days=number_of_days)
+ logger.debug("Current AIRAC Cycle is: {}", current_cycle)
+
+ return current_cycle
+
+ def url(self, next_cycle:bool=False) -> str:
+ """Return a generated URL based on the AIRAC cycle start date"""
+
+ base_url = "https://www.aurora.nats.co.uk/htmlAIP/Publications/"
+ if next_cycle:
+ # if the 'next_cycle' variable is passed, generate a URL for the next AIRAC cycle
+ base_date = self.cycle(next_cycle=True)
+ else:
+ base_date = self.cycle()
+
+ base_post_string = "-AIRAC/html/eAIP/"
+
+ formatted_url = base_url + str(base_date) + base_post_string
+ logger.debug(formatted_url)
+
+ return formatted_url
diff --git a/_data/Tools/src/util/util.py b/_data/Tools/src/util/util.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/src/util/util.py
@@ -0,0 +1,17 @@
+import re
+
+def capitalise(inp):
+ # capitalise the first letter of each word in a string
+ out = []
+ for word in inp.split(" "):
+ out.append(word[0].upper() + word[1:].lower())
+
+ return " ".join(out)
+
+def ukCoordsToSectorFile(inA, inB):
+ # convert from XXXXXX.XXN to NXXX.XX.XX.XXX etc
+ if re.match(r"[0-9]{6}(?:\.[0-9]{2}|)[N|S]", inA) is None or re.match(r"[0-9]{7}(?:\.[0-9]{2}|)[E|W]", inB) is None:
+ raise ValueError(f"Invalid coordinates provided: {inA}, {inB}")
+ outA = inA[-1] + "0" + inA[:2] + "." + inA[2:4] + "." + inA[4:6] + "." + inA[7:9].ljust(3, '0')
+ outB = inB[-1] + inB[:3] + "." + inB[3:5] + "." + inB[5:7] + "." + inB[8:10].ljust(3, '0')
+ return (outA, outB) # probably should be a tuple
| diff --git a/_data/Tools/tests/test_airac.py b/_data/Tools/tests/test_airac.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/tests/test_airac.py
@@ -0,0 +1,62 @@
+import unittest
+from datetime import date
+from src.util import airac
+
+class TestAirac(unittest.TestCase):
+ def setUp(self):
+ self.airac = airac.Airac()
+
+ def test_initialise(self):
+ self.assertEqual(self.airac.initialise("2020-01-02"), 0) # 0 date
+ self.assertEqual(self.airac.initialise("2023-07-12"), 45) # random date
+ self.assertEqual(self.airac.initialise("2023-07-19"), 46) # random date
+ self.assertEqual(self.airac.initialise("2023-08-09"), 46) # edge case
+ self.assertEqual(self.airac.initialise("2023-08-10"), 47) # edge case
+ self.assertEqual(self.airac.initialise("2028-12-20"), 116) # far edge case
+ self.assertEqual(self.airac.initialise("2028-12-21"), 117) # far edge case
+
+ def test_cycle(self):
+ # next_cycle = False
+ self.airac.initialise("2020-01-02")
+ self.assertEqual(self.airac.cycle(), date(2020, 1, 2)) # 0 date
+ self.airac.initialise("2021-05-16")
+ self.assertEqual(self.airac.cycle(), date(2021, 4, 22)) # random date
+ self.airac.initialise("2023-12-27")
+ self.assertEqual(self.airac.cycle(), date(2023, 11, 30)) # edge case
+ self.airac.initialise("2023-12-28")
+ self.assertEqual(self.airac.cycle(), date(2023, 12, 28)) # edge case
+
+ # next_cycle = True
+ self.airac.initialise("2020-01-02")
+ self.assertEqual(self.airac.cycle(next_cycle=True), date(2020, 1, 30)) # 0 date
+ self.airac.initialise("2021-05-16")
+ self.assertEqual(self.airac.cycle(next_cycle=True), date(2021, 5, 20)) # random date
+ self.airac.initialise("2023-12-27")
+ self.assertEqual(self.airac.cycle(next_cycle=True), date(2023, 12, 28)) # edge case
+ self.airac.initialise("2023-12-28")
+ self.assertEqual(self.airac.cycle(next_cycle=True), date(2024, 1, 25)) # edge case
+
+ def test_url(self):
+ # next_cycle = False
+ self.airac.initialise("2020-01-02")
+ self.assertEqual(self.airac.url(next_cycle=False), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2020-01-02-AIRAC/html/eAIP/") # 0 date
+ self.airac.initialise("2021-05-16")
+ self.assertEqual(self.airac.url(next_cycle=False), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2021-04-22-AIRAC/html/eAIP/") # random date
+ self.airac.initialise("2023-12-27")
+ self.assertEqual(self.airac.url(next_cycle=False), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2023-11-30-AIRAC/html/eAIP/") # edge case
+ self.airac.initialise("2023-12-28")
+ self.assertEqual(self.airac.url(next_cycle=False), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2023-12-28-AIRAC/html/eAIP/") # edge case
+
+ # next_cycle = True
+ self.airac.initialise("2020-01-02")
+ self.assertEqual(self.airac.url(next_cycle=True), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2020-01-30-AIRAC/html/eAIP/") # 0 date
+ self.airac.initialise("2021-05-16")
+ self.assertEqual(self.airac.url(next_cycle=True), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2021-05-20-AIRAC/html/eAIP/") # random date
+ self.airac.initialise("2023-12-27")
+ self.assertEqual(self.airac.url(next_cycle=True), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2023-12-28-AIRAC/html/eAIP/") # edge case
+ self.airac.initialise("2023-12-28")
+ self.assertEqual(self.airac.url(next_cycle=True), "https://www.aurora.nats.co.uk/htmlAIP/Publications/2024-01-25-AIRAC/html/eAIP/") # edge case
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/_data/Tools/tests/test_api.py b/_data/Tools/tests/test_api.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/tests/test_api.py
@@ -0,0 +1,19 @@
+from src import api
+from src.util import airac
+
+import unittest
+
+class TestApi(unittest.TestCase):
+ def setUp(self):
+ self.aip = api.AipAPI()
+
+ def test_parseENR4_1(self):
+ self.aip.airac = airac.Airac()
+ self.aip.airac.initialise("2020-01-02") # test on AIRAC 2001
+ self.aip.cycle = self.aip.airac.cycle()
+ self.aip.rootUrl = self.aip.airac.url()
+ self.assertEqual(self.aip.parseENR4_1(), {'ADN': ['Aberdeen', '114.300', ('N057.18.38.000', 'W002.16.02.000')], 'BKY': ['Barkway', '116.250', ('N051.59.23.000', 'E000.03.43.000')], 'BEL': ['Belfast', '117.200', ('N054.39.40.000', 'W006.13.48.000')], 'BEN': ['Benbecula (DME)', '113.950', ('N057.28.41.000', 'W007.21.55.000')], 'BHD': ['Berry Head', '112.050', ('N050.23.55.000', 'W003.29.37.000')], 'BIG': ['Biggin', '115.100', ('N051.19.51.000', 'E000.02.05.000')], 'BNN': ['Bovingdon', '113.750', ('N051.43.34.000', 'W000.32.59.000')], 'BCN': ['Brecon', '117.450', ('N051.43.32.000', 'W003.15.47.000')], 'BPK': ['Brookmans Park', '117.500', ('N051.44.59.000', 'W000.06.24.000')], 'CLN': ['Clacton', '114.550', ('N051.50.55.000', 'E001.08.51.000')], 'CPT': ['Compton', '114.350', ('N051.29.30.000', 'W001.13.11.000')], 'DTY': ['Daventry', '116.400', ('N052.10.49.000', 'W001.06.50.000')], 'DCS': ['Dean Cross (DME)', '115.200', ('N054.43.19.000', 'W003.20.26.000')], 'DET': ['Detling', '117.300', ('N051.18.14.000', 'E000.35.50.000')], 'DVR': ['Dover (DME)', '114.950', ('N051.09.45.000', 'E001.21.33.000')], 'DUD': ['Dundonald (DME)', '115.450', ('N055.33.32.000', 'W004.36.06.000')], 'GAM': ['Gamston (DME)', '112.800', ('N053.16.53.000', 'W000.56.50.000')], 'GOW': ['Glasgow (DME)', '115.400', ('N055.52.14.000', 'W004.26.45.000')], 'GWC': ['Goodwood (DME)', '114.750', ('N050.51.19.000', 'W000.45.24.000')], 'GLO': ['Green Lowther (DME)', '109.650', ('N055.23.24.000', 'W003.44.12.000')], 'HON': ['Honiley', '113.650', ('N052.21.24.000', 'W001.39.49.000')], 'IOM': ['Isle Of Man', '112.200', ('N054.04.01.000', 'W004.45.49.000')], 'JSY': ['Jersey', '112.200', ('N049.13.16.000', 'W002.02.46.000')], 'LAM': ['Lambourne', '115.600', ('N051.38.46.000', 'E000.09.06.000')], 'LND': ['Lands End', '114.200', ('N050.08.11.000', 'W005.38.13.000')], 'LON': ['London', '113.600', ('N051.29.14.000', 'W000.28.00.000')], 'LYD': ['Lydd (DME)', '114.050', ('N050.59.59.000', 'E000.52.43.000')], 'MAC': ['Machrihanish (DME)', '116.000', ('N055.25.48.000', 'W005.39.01.000')], 'MCT': ['Manchester', '113.550', ('N053.21.25.000', 'W002.15.44.000')], 'MAY': ['Mayfield', '117.900', ('N051.01.02.000', 'E000.06.58.000')], 'MID': ['Midhurst', '114.000', ('N051.03.14.000', 'W000.37.30.000')], 'NEW': ['Newcastle (DME)', '114.250', ('N055.02.18.000', 'W001.41.54.000')], 'OCK': ['Ockham (DME)', '115.300', ('N051.18.18.000', 'W000.26.50.000')], 'OTR': ['Ottringham', '113.900', ('N053.41.54.000', 'W000.06.13.000')], 'PTH': ['Perth', '110.400', ('N056.26.33.000', 'W003.22.07.000')], 'POL': ['Pole Hill', '112.100', ('N053.44.38.000', 'W002.06.12.000')], 'SAB': ['Saint Abbs', '112.500', ('N055.54.27.000', 'W002.12.23.000')], 'SFD': ['Seaford', '117.000', ('N050.45.38.000', 'E000.07.19.000')], 'SAM': ['Southampton (DME)', '113.350', ('N050.57.19.000', 'W001.20.42.000')], 'STN': ['Stornoway', '115.100', ('N058.12.25.000', 'W006.10.59.000')], 'STU': ['Strumble', '113.100', ('N051.59.41.000', 'W005.02.25.000')], 'SUM': ['Sumburgh', '117.350', ('N059.52.44.000', 'W001.17.12.000')], 'TLA': ['Talla', '113.800', ('N055.29.57.000', 'W003.21.10.000')], 'TIR': ['Tiree', '117.700', ('N056.29.36.000', 'W006.52.32.000')], 'TNT': ['Trent', '115.700', ('N053.03.14.000', 'W001.40.12.000')], 'TRN': ['Turnberry', '117.500', ('N055.18.48.000', 'W004.47.02.000')], 'WAL': ['Wallasey', '114.100', ('N053.23.31.000', 'W003.08.04.000')], 'WIK': ['Wick', '113.600', ('N058.27.32.000', 'W003.06.01.000')]} )
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/_data/Tools/tests/test_util.py b/_data/Tools/tests/test_util.py
new file mode 100644
--- /dev/null
+++ b/_data/Tools/tests/test_util.py
@@ -0,0 +1,20 @@
+from src.util import util
+
+import unittest
+
+class TestUtil(unittest.TestCase):
+ def test_capitalise(self):
+ self.assertEqual(util.capitalise("hello world"), "Hello World") # normal
+ self.assertEqual(util.capitalise("hello world!"), "Hello World!") # punctuation
+ self.assertEqual(util.capitalise("HELLO WORLD"), "Hello World") # all caps
+ self.assertEqual(util.capitalise("hElLo"), "Hello") # random, 1 word
+
+ def test_ukCoordsToSectorFile(self):
+ with self.assertRaises(ValueError):
+ util.ukCoordsToSectorFile("ABCDEF.GHN", "ABCDEFG.HIW")
+ self.assertEqual(util.ukCoordsToSectorFile("503011.88N", "0032833.64W"), ("N050.30.11.880", "W003.28.33.640")) # random
+ self.assertEqual(util.ukCoordsToSectorFile("503011N", "0032833W"), ("N050.30.11.000", "W003.28.33.000")) # no decimal
+
+
+if __name__ == '__main__':
+ unittest.main()
| Fix incorrect VOR/NDB Coordinates
# What is the bug/error?
- Some coordinates are incorrect
# What is the expected functionality?
- Coordinates are correct (note this PR is only for VOR/NDB stations, **not** NDB only stations)
# Sources
AIP
# File changes (if known)
Navaids/VOR_UK.txt, others
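
For illustration, a minimal usage sketch of the coordinate conversion helper this PR adds, with the sample values taken from its `tests/test_util.py`; it assumes the `src` package under `_data/Tools` is on the import path.

```python
# Sketch only: converts AIP-style coordinates (DDMMSS.ssN / DDDMMSS.ssW) into the
# sector file format (NDDD.MM.SS.sss / WDDD.MM.SS.sss) using the new helper.
from src.util import util

lat, lon = util.ukCoordsToSectorFile("503011.88N", "0032833.64W")
print(lat, lon)  # -> N050.30.11.880 W003.28.33.640

# Malformed input raises ValueError, as exercised in the tests:
# util.ukCoordsToSectorFile("ABCDEF.GHN", "ABCDEFG.HIW")
```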
| 2023-07-18T06:15:20Z | [] | [] |
|
VATSIM-UK/UK-Sector-File | 4,886 | VATSIM-UK__UK-Sector-File-4886 | [
"4885"
] | a019ac2c3ad75c9832294d4c00533989abc661f5 | diff --git a/_data/Tools/src/api.py b/_data/Tools/src/api.py
--- a/_data/Tools/src/api.py
+++ b/_data/Tools/src/api.py
@@ -1,4 +1,4 @@
-from src.util import airac, util
+from util import airac, util
import requests
from bs4 import BeautifulSoup
@@ -11,13 +11,99 @@ def __init__(self):
self.cycle = self.airac.cycle()
self.rootUrl = self.airac.url()
- def parseENR4_1(self) -> dict[str,list]:
+ def parseENR3_2(self) -> dict[str,dict]:
+ """Parse the AIP ENR3.2 page
+
+ Returns:
+ dict[str,dict]: A dictionary containing the airway identifier and some information about it (see example below)
+ {
+ "L6": {"waypoints": [{"name": "DVR", "lowerlimit": 85, "upperlimit": 460}, {"name": "DET"}]},
+ ...
+ }
+ """
+ url = self.rootUrl + "EG-ENR-3.2-en-GB.html"
+ text = requests.get(url).text
+ soup = BeautifulSoup(text, "html.parser")
+
+ enr32 = soup.find("div", attrs={"id": "ENR-3.2"})
+ airways = list(enr32.children)[1:]
+
+ outputs = {}
+
+ for airway in airways:
+ if "AmdtInsertedAIRAC" in str(airway): # skip amendments
+ continue
+ tbody = list(airway.children)[2]
+ wpts = list(tbody.children)
+ routeTitleHTML = wpts[0]
+ airwayName = list(list(list(routeTitleHTML.children)[0].children)[0].children)[1].string
+
+ if airwayName == "N84": # badly formatted airway
+ continue
+
+ outputs[airwayName] = {"waypoints": []}
+
+ wpts.pop(0) # remove name
+
+ # deal with first wpt
+ wptName = list(list(wpts[0].children)[1].children)
+ if len(wptName) > 4: # VOR/DME
+ wptName = wptName[5].string
+ else: # FIX
+ wptName = wptName[1].string
+
+ wpts.pop(0)
+
+ outputs[airwayName]["waypoints"].append({"name": wptName})
+
+ # deal with rest of wpts
+
+ for i in range(0, len(wpts), 2): # pair waypoint names with their data
+ wptHTML = wpts[i + 1]
+ wptName = list(list(wptHTML.children)[1].children)
+ if len(wptName) > 4: # VOR/DME
+ wptName = wptName[5].string
+ else: # FIX
+ wptName = wptName[1].string
+ # print(wptName)
+
+ try:
+ wptDataHTML = wpts[i]
+
+ upperLowerBox = list(list(wptDataHTML.children)[3].children)[0]
+
+ if airwayName == "N22" and wptName == "BHD": # for some reason only this waypoint is in a different format :facepalm:
+ upperLimit = 245
+ lowerLimit = 85
+ else:
+ upperLimit = list(list(list(list(list(list(upperLowerBox.children)[0].children)[0].children)[0].children)[0].children)[0].children)[4].string
+ lowerLimit = list(list(list(list(list(list(upperLowerBox.children)[0].children)[0].children)[1].children)[0].children)[0].children)
+ if "FT" in lowerLimit[4].string:
+ lowerLimit = lowerLimit[1].string[:2]
+ else:
+ lowerLimit = lowerLimit[4].string
+
+ outputs[airwayName]["waypoints"].append({"name": wptName, "lowerlimit": int(lowerLimit), "upperlimit": int(upperLimit)})
+ except IndexError:
+ outputs[airwayName]["waypoints"].append({"name": wptName})
+ except AttributeError:
+ print(airwayName, wptName)
+ raise ValueError # NATS broke something if this gets run :(
+
+ # wptLowerLimit = list(list(wpt.children)[1].children)[0].string
+ # wptUpperLimit = list(list(wpt.children)[1].children)[2].string
+ # print(f"{wptName} {wptLowerLimit} {wptUpperLimit}")
+
+ return outputs
+
+
+ def parseENR4_1(self) -> dict[str,dict]:
"""Parse the AIP ENR4.1 page
Returns:
- dict[str,list]: A dictionary containing the VOR identifier and some information about it (see example below)
+ dict[str,dict]: A dictionary containing the VOR identifier and some information about it (see example below)
{
- "ADN": ["Aberdeen", "114.300", ("N057.18.37.620", "W002.16.01.950")], # name, frequency, coords
+ "ADN": {"name": "Aberdeen", "frequency": "114.300", "coordinates": ("N057.18.37.620", "W002.16.01.950")}, # name, frequency, coordinates
...
}
"""
@@ -60,6 +146,37 @@ def parseENR4_1(self) -> dict[str,list]:
# logger.debug(f"{identifier} {freq} {' '.join(coords)} ; {name}")
- outputs[identifier] = [name, freq, coords]
+ outputs[identifier] = {"name": name, "frequency": freq, "coordinates": coords}
return outputs
+
+ def parseENR4_4(self) -> dict[str,dict]:
+ """Parse the AIP ENR4.4 page
+
+ Returns:
+ dict[str,dict]: A dictionary containing the FIX identifier and some information about it (see example below)
+ {
+ "ABBEW": {"coordinates": ("N050.30.11.880", "W003.28.33.640")}, # coordinates
+ ...
+ }
+ """
+ url = self.rootUrl + "EG-ENR-4.4-en-GB.html"
+ text = requests.get(url).text
+ soup = BeautifulSoup(text, "html.parser")
+
+ # get table rows from heading
+
+ table = soup.find("table", attrs={"class": "ENR-table"})
+ rows = list(list(table.children)[1].children)
+
+ outputs = {}
+
+ for row in rows:
+ fixName = list(list(row.children)[0].children)[1].string
+ coordA = list(list(list(row.children)[1].children)[0].children)[1].string
+ coordB = list(list(list(row.children)[1].children)[1].children)[1].string
+ coords = util.ukCoordsToSectorFile(coordA, coordB)
+
+ outputs[fixName] = {"coordinates": coords}
+
+ return outputs
diff --git a/_data/Tools/src/runner.py b/_data/Tools/src/runner.py
--- a/_data/Tools/src/runner.py
+++ b/_data/Tools/src/runner.py
@@ -1,7 +1,8 @@
-import api
-
+import os
import argparse
+import api
+
class Runner:
def __init__(self, args):
self.args = args
@@ -29,13 +30,122 @@ def run(self):
if vorID in newData.keys(): # only rewrite if the VOR/DME is in both the old data and the new data, otherwise existing data is kept
# if the VOR/DME is in both the old data and the new data, write the new data onto the old data (if the data is the same we still write, just no change will be visible because the written data is the same as the stored data)
dataAboutVORDME = newData[vorID]
- currentData[i] = f"{vorID} {dataAboutVORDME[1]} {' '.join(dataAboutVORDME[2])} ; {dataAboutVORDME[0]}"
+ currentData[i] = f"{vorID} {dataAboutVORDME['frequency']} {' '.join(dataAboutVORDME['coordinates'])} ; {dataAboutVORDME['name']}"
self.writeLines("Navaids/VOR_UK.txt", currentData)
+
+ elif self.args["page"] == "ENR4.4" or self.args["page"] == "all":
+ currentData = self.readCurrentData("Navaids/FIXES_UK.txt")
+
+ newData = self.aipApi.parseENR4_4()
+
+ for i, line in enumerate(currentData):
+ fixID = line.split(" ")[0]
+ if fixID in newData.keys():
+ dataAboutFix = newData[fixID]
+ currentData[i] = f"{fixID} {' '.join(dataAboutFix['coordinates'])}"
+
+ self.writeLines("Navaids/FIXES_UK.txt", currentData)
+
+ elif self.args["page"] == "ENR3.2" or self.args["page"] == "all":
+ newData = self.aipApi.parseENR3_2()
+
+ lowerAirways = os.listdir("../../ATS Routes/RNAV/Lower")
+ upperAirways = os.listdir("../../ATS Routes/RNAV/Upper")
+
+ for airway in newData.keys():
+ prevLowerIndex = None
+ prevUpperIndex = None
+ firstLower = True
+ firstUpper = True
+ lowerLines = []
+ upperLines = []
+ for i, waypoint in enumerate(newData[airway]["waypoints"]):
+ try:
+ lowerLimit = waypoint["lowerlimit"]
+ except KeyError:
+ lowerLimit = 0
+
+ try:
+ upperLimit = waypoint["upperlimit"]
+ except KeyError:
+ upperLimit = 0
+
+ lb = False
+
+ if i == 0: # special logic for first wpt: only include in lower if next wpt is also in lower
+ if newData[airway]["waypoints"][1]["lowerlimit"] < 245:
+ lowerLines.append(waypoint["name"])
+ lb = True
+
+ if i == 0:
+ if newData[airway]["waypoints"][1]["upperlimit"] > 245:
+ upperLines.append(waypoint["name"])
+ continue
+
+ if lb:
+ continue
+
+ # two VERY annoying exceptions
+ if airway == "M40" and waypoint["name"] == "IDESI":
+ lowerLines.append("XXXXX")
+ lowerLines.append(waypoint["name"])
+ prevLowerIndex = i
+ elif airway == "L620" and waypoint["name"] == "CLN":
+ lowerLines.append("XXXXX")
+ lowerLines.append(waypoint["name"])
+ prevLowerIndex = i
+
+ elif lowerLimit < 245:
+ if firstLower:
+ lowerLines.append(waypoint["name"])
+ prevLowerIndex = i
+ firstLower = False
+ elif prevLowerIndex == i - 1:
+ lowerLines.append(waypoint["name"])
+ prevLowerIndex = i
+ else: # add in spacing line with a filler wpt of `XXXXX`
+ lowerLines.append("XXXXX")
+ lowerLines.append(waypoint["name"])
+ prevLowerIndex = i
+ if upperLimit > 245: # same logic as above
+ if firstUpper:
+ upperLines.append(waypoint["name"])
+ prevUpperIndex = i
+ firstUpper = False
+ elif prevUpperIndex == i - 1:
+ upperLines.append(waypoint["name"])
+ prevUpperIndex = i
+ else:
+ upperLines.append("XXXXX")
+ upperLines.append(waypoint["name"])
+ prevUpperIndex = i
+
+ # put into file format
+ lowerOutput = []
+ for i in range(len(lowerLines) - 1):
+ if lowerLines[i] == "XXXXX":
+ lowerOutput.append(";Non-contiguous")
+ elif lowerLines[i + 1] == "XXXXX":
+ pass
+ else:
+ lowerOutput.append(lowerLines[i].ljust(5, " ") + " " + lowerLines[i + 1].ljust(5, " "))
+
+ upperOutput = []
+ for i in range(len(upperLines) - 1):
+ if upperLines[i] == "XXXXX":
+ upperOutput.append(";Non-contiguous")
+ elif upperLines[i + 1] == "XXXXX":
+ pass
+ else:
+ upperOutput.append(upperLines[i].ljust(5, " ") + " " + upperLines[i + 1].ljust(5, " "))
+
+ print(airway, lowerOutput, upperOutput)
+
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Parse parts of the UK eAIP, using our AIP API")
- parser.add_argument("page", help="The part of the AIP to parse", choices=["all", "ENR4.1"])
+ parser.add_argument("page", help="The part of the AIP to parse", choices=["all", "ENR3.2", "ENR4.1", "ENR4.4"])
args = vars(parser.parse_args())
| diff --git a/_data/Tools/tests/test_api.py b/_data/Tools/tests/test_api.py
--- a/_data/Tools/tests/test_api.py
+++ b/_data/Tools/tests/test_api.py
@@ -12,8 +12,14 @@ def test_parseENR4_1(self):
self.aip.airac.initialise("2020-01-02") # test on AIRAC 2001
self.aip.cycle = self.aip.airac.cycle()
self.aip.rootUrl = self.aip.airac.url()
- self.assertEqual(self.aip.parseENR4_1(), {'ADN': ['Aberdeen', '114.300', ('N057.18.38.000', 'W002.16.02.000')], 'BKY': ['Barkway', '116.250', ('N051.59.23.000', 'E000.03.43.000')], 'BEL': ['Belfast', '117.200', ('N054.39.40.000', 'W006.13.48.000')], 'BEN': ['Benbecula (DME)', '113.950', ('N057.28.41.000', 'W007.21.55.000')], 'BHD': ['Berry Head', '112.050', ('N050.23.55.000', 'W003.29.37.000')], 'BIG': ['Biggin', '115.100', ('N051.19.51.000', 'E000.02.05.000')], 'BNN': ['Bovingdon', '113.750', ('N051.43.34.000', 'W000.32.59.000')], 'BCN': ['Brecon', '117.450', ('N051.43.32.000', 'W003.15.47.000')], 'BPK': ['Brookmans Park', '117.500', ('N051.44.59.000', 'W000.06.24.000')], 'CLN': ['Clacton', '114.550', ('N051.50.55.000', 'E001.08.51.000')], 'CPT': ['Compton', '114.350', ('N051.29.30.000', 'W001.13.11.000')], 'DTY': ['Daventry', '116.400', ('N052.10.49.000', 'W001.06.50.000')], 'DCS': ['Dean Cross (DME)', '115.200', ('N054.43.19.000', 'W003.20.26.000')], 'DET': ['Detling', '117.300', ('N051.18.14.000', 'E000.35.50.000')], 'DVR': ['Dover (DME)', '114.950', ('N051.09.45.000', 'E001.21.33.000')], 'DUD': ['Dundonald (DME)', '115.450', ('N055.33.32.000', 'W004.36.06.000')], 'GAM': ['Gamston (DME)', '112.800', ('N053.16.53.000', 'W000.56.50.000')], 'GOW': ['Glasgow (DME)', '115.400', ('N055.52.14.000', 'W004.26.45.000')], 'GWC': ['Goodwood (DME)', '114.750', ('N050.51.19.000', 'W000.45.24.000')], 'GLO': ['Green Lowther (DME)', '109.650', ('N055.23.24.000', 'W003.44.12.000')], 'HON': ['Honiley', '113.650', ('N052.21.24.000', 'W001.39.49.000')], 'IOM': ['Isle Of Man', '112.200', ('N054.04.01.000', 'W004.45.49.000')], 'JSY': ['Jersey', '112.200', ('N049.13.16.000', 'W002.02.46.000')], 'LAM': ['Lambourne', '115.600', ('N051.38.46.000', 'E000.09.06.000')], 'LND': ['Lands End', '114.200', ('N050.08.11.000', 'W005.38.13.000')], 'LON': ['London', '113.600', ('N051.29.14.000', 'W000.28.00.000')], 'LYD': ['Lydd (DME)', '114.050', ('N050.59.59.000', 'E000.52.43.000')], 'MAC': ['Machrihanish (DME)', '116.000', ('N055.25.48.000', 'W005.39.01.000')], 'MCT': ['Manchester', '113.550', ('N053.21.25.000', 'W002.15.44.000')], 'MAY': ['Mayfield', '117.900', ('N051.01.02.000', 'E000.06.58.000')], 'MID': ['Midhurst', '114.000', ('N051.03.14.000', 'W000.37.30.000')], 'NEW': ['Newcastle (DME)', '114.250', ('N055.02.18.000', 'W001.41.54.000')], 'OCK': ['Ockham (DME)', '115.300', ('N051.18.18.000', 'W000.26.50.000')], 'OTR': ['Ottringham', '113.900', ('N053.41.54.000', 'W000.06.13.000')], 'PTH': ['Perth', '110.400', ('N056.26.33.000', 'W003.22.07.000')], 'POL': ['Pole Hill', '112.100', ('N053.44.38.000', 'W002.06.12.000')], 'SAB': ['Saint Abbs', '112.500', ('N055.54.27.000', 'W002.12.23.000')], 'SFD': ['Seaford', '117.000', ('N050.45.38.000', 'E000.07.19.000')], 'SAM': ['Southampton (DME)', '113.350', ('N050.57.19.000', 'W001.20.42.000')], 'STN': ['Stornoway', '115.100', ('N058.12.25.000', 'W006.10.59.000')], 'STU': ['Strumble', '113.100', ('N051.59.41.000', 'W005.02.25.000')], 'SUM': ['Sumburgh', '117.350', ('N059.52.44.000', 'W001.17.12.000')], 'TLA': ['Talla', '113.800', ('N055.29.57.000', 'W003.21.10.000')], 'TIR': ['Tiree', '117.700', ('N056.29.36.000', 'W006.52.32.000')], 'TNT': ['Trent', '115.700', ('N053.03.14.000', 'W001.40.12.000')], 'TRN': ['Turnberry', '117.500', ('N055.18.48.000', 'W004.47.02.000')], 'WAL': ['Wallasey', '114.100', ('N053.23.31.000', 'W003.08.04.000')], 'WIK': ['Wick', '113.600', ('N058.27.32.000', 'W003.06.01.000')]} )
-
+ self.assertEqual(self.aip.parseENR4_1(), {'ADN': {'name': 'Aberdeen', 'frequency': '114.300', 'coordinates': ('N057.18.38.000', 'W002.16.02.000')}, 'BKY': {'name': 'Barkway', 'frequency': '116.250', 'coordinates': ('N051.59.23.000', 'E000.03.43.000')}, 'BEL': {'name': 'Belfast', 'frequency': '117.200', 'coordinates': ('N054.39.40.000', 'W006.13.48.000')}, 'BEN': {'name': 'Benbecula (DME)', 'frequency': '113.950', 'coordinates': ('N057.28.41.000', 'W007.21.55.000')}, 'BHD': {'name': 'Berry Head', 'frequency': '112.050', 'coordinates': ('N050.23.55.000', 'W003.29.37.000')}, 'BIG': {'name': 'Biggin', 'frequency': '115.100', 'coordinates': ('N051.19.51.000', 'E000.02.05.000')}, 'BNN': {'name': 'Bovingdon', 'frequency': '113.750', 'coordinates': ('N051.43.34.000', 'W000.32.59.000')}, 'BCN': {'name': 'Brecon', 'frequency': '117.450', 'coordinates': ('N051.43.32.000', 'W003.15.47.000')}, 'BPK': {'name': 'Brookmans Park', 'frequency': '117.500', 'coordinates': ('N051.44.59.000', 'W000.06.24.000')}, 'CLN': {'name': 'Clacton', 'frequency': '114.550', 'coordinates': ('N051.50.55.000', 'E001.08.51.000')}, 'CPT': {'name': 'Compton', 'frequency': '114.350', 'coordinates': ('N051.29.30.000', 'W001.13.11.000')}, 'DTY': {'name': 'Daventry', 'frequency': '116.400', 'coordinates': ('N052.10.49.000', 'W001.06.50.000')}, 'DCS': {'name': 'Dean Cross (DME)', 'frequency': '115.200', 'coordinates': ('N054.43.19.000', 'W003.20.26.000')}, 'DET': {'name': 'Detling', 'frequency': '117.300', 'coordinates': ('N051.18.14.000', 'E000.35.50.000')}, 'DVR': {'name': 'Dover (DME)', 'frequency': '114.950', 'coordinates': ('N051.09.45.000', 'E001.21.33.000')}, 'DUD': {'name': 'Dundonald (DME)', 'frequency': '115.450', 'coordinates': ('N055.33.32.000', 'W004.36.06.000')}, 'GAM': {'name': 'Gamston (DME)', 'frequency': '112.800', 'coordinates': ('N053.16.53.000', 'W000.56.50.000')}, 'GOW': {'name': 'Glasgow (DME)', 'frequency': '115.400', 'coordinates': ('N055.52.14.000', 'W004.26.45.000')}, 'GWC': {'name': 'Goodwood (DME)', 'frequency': '114.750', 'coordinates': ('N050.51.19.000', 'W000.45.24.000')}, 'GLO': {'name': 'Green Lowther (DME)', 'frequency': '109.650', 'coordinates': ('N055.23.24.000', 'W003.44.12.000')}, 'HON': {'name': 'Honiley', 'frequency': '113.650', 'coordinates': ('N052.21.24.000', 'W001.39.49.000')}, 'IOM': {'name': 'Isle Of Man', 'frequency': '112.200', 'coordinates': ('N054.04.01.000', 'W004.45.49.000')}, 'JSY': {'name': 'Jersey', 'frequency': '112.200', 'coordinates': ('N049.13.16.000', 'W002.02.46.000')}, 'LAM': {'name': 'Lambourne', 'frequency': '115.600', 'coordinates': ('N051.38.46.000', 'E000.09.06.000')}, 'LND': {'name': 'Lands End', 'frequency': '114.200', 'coordinates': ('N050.08.11.000', 'W005.38.13.000')}, 'LON': {'name': 'London', 'frequency': '113.600', 'coordinates': ('N051.29.14.000', 'W000.28.00.000')}, 'LYD': {'name': 'Lydd (DME)', 'frequency': '114.050', 'coordinates': ('N050.59.59.000', 'E000.52.43.000')}, 'MAC': {'name': 'Machrihanish (DME)', 'frequency': '116.000', 'coordinates': ('N055.25.48.000', 'W005.39.01.000')}, 'MCT': {'name': 'Manchester', 'frequency': '113.550', 'coordinates': ('N053.21.25.000', 'W002.15.44.000')}, 'MAY': {'name': 'Mayfield', 'frequency': '117.900', 'coordinates': ('N051.01.02.000', 'E000.06.58.000')}, 'MID': {'name': 'Midhurst', 'frequency': '114.000', 'coordinates': ('N051.03.14.000', 'W000.37.30.000')}, 'NEW': {'name': 'Newcastle (DME)', 'frequency': '114.250', 'coordinates': ('N055.02.18.000', 'W001.41.54.000')}, 'OCK': {'name': 'Ockham (DME)', 'frequency': 
'115.300', 'coordinates': ('N051.18.18.000', 'W000.26.50.000')}, 'OTR': {'name': 'Ottringham', 'frequency': '113.900', 'coordinates': ('N053.41.54.000', 'W000.06.13.000')}, 'PTH': {'name': 'Perth', 'frequency': '110.400', 'coordinates': ('N056.26.33.000', 'W003.22.07.000')}, 'POL': {'name': 'Pole Hill', 'frequency': '112.100', 'coordinates': ('N053.44.38.000', 'W002.06.12.000')}, 'SAB': {'name': 'Saint Abbs', 'frequency': '112.500', 'coordinates': ('N055.54.27.000', 'W002.12.23.000')}, 'SFD': {'name': 'Seaford', 'frequency': '117.000', 'coordinates': ('N050.45.38.000', 'E000.07.19.000')}, 'SAM': {'name': 'Southampton (DME)', 'frequency': '113.350', 'coordinates': ('N050.57.19.000', 'W001.20.42.000')}, 'STN': {'name': 'Stornoway', 'frequency': '115.100', 'coordinates': ('N058.12.25.000', 'W006.10.59.000')}, 'STU': {'name': 'Strumble', 'frequency': '113.100', 'coordinates': ('N051.59.41.000', 'W005.02.25.000')}, 'SUM': {'name': 'Sumburgh', 'frequency': '117.350', 'coordinates': ('N059.52.44.000', 'W001.17.12.000')}, 'TLA': {'name': 'Talla', 'frequency': '113.800', 'coordinates': ('N055.29.57.000', 'W003.21.10.000')}, 'TIR': {'name': 'Tiree', 'frequency': '117.700', 'coordinates': ('N056.29.36.000', 'W006.52.32.000')}, 'TNT': {'name': 'Trent', 'frequency': '115.700', 'coordinates': ('N053.03.14.000', 'W001.40.12.000')}, 'TRN': {'name': 'Turnberry', 'frequency': '117.500', 'coordinates': ('N055.18.48.000', 'W004.47.02.000')}, 'WAL': {'name': 'Wallasey', 'frequency': '114.100', 'coordinates': ('N053.23.31.000', 'W003.08.04.000')}, 'WIK': {'name': 'Wick', 'frequency': '113.600', 'coordinates': ('N058.27.32.000', 'W003.06.01.000')}})
+ def test_parseENR4_4(self):
+ self.aip.airac = airac.Airac()
+ self.aip.airac.initialise("2020-01-02") # test on AIRAC 2001
+ self.aip.cycle = self.aip.airac.cycle()
+ self.aip.rootUrl = self.aip.airac.url()
+ self.assertEqual(self.aip.parseENR4_4(), {'ABAPO': {'coordinates': ('N052.06.10.000', 'W004.48.12.000')}, 'ABBOT': {'coordinates': ('N052.00.58.000', 'E000.35.58.000')}, 'ABDAL': {'coordinates': ('N051.26.46.000', 'W001.51.48.000')}, 'ABDUK': {'coordinates': ('N051.38.34.000', 'W003.48.01.000')}, 'ABEDA': {'coordinates': ('N052.19.40.000', 'E001.52.16.000')}, 'ABEVI': {'coordinates': ('N054.42.27.000', 'W002.46.29.000')}, 'ABGEP': {'coordinates': ('N053.25.39.000', 'W004.51.40.000')}, 'ABKAT': {'coordinates': ('N054.08.53.000', 'W001.58.47.000')}, 'ABKIM': {'coordinates': ('N050.50.43.000', 'W001.58.58.000')}, 'ABLIN': {'coordinates': ('N052.46.58.000', 'W004.59.33.000')}, 'ABSAV': {'coordinates': ('N050.38.29.000', 'W001.10.29.000')}, 'ABSUN': {'coordinates': ('N054.17.32.000', 'W006.15.18.000')}, 'ABTOS': {'coordinates': ('N053.14.45.000', 'E000.25.36.000')}, 'ABTUM': {'coordinates': ('N051.26.04.000', 'E001.22.29.000')}, 'ADASI': {'coordinates': ('N059.13.15.000', 'W006.17.31.000')}, 'ADCUB': {'coordinates': ('N050.54.36.000', 'W002.00.00.000')}, 'ADELU': {'coordinates': ('N053.26.30.000', 'W001.32.13.000')}, 'ADGEG': {'coordinates': ('N053.13.43.000', 'E002.19.06.000')}, 'ADKIK': {'coordinates': ('N050.47.49.000', 'W002.32.45.000')}, 'ADLOG': {'coordinates': ('N050.19.25.000', 'W000.49.33.000')}, 'ADMAG': {'coordinates': ('N051.14.02.000', 'E000.29.34.000')}, 'ADMEX': {'coordinates': ('N052.21.39.000', 'W001.27.45.000')}, 'ADMIS': {'coordinates': ('N051.59.49.000', 'E000.10.36.000')}, 'ADNAM': {'coordinates': ('N052.06.17.000', 'E000.44.19.000')}, 'ADODO': {'coordinates': ('N058.30.00.000', 'W010.00.00.000')}, 'ADRUD': {'coordinates': ('N049.10.14.000', 'W006.26.55.000')}, 'ADVAS': {'coordinates': ('N051.40.53.000', 'E001.26.33.000')}, 'AGLIL': {'coordinates': ('N053.34.42.000', 'W003.27.11.000')}, 'AGORI': {'coordinates': ('N057.00.00.000', 'W013.00.00.000')}, 'AGPED': {'coordinates': ('N055.07.19.000', 'W001.56.57.000')}, 'AKIVO': {'coordinates': ('N059.57.52.000', 'W009.30.00.000')}, 'AKOKO': {'coordinates': ('N053.14.16.000', 'E003.12.07.000')}, 'AKUPA': {'coordinates': ('N052.29.12.000', 'W001.05.28.000')}, 'ALASO': {'coordinates': ('N055.26.50.000', 'W001.33.54.000')}, 'ALAVA': {'coordinates': ('N053.23.43.000', 'W003.58.17.000')}, 'ALESO': {'coordinates': ('N050.34.32.000', 'E001.13.32.000')}, 'ALOTI': {'coordinates': ('N057.54.27.000', 'E003.36.30.000')}, 'ALUTA': {'coordinates': ('N049.00.19.000', 'W007.29.53.000')}, 'ALVIN': {'coordinates': ('N051.39.45.000', 'W002.40.42.000')}, 'AMDUT': {'coordinates': ('N050.40.28.000', 'E000.47.46.000')}, 'AMHAG': {'coordinates': ('N051.30.00.000', 'W004.18.01.000')}, 'AMLAD': {'coordinates': ('N056.15.52.000', 'W010.00.00.000')}, 'AMLET': {'coordinates': ('N053.16.07.000', 'W001.50.18.000')}, 'AMMAN': {'coordinates': ('N051.50.26.000', 'W003.59.54.000')}, 'AMPIT': {'coordinates': ('N053.11.39.000', 'W003.11.33.000')}, 'AMPOP': {'coordinates': ('N049.31.48.000', 'W006.09.03.000')}, 'AMRAL': {'coordinates': ('N052.06.53.000', 'W002.54.33.000')}, 'AMTAP': {'coordinates': ('N058.22.25.000', 'W009.30.00.000')}, 'AMTOD': {'coordinates': ('N050.32.16.000', 'W001.25.44.000')}, 'AMVEL': {'coordinates': ('N053.11.03.000', 'W000.01.55.000')}, 'ANJAK': {'coordinates': ('N052.53.50.000', 'W002.59.33.000')}, 'ANNET': {'coordinates': ('N049.39.05.000', 'W004.00.05.000')}, 'APPLE': {'coordinates': ('N054.30.00.000', 'W002.30.50.000')}, 'APSOV': {'coordinates': ('N055.49.23.000', 'W010.00.00.000')}, 'ARKIL': {'coordinates': ('N050.39.28.000', 
'W008.00.00.000')}, 'ARNUN': {'coordinates': ('N051.03.26.000', 'E000.55.53.000')}, 'ARPUB': {'coordinates': ('N053.35.34.000', 'W001.14.28.000')}, 'ARREK': {'coordinates': ('N051.36.43.000', 'E001.50.07.000')}, 'ARSAT': {'coordinates': ('N054.32.06.000', 'W001.44.19.000')}, 'ARTEX': {'coordinates': ('N056.53.41.000', 'E003.57.38.000')}, 'ARTHA': {'coordinates': ('N053.46.59.000', 'W002.17.07.000')}, 'ARTIX': {'coordinates': ('N053.23.42.000', 'W001.02.09.000')}, 'ASKAM': {'coordinates': ('N054.57.47.000', 'E003.13.50.000')}, 'ASKEY': {'coordinates': ('N052.00.50.000', 'W000.03.10.000')}, 'ASLIB': {'coordinates': ('N054.49.28.000', 'W003.03.39.000')}, 'ASMIM': {'coordinates': ('N053.26.46.000', 'W002.39.11.000')}, 'ASNIP': {'coordinates': ('N053.28.43.000', 'W002.54.26.000')}, 'ASNUD': {'coordinates': ('N056.41.39.000', 'W003.03.20.000')}, 'ASRAX': {'coordinates': ('N051.31.17.000', 'W004.16.43.000')}, 'ASRUS': {'coordinates': ('N055.03.21.000', 'W003.20.45.000')}, 'ASTRA': {'coordinates': ('N050.51.56.000', 'W000.08.47.000')}, 'ATNAK': {'coordinates': ('N057.00.00.000', 'E005.00.00.000')}, 'ATSAP': {'coordinates': ('N051.27.16.000', 'E001.30.17.000')}, 'ATSIX': {'coordinates': ('N060.00.00.000', 'W010.00.00.000')}, 'ATSUR': {'coordinates': ('N050.00.00.000', 'W014.00.00.000')}, 'ATWEL': {'coordinates': ('N050.50.51.000', 'W003.25.02.000')}, 'AVANT': {'coordinates': ('N050.49.12.000', 'W000.56.18.000')}, 'AVRAL': {'coordinates': ('N055.30.34.000', 'E003.42.59.000')}, 'BABAX': {'coordinates': ('N050.25.11.000', 'W003.48.17.000')}, 'BABGU': {'coordinates': ('N054.16.39.000', 'W005.16.25.000')}, 'BABRA': {'coordinates': ('N053.37.39.000', 'W003.56.43.000')}, 'BADGA': {'coordinates': ('N055.37.49.000', 'E002.36.58.000')}, 'BADIM': {'coordinates': ('N051.38.38.000', 'W002.30.40.000')}, 'BADSI': {'coordinates': ('N052.39.30.000', 'W004.46.17.000')}, 'BAGIT': {'coordinates': ('N053.13.45.000', 'W003.59.18.000')}, 'BAGSO': {'coordinates': ('N053.40.48.000', 'W005.30.00.000')}, 'BAKER': {'coordinates': ('N051.29.42.000', 'E000.17.53.000')}, 'BAKOX': {'coordinates': ('N053.31.43.000', 'W005.09.33.000')}, 'BAKUR': {'coordinates': ('N052.14.30.000', 'W005.40.49.000')}, 'BALID': {'coordinates': ('N056.29.49.000', 'W001.57.07.000')}, 'BALIX': {'coordinates': ('N059.00.00.000', 'W010.00.00.000')}, 'BAMEP': {'coordinates': ('N053.52.42.000', 'W002.16.09.000')}, 'BAMLI': {'coordinates': ('N054.08.28.000', 'W006.39.04.000')}, 'BAMRA': {'coordinates': ('N059.26.00.000', 'W003.57.10.000')}, 'BANBA': {'coordinates': ('N051.57.10.000', 'W006.14.21.000')}, 'BANEM': {'coordinates': ('N052.20.08.000', 'E001.30.19.000')}, 'BANTO': {'coordinates': ('N052.21.28.000', 'W000.13.57.000')}, 'BANVA': {'coordinates': ('N050.32.42.000', 'E000.45.12.000')}, 'BARKU': {'coordinates': ('N060.36.18.000', 'W009.24.36.000')}, 'BARMI': {'coordinates': ('N052.28.42.000', 'E002.34.14.000')}, 'BAROS': {'coordinates': ('N053.24.22.000', 'W003.26.43.000')}, 'BARTN': {'coordinates': ('N053.28.14.000', 'W002.25.41.000')}, 'BATLI': {'coordinates': ('N053.38.45.000', 'W001.21.37.000')}, 'BAVDO': {'coordinates': ('N054.18.20.000', 'W001.09.25.000')}, 'BAVUD': {'coordinates': ('N053.31.08.000', 'W004.51.09.000')}, 'BAZJO': {'coordinates': ('N050.50.25.000', 'W003.53.45.000')}, 'BEBNI': {'coordinates': ('N054.59.06.000', 'W003.35.14.000')}, 'BEDEK': {'coordinates': ('N051.22.15.000', 'W001.33.31.000')}, 'BEDFO': {'coordinates': ('N052.13.30.000', 'W000.33.48.000')}, 'BEDRA': {'coordinates': ('N049.00.00.000', 'W015.00.00.000')}, 'BEGAM': 
{'coordinates': ('N054.09.25.000', 'W002.07.15.000')}, 'BEGAS': {'coordinates': ('N045.00.00.000', 'W009.00.00.000')}, 'BEGID': {'coordinates': ('N056.30.00.000', 'W014.00.00.000')}, 'BEGTO': {'coordinates': ('N050.45.45.000', 'W001.14.08.000')}, 'BEKET': {'coordinates': ('N058.53.13.000', 'W001.31.20.000')}, 'BEKMO': {'coordinates': ('N052.59.10.000', 'E002.37.10.000')}, 'BELOX': {'coordinates': ('N053.53.16.000', 'W003.29.23.000')}, 'BENBO': {'coordinates': ('N050.27.05.000', 'E000.00.37.000')}, 'BENSU': {'coordinates': ('N051.34.44.000', 'W000.55.08.000')}, 'BEREP': {'coordinates': ('N059.02.09.000', 'E001.44.20.000')}, 'BERUL': {'coordinates': ('N052.40.16.000', 'W003.31.13.000')}, 'BERUX': {'coordinates': ('N045.00.00.000', 'W011.00.00.000')}, 'BESGA': {'coordinates': ('N060.47.23.000', 'W007.54.46.000')}, 'BETAX': {'coordinates': ('N053.00.33.000', 'W000.54.03.000')}, 'BETPO': {'coordinates': ('N051.47.54.000', 'W000.43.28.000')}, 'BEVAM': {'coordinates': ('N055.43.53.000', 'E000.15.03.000')}, 'BEVUP': {'coordinates': ('N050.48.00.000', 'W002.14.20.000')}, 'BIDVA': {'coordinates': ('N050.43.39.000', 'W000.58.39.000')}, 'BIFIN': {'coordinates': ('N052.03.23.000', 'W002.27.19.000')}, 'BIGNO': {'coordinates': ('N050.00.00.000', 'W002.49.30.000')}, 'BILLY': {'coordinates': ('N060.01.11.000', 'W008.34.08.000')}, 'BILNI': {'coordinates': ('N050.40.31.000', 'W002.07.33.000')}, 'BILTO': {'coordinates': ('N056.30.00.000', 'W015.00.00.000')}, 'BILVO': {'coordinates': ('N053.43.52.000', 'W003.11.09.000')}, 'BIMGO': {'coordinates': ('N049.30.00.000', 'W014.00.00.000')}, 'BINBO': {'coordinates': ('N053.24.06.000', 'E003.20.29.000')}, 'BINTI': {'coordinates': ('N054.33.45.000', 'W001.59.45.000')}, 'BISRU': {'coordinates': ('N051.04.18.000', 'E000.22.03.000')}, 'BLACA': {'coordinates': ('N054.53.00.000', 'W005.09.32.000')}, 'BLIXY': {'coordinates': ('N051.37.30.000', 'E001.58.32.000')}, 'BODSO': {'coordinates': ('N053.03.08.000', 'E002.05.41.000')}, 'BOGMI': {'coordinates': ('N050.59.53.000', 'W007.12.17.000')}, 'BOGNA': {'coordinates': ('N050.42.07.000', 'W000.15.06.000')}, 'BOLBI': {'coordinates': ('N053.28.00.000', 'W001.48.45.000')}, 'BOLRO': {'coordinates': ('N050.00.00.000', 'W001.41.29.000')}, 'BOMBO': {'coordinates': ('N051.59.44.000', 'W000.23.47.000')}, 'BONBY': {'coordinates': ('N057.53.30.000', 'W004.20.36.000')}, 'BOVVA': {'coordinates': ('N051.43.34.000', 'W000.32.59.000')}, 'BOYNE': {'coordinates': ('N053.46.02.000', 'W005.30.00.000')}, 'BRAIN': {'coordinates': ('N051.48.40.000', 'E000.39.06.000')}, 'BRASO': {'coordinates': ('N051.41.07.000', 'E000.41.00.000')}, 'BRIPO': {'coordinates': ('N050.42.27.000', 'W002.45.00.000')}, 'BRUCE': {'coordinates': ('N056.14.11.000', 'W005.50.28.000')}, 'BUDON': {'coordinates': ('N057.42.07.000', 'W000.33.43.000')}, 'BUGUP': {'coordinates': ('N051.52.37.000', 'W001.37.00.000')}, 'BUKUT': {'coordinates': ('N052.45.30.000', 'E002.48.02.000')}, 'BUMUX': {'coordinates': ('N050.48.16.000', 'W003.20.58.000')}, 'BURNI': {'coordinates': ('N053.43.38.000', 'W002.31.04.000')}, 'BUSTA': {'coordinates': ('N052.05.34.000', 'E000.04.04.000')}, 'BUZAD': {'coordinates': ('N051.56.32.000', 'W000.33.08.000')}, 'CALDA': {'coordinates': ('N053.46.27.000', 'W002.38.38.000')}, 'CAMBO': {'coordinates': ('N049.17.30.000', 'W005.51.35.000')}, 'CASEL': {'coordinates': ('N053.49.52.000', 'W004.10.57.000')}, 'CASEY': {'coordinates': ('N052.00.31.000', 'E000.34.59.000')}, 'CEDAR': {'coordinates': ('N052.31.07.000', 'W001.49.12.000')}, 'CHASE': {'coordinates': ('N052.36.48.000', 
'W001.55.02.000')}, 'CHINN': {'coordinates': ('N057.57.51.000', 'W004.11.17.000')}, 'CLIPY': {'coordinates': ('N052.00.03.000', 'W001.05.09.000')}, 'CLYDE': {'coordinates': ('N055.57.48.000', 'W004.47.32.000')}, 'COWJO': {'coordinates': ('N051.13.24.000', 'W003.48.34.000')}, 'COWLY': {'coordinates': ('N051.36.58.000', 'W001.03.54.000')}, 'CREWE': {'coordinates': ('N052.49.07.000', 'W002.18.27.000')}, 'CROFT': {'coordinates': ('N053.37.37.000', 'W002.32.20.000')}, 'CUTEL': {'coordinates': ('N055.53.09.000', 'E002.22.28.000')}, 'DAGGA': {'coordinates': ('N051.49.19.000', 'E000.47.39.000')}, 'DAGKO': {'coordinates': ('N050.51.38.000', 'W003.53.28.000')}, 'DALEY': {'coordinates': ('N053.40.08.000', 'W002.20.57.000')}, 'DANDI': {'coordinates': ('N053.41.04.000', 'W003.56.58.000')}, 'DAUNT': {'coordinates': ('N055.25.08.000', 'W004.52.47.000')}, 'DAVOT': {'coordinates': ('N057.20.42.000', 'W004.05.29.000')}, 'DAWLY': {'coordinates': ('N050.34.27.000', 'W003.27.50.000')}, 'DAYNE': {'coordinates': ('N053.14.19.000', 'W002.01.45.000')}, 'DEGOS': {'coordinates': ('N054.11.21.000', 'W006.54.23.000')}, 'DELBO': {'coordinates': ('N051.52.37.000', 'W001.16.24.000')}, 'DENBY': {'coordinates': ('N053.31.13.000', 'W001.57.40.000')}, 'DENOG': {'coordinates': ('N057.25.53.000', 'W001.45.16.000')}, 'DESIG': {'coordinates': ('N053.31.38.000', 'W001.53.34.000')}, 'DESNA': {'coordinates': ('N050.37.00.000', 'W000.47.00.000')}, 'DEVAL': {'coordinates': ('N050.51.25.000', 'E001.28.00.000')}, 'DEVBI': {'coordinates': ('N060.42.54.000', 'W008.46.10.000')}, 'DEVOM': {'coordinates': ('N050.43.18.000', 'W003.45.19.000')}, 'DEXEN': {'coordinates': ('N053.16.49.000', 'W005.30.00.000')}, 'DEZZU': {'coordinates': ('N051.36.19.000', 'W002.00.00.000')}, 'DIBAL': {'coordinates': ('N052.47.50.000', 'E002.30.00.000')}, 'DIDEL': {'coordinates': ('N050.50.35.000', 'W003.59.50.000')}, 'DIGBI': {'coordinates': ('N055.11.06.000', 'W001.03.01.000')}, 'DIGMA': {'coordinates': ('N053.39.00.000', 'W003.37.24.000')}, 'DIGSU': {'coordinates': ('N052.01.26.000', 'E000.38.05.000')}, 'DIGUT': {'coordinates': ('N051.39.01.000', 'W001.17.59.000')}, 'DIKAS': {'coordinates': ('N051.46.37.000', 'W003.15.33.000')}, 'DIKVU': {'coordinates': ('N051.17.47.000', 'E000.19.02.000')}, 'DIMLI': {'coordinates': ('N054.56.47.000', 'W006.36.55.000')}, 'DINIM': {'coordinates': ('N051.00.00.000', 'W015.00.00.000')}, 'DIPSO': {'coordinates': ('N053.03.13.000', 'W001.20.39.000')}, 'DISIT': {'coordinates': ('N052.06.11.000', 'W001.42.41.000')}, 'DISVO': {'coordinates': ('N050.39.31.000', 'W001.02.57.000')}, 'DITOB': {'coordinates': ('N052.17.01.000', 'E001.49.27.000')}, 'DOBEM': {'coordinates': ('N051.52.11.000', 'W002.55.45.000')}, 'DOGAL': {'coordinates': ('N054.00.00.000', 'W015.00.00.000')}, 'DOKEK': {'coordinates': ('N051.25.18.000', 'W006.03.29.000')}, 'DOKEN': {'coordinates': ('N055.40.55.000', 'E002.06.56.000')}, 'DOLAS': {'coordinates': ('N052.58.43.000', 'E001.00.03.000')}, 'DOLOP': {'coordinates': ('N053.12.51.000', 'W003.37.51.000')}, 'DOLUR': {'coordinates': ('N049.29.04.000', 'W005.03.26.000')}, 'DOMUT': {'coordinates': ('N050.15.43.000', 'W001.40.09.000')}, 'DONAX': {'coordinates': ('N053.29.11.000', 'W003.57.40.000')}, 'DONIB': {'coordinates': ('N055.43.09.000', 'W007.41.44.000')}, 'DONNA': {'coordinates': ('N051.42.00.000', 'W000.44.37.000')}, 'DOPEK': {'coordinates': ('N053.23.04.000', 'W000.55.32.000')}, 'DORKI': {'coordinates': ('N051.16.33.000', 'W000.15.52.000')}, 'DOSUN': {'coordinates': ('N060.07.03.000', 'W006.23.39.000')}, 'DRAKE': 
{'coordinates': ('N050.12.34.000', 'W000.04.34.000')}, 'DUCNO': {'coordinates': ('N051.58.24.000', 'W002.18.52.000')}, 'DUFFY': {'coordinates': ('N054.30.53.000', 'W005.51.39.000')}, 'DUFZO': {'coordinates': ('N051.30.17.000', 'W004.20.36.000')}, 'EBOTO': {'coordinates': ('N052.21.33.000', 'W000.41.40.000')}, 'EDONU': {'coordinates': ('N056.17.36.000', 'W003.33.41.000')}, 'EDUXO': {'coordinates': ('N053.24.24.000', 'W001.13.01.000')}, 'EKLAD': {'coordinates': ('N053.15.14.000', 'W002.49.29.000')}, 'EKNIV': {'coordinates': ('N051.24.26.000', 'E000.37.31.000')}, 'ELBOB': {'coordinates': ('N054.43.58.000', 'W007.44.38.000')}, 'ELDAX': {'coordinates': ('N050.29.18.000', 'E000.39.30.000')}, 'ELDER': {'coordinates': ('N050.39.48.000', 'W001.20.14.000')}, 'ELKOG': {'coordinates': ('N060.26.48.000', 'W009.30.00.000')}, 'ELMUD': {'coordinates': ('N055.06.56.000', 'W001.40.24.000')}, 'ELNAB': {'coordinates': ('N053.22.11.000', 'E001.12.51.000')}, 'ELNOD': {'coordinates': ('N053.28.26.000', 'W001.35.24.000')}, 'ELRAG': {'coordinates': ('N051.10.28.000', 'W005.06.21.000')}, 'ELRIP': {'coordinates': ('N050.37.45.000', 'W002.59.24.000')}, 'ELSAN': {'coordinates': ('N056.42.19.000', 'E003.43.58.000')}, 'ELSOX': {'coordinates': ('N051.00.00.000', 'W014.00.00.000')}, 'ELVOS': {'coordinates': ('N052.42.01.000', 'W001.18.25.000')}, 'EMBOR': {'coordinates': ('N052.57.48.000', 'W001.17.45.000')}, 'EMKAD': {'coordinates': ('N051.14.50.000', 'E000.42.34.000')}, 'EMKUK': {'coordinates': ('N051.58.37.000', 'W001.21.15.000')}, 'EMLON': {'coordinates': ('N053.12.20.000', 'E002.30.00.000')}, 'EMPER': {'coordinates': ('N049.00.00.000', 'W009.00.00.000')}, 'ENIPI': {'coordinates': ('N055.01.11.000', 'W003.15.10.000')}, 'ENITO': {'coordinates': ('N052.48.05.000', 'E002.28.03.000')}, 'ENOKA': {'coordinates': ('N055.20.10.000', 'W003.39.46.000')}, 'ENUKU': {'coordinates': ('N056.14.00.000', 'W003.38.19.000')}, 'EPOXI': {'coordinates': ('N053.19.59.000', 'W003.27.18.000')}, 'EPUNA': {'coordinates': ('N050.30.00.000', 'W014.00.00.000')}, 'ERAKA': {'coordinates': ('N058.00.00.000', 'W010.00.00.000')}, 'ERDOG': {'coordinates': ('N059.20.03.000', 'W001.25.04.000')}, 'ERDUV': {'coordinates': ('N053.52.35.000', 'W004.10.08.000')}, 'ERFAX': {'coordinates': ('N050.26.30.000', 'W003.59.03.000')}, 'ERGAB': {'coordinates': ('N054.09.22.000', 'W002.23.12.000')}, 'ERGUM': {'coordinates': ('N050.07.25.000', 'W001.44.50.000')}, 'ERING': {'coordinates': ('N051.35.26.000', 'E001.36.43.000')}, 'ERKEX': {'coordinates': ('N050.52.41.000', 'E001.19.37.000')}, 'ERKIT': {'coordinates': ('N054.01.49.000', 'W000.49.49.000')}, 'ERLOT': {'coordinates': ('N055.17.14.000', 'W000.35.06.000')}, 'ERNAN': {'coordinates': ('N054.16.44.000', 'W007.23.34.000')}, 'ERNOK': {'coordinates': ('N051.41.30.000', 'W002.56.44.000')}, 'ERSON': {'coordinates': ('N056.27.48.000', 'W004.18.24.000')}, 'ESKDO': {'coordinates': ('N055.17.56.000', 'W003.12.15.000')}, 'ESTRY': {'coordinates': ('N053.39.52.000', 'W003.15.42.000')}, 'ETARI': {'coordinates': ('N055.30.00.000', 'W015.00.00.000')}, 'ETIGA': {'coordinates': ('N054.02.39.000', 'W004.47.27.000')}, 'ETILO': {'coordinates': ('N057.30.00.000', 'W010.00.00.000')}, 'ETSOM': {'coordinates': ('N057.53.31.000', 'W009.30.00.000')}, 'ETVAX': {'coordinates': ('N050.58.07.000', 'E000.35.56.000')}, 'EVBEV': {'coordinates': ('N057.47.10.000', 'W007.40.41.000')}, 'EVEXU': {'coordinates': ('N050.41.16.000', 'W000.34.41.000')}, 'EVMEK': {'coordinates': ('N051.47.41.000', 'E001.12.18.000')}, 'EVNAL': {'coordinates': ('N057.24.31.000', 
'W009.30.00.000')}, 'EVNAS': {'coordinates': ('N051.40.04.000', 'E000.26.38.000')}, 'EVRIN': {'coordinates': ('N051.46.56.000', 'W006.33.48.000')}, 'EVSEM': {'coordinates': ('N051.43.50.000', 'W001.27.45.000')}, 'EVSON': {'coordinates': ('N053.21.27.000', 'W001.39.49.000')}, 'EVTES': {'coordinates': ('N051.04.55.000', 'W001.12.57.000')}, 'EXARO': {'coordinates': ('N051.00.03.000', 'W001.22.50.000')}, 'EXJET': {'coordinates': ('N050.53.19.000', 'W003.05.32.000')}, 'EXMOR': {'coordinates': ('N051.10.43.000', 'W003.21.35.000')}, 'FACTU': {'coordinates': ('N052.21.03.000', 'W002.17.13.000')}, 'FENIK': {'coordinates': ('N055.42.55.000', 'W004.17.31.000')}, 'FERIT': {'coordinates': ('N051.37.59.000', 'E000.32.21.000')}, 'FIGZI': {'coordinates': ('N051.57.35.000', 'W002.36.14.000')}, 'FINDO': {'coordinates': ('N056.22.09.000', 'W003.27.49.000')}, 'FINMA': {'coordinates': ('N051.59.47.000', 'W001.03.13.000')}, 'FITBO': {'coordinates': ('N052.13.09.000', 'W002.24.13.000')}, 'FONZU': {'coordinates': ('N050.49.22.000', 'W003.53.59.000')}, 'FORTY': {'coordinates': ('N057.57.00.000', 'E000.35.45.000')}, 'FOXLA': {'coordinates': ('N052.01.57.000', 'W004.12.37.000')}, 'FOYLE': {'coordinates': ('N056.08.34.000', 'W004.22.56.000')}, 'FRANE': {'coordinates': ('N051.23.06.000', 'E000.37.39.000')}, 'FYNER': {'coordinates': ('N056.02.56.000', 'W005.06.55.000')}, 'GAJIT': {'coordinates': ('N051.20.40.000', 'W002.00.00.000')}, 'GAKKA': {'coordinates': ('N050.41.37.000', 'W003.55.42.000')}, 'GANKI': {'coordinates': ('N056.45.48.000', 'W002.58.08.000')}, 'GAPLI': {'coordinates': ('N050.00.00.000', 'W008.00.00.000')}, 'GARMI': {'coordinates': ('N050.00.00.000', 'W001.22.50.000')}, 'GARVA': {'coordinates': ('N057.41.18.000', 'W004.29.41.000')}, 'GASBA': {'coordinates': ('N051.50.10.000', 'E000.48.53.000')}, 'GASGU': {'coordinates': ('N051.12.24.000', 'W000.57.36.000')}, 'GASKO': {'coordinates': ('N054.13.29.000', 'W001.57.21.000')}, 'GATRA': {'coordinates': ('N051.17.52.000', 'W006.37.55.000')}, 'GAVEL': {'coordinates': ('N059.23.00.000', 'W001.24.22.000')}, 'GAVGO': {'coordinates': ('N051.33.50.000', 'W001.42.36.000')}, 'GETNO': {'coordinates': ('N053.55.48.000', 'W003.29.47.000')}, 'GIBSO': {'coordinates': ('N050.44.59.000', 'W002.31.07.000')}, 'GIBSU': {'coordinates': ('N056.45.47.000', 'E003.16.10.000')}, 'GIGTO': {'coordinates': ('N053.38.01.000', 'W004.05.51.000')}, 'GIGUL': {'coordinates': ('N054.36.26.000', 'E003.10.10.000')}, 'GILDA': {'coordinates': ('N051.36.06.000', 'E000.35.01.000')}, 'GILTI': {'coordinates': ('N051.34.13.000', 'E002.03.18.000')}, 'GINGA': {'coordinates': ('N056.55.30.000', 'W009.30.00.000')}, 'GINIS': {'coordinates': ('N053.27.39.000', 'W004.51.29.000')}, 'GIPER': {'coordinates': ('N051.00.00.000', 'W012.00.00.000')}, 'GIPLO': {'coordinates': ('N053.31.31.000', 'W000.47.23.000')}, 'GIRLI': {'coordinates': ('N054.42.41.000', 'W001.48.11.000')}, 'GIRVA': {'coordinates': ('N055.11.08.000', 'W004.53.47.000')}, 'GISOK': {'coordinates': ('N051.30.09.000', 'W005.07.28.000')}, 'GITUS': {'coordinates': ('N050.20.00.000', 'W005.42.00.000')}, 'GIVEM': {'coordinates': ('N055.27.54.000', 'E000.14.53.000')}, 'GIVPO': {'coordinates': ('N053.17.10.000', 'E002.52.09.000')}, 'GIVUN': {'coordinates': ('N050.43.58.000', 'W000.48.03.000')}, 'GLESK': {'coordinates': ('N056.54.00.000', 'W002.47.45.000')}, 'GODLU': {'coordinates': ('N051.09.58.000', 'E001.17.04.000')}, 'GODOS': {'coordinates': ('N053.14.57.000', 'E003.25.47.000')}, 'GODPA': {'coordinates': ('N052.59.00.000', 'W003.10.01.000')}, 'GOGIG': 
{'coordinates': ('N051.53.30.000', 'W003.15.02.000')}, 'GOGSI': {'coordinates': ('N051.17.27.000', 'W001.00.02.000')}, 'GOKTU': {'coordinates': ('N050.15.05.000', 'W001.22.45.000')}, 'GOLES': {'coordinates': ('N053.36.29.000', 'W001.05.00.000')}, 'GOLVO': {'coordinates': ('N054.24.41.000', 'E003.35.08.000')}, 'GOMOT': {'coordinates': ('N055.55.10.000', 'W000.32.39.000')}, 'GOMUP': {'coordinates': ('N057.00.00.000', 'W010.00.00.000')}, 'GONAN': {'coordinates': ('N045.00.00.000', 'W014.00.00.000')}, 'GOREV': {'coordinates': ('N056.03.12.000', 'E005.00.00.000')}, 'GOSAM': {'coordinates': ('N055.47.19.000', 'W004.12.02.000')}, 'GOTNA': {'coordinates': ('N054.35.41.000', 'W005.35.53.000')}, 'GOXUL': {'coordinates': ('N051.21.32.000', 'W000.10.33.000')}, 'GRICE': {'coordinates': ('N056.11.48.000', 'W003.41.08.000')}, 'GROVE': {'coordinates': ('N052.23.38.000', 'W001.55.44.000')}, 'GUNPA': {'coordinates': ('N061.00.00.000', 'E000.00.00.000')}, 'GUNSO': {'coordinates': ('N049.03.10.000', 'W011.46.06.000')}, 'GUNTU': {'coordinates': ('N053.35.37.000', 'W003.09.54.000')}, 'GUSSI': {'coordinates': ('N057.12.47.000', 'W004.07.27.000')}, 'HALIF': {'coordinates': ('N053.44.06.000', 'W001.34.50.000')}, 'HANKY': {'coordinates': ('N051.06.46.000', 'W001.07.51.000')}, 'HARDY': {'coordinates': ('N050.28.16.000', 'E000.29.28.000')}, 'HASTY': {'coordinates': ('N050.43.42.000', 'E000.32.00.000')}, 'HAVEN': {'coordinates': ('N055.27.40.000', 'W002.59.47.000')}, 'HAWKE': {'coordinates': ('N050.22.27.000', 'E000.05.25.000')}, 'HAZEL': {'coordinates': ('N051.00.19.000', 'W000.59.04.000')}, 'HEMEL': {'coordinates': ('N051.48.20.000', 'W000.25.10.000')}, 'HERON': {'coordinates': ('N055.20.35.000', 'W005.00.00.000')}, 'HICAP': {'coordinates': ('N051.05.16.000', 'W003.50.24.000')}, 'HILLY': {'coordinates': ('N051.20.06.000', 'E000.14.37.000')}, 'HOFIK': {'coordinates': ('N051.26.05.000', 'W002.00.00.000')}, 'HOLLY': {'coordinates': ('N050.53.12.000', 'W000.05.42.000')}, 'HOTBE': {'coordinates': ('N050.47.49.000', 'W003.54.19.000')}, 'IBNID': {'coordinates': ('N050.31.44.000', 'W001.47.08.000')}, 'IBOLU': {'coordinates': ('N056.23.10.000', 'W003.08.03.000')}, 'IBRAR': {'coordinates': ('N053.31.54.000', 'W003.41.11.000')}, 'IBROD': {'coordinates': ('N056.30.00.000', 'W010.00.00.000')}, 'IBTES': {'coordinates': ('N054.26.25.000', 'W005.03.15.000')}, 'IDESI': {'coordinates': ('N051.53.52.000', 'E001.53.08.000')}, 'IDEXA': {'coordinates': ('N053.28.14.000', 'W005.09.51.000')}, 'IDGAS': {'coordinates': ('N054.14.23.000', 'W004.33.29.000')}, 'IDITU': {'coordinates': ('N051.32.25.000', 'E000.58.37.000')}, 'IDKOL': {'coordinates': ('N054.05.43.000', 'W001.59.45.000')}, 'IDOKI': {'coordinates': ('N049.51.37.000', 'W006.11.39.000')}, 'IDOVO': {'coordinates': ('N052.13.26.000', 'W003.33.04.000')}, 'ILZAP': {'coordinates': ('N051.35.43.000', 'W005.11.35.000')}, 'IMVUR': {'coordinates': ('N051.10.29.000', 'W000.31.56.000')}, 'INBAS': {'coordinates': ('N056.42.00.000', 'W004.14.59.000')}, 'INBOB': {'coordinates': ('N055.36.25.000', 'E005.00.00.000')}, 'INGUR': {'coordinates': ('N051.42.42.000', 'W002.51.37.000')}, 'INKEK': {'coordinates': ('N050.38.53.000', 'W000.32.09.000')}, 'INKOB': {'coordinates': ('N053.52.16.000', 'W004.24.14.000')}, 'INLAK': {'coordinates': ('N051.28.41.000', 'W001.31.27.000')}, 'INLIM': {'coordinates': ('N051.54.23.000', 'E001.19.13.000')}, 'INPIP': {'coordinates': ('N054.52.36.000', 'W002.53.46.000')}, 'INPUT': {'coordinates': ('N055.18.18.000', 'E004.39.05.000')}, 'INREV': {'coordinates': ('N055.08.57.000', 
'W003.05.39.000')}, 'INRUB': {'coordinates': ('N051.49.10.000', 'W003.51.41.000')}, 'INSUN': {'coordinates': ('N050.23.43.000', 'W006.19.24.000')}, 'IPDOR': {'coordinates': ('N055.22.39.000', 'W003.15.45.000')}, 'IPKUS': {'coordinates': ('N051.31.52.000', 'E000.23.19.000')}, 'IPNOX': {'coordinates': ('N054.13.40.000', 'W002.42.22.000')}, 'IPRIL': {'coordinates': ('N051.40.29.000', 'W000.13.35.000')}, 'IPSAD': {'coordinates': ('N055.21.45.000', 'W002.41.14.000')}, 'IPSET': {'coordinates': ('N054.31.06.000', 'W005.27.09.000')}, 'IPSIR': {'coordinates': ('N054.04.54.000', 'W001.56.57.000')}, 'IPZOL': {'coordinates': ('N050.54.27.000', 'W003.13.40.000')}, 'IRKUM': {'coordinates': ('N052.59.48.000', 'W005.22.39.000')}, 'IRKUN': {'coordinates': ('N050.55.16.000', 'E001.28.00.000')}, 'ITJON': {'coordinates': ('N051.28.32.000', 'W002.59.08.000')}, 'ITSUX': {'coordinates': ('N056.03.00.000', 'E003.30.00.000')}, 'ITVIP': {'coordinates': ('N051.17.14.000', 'E001.03.12.000')}, 'IVLOD': {'coordinates': ('N053.07.46.000', 'W003.09.19.000')}, 'IXURA': {'coordinates': ('N052.04.05.000', 'W001.25.40.000')}, 'JACKO': {'coordinates': ('N051.44.09.000', 'E001.25.36.000')}, 'JOZMA': {'coordinates': ('N050.37.38.000', 'W003.56.35.000')}, 'KAPEX': {'coordinates': ('N050.40.30.000', 'W001.51.00.000')}, 'KARNO': {'coordinates': ('N052.53.40.000', 'W002.53.54.000')}, 'KATHY': {'coordinates': ('N050.31.14.000', 'W001.20.00.000')}, 'KEGUN': {'coordinates': ('N053.11.33.000', 'W003.09.01.000')}, 'KELLY': {'coordinates': ('N053.54.19.000', 'W004.21.51.000')}, 'KEMPY': {'coordinates': ('N052.07.39.000', 'E000.38.46.000')}, 'KENET': {'coordinates': ('N051.31.14.000', 'W001.27.18.000')}, 'KEPAD': {'coordinates': ('N052.48.35.000', 'W002.28.44.000')}, 'KESEG': {'coordinates': ('N060.00.58.000', 'W006.00.56.000')}, 'KESIX': {'coordinates': ('N056.57.00.000', 'W014.00.00.000')}, 'KESUP': {'coordinates': ('N051.19.32.000', 'W003.39.17.000')}, 'KIDLI': {'coordinates': ('N051.46.17.000', 'W001.21.41.000')}, 'KILFA': {'coordinates': ('N051.42.09.000', 'W001.57.59.000')}, 'KLONN': {'coordinates': ('N058.23.24.000', 'E002.49.44.000')}, 'KOBBI': {'coordinates': ('N051.41.14.000', 'W000.09.18.000')}, 'KOFAL': {'coordinates': ('N050.28.31.000', 'W003.58.36.000')}, 'KOGAD': {'coordinates': ('N049.30.00.000', 'W015.00.00.000')}, 'KOKAL': {'coordinates': ('N058.57.35.000', 'W002.53.38.000')}, 'KOKIB': {'coordinates': ('N054.30.00.000', 'W014.00.00.000')}, 'KOLAG': {'coordinates': ('N053.02.37.000', 'E003.15.18.000')}, 'KOLID': {'coordinates': ('N053.34.26.000', 'W002.45.20.000')}, 'KONAN': {'coordinates': ('N051.07.51.000', 'E002.00.00.000')}, 'KOPUL': {'coordinates': ('N051.32.32.000', 'E001.08.14.000')}, 'KOTEM': {'coordinates': ('N050.00.00.000', 'W001.03.14.000')}, 'KOXOD': {'coordinates': ('N050.42.29.000', 'W003.19.58.000')}, 'KUBAX': {'coordinates': ('N052.56.34.000', 'E002.57.13.000')}, 'KUGUR': {'coordinates': ('N055.30.00.000', 'W010.00.00.000')}, 'KUKIS': {'coordinates': ('N051.41.56.000', 'W003.04.37.000')}, 'KUMIL': {'coordinates': ('N050.34.31.000', 'W001.36.39.000')}, 'KUNAV': {'coordinates': ('N050.30.54.000', 'E001.03.56.000')}, 'KUVOX': {'coordinates': ('N058.04.59.000', 'W007.36.24.000')}, 'KUXEM': {'coordinates': ('N053.15.11.000', 'W002.40.47.000')}, 'LAGAV': {'coordinates': ('N056.53.35.000', 'W004.12.10.000')}, 'LAKEY': {'coordinates': ('N054.14.20.000', 'W002.58.52.000')}, 'LAMAT': {'coordinates': ('N051.18.07.000', 'W003.01.03.000')}, 'LAMIX': {'coordinates': ('N053.18.29.000', 'W000.40.56.000')}, 'LAMMA': 
{'coordinates': ('N055.51.30.000', 'W002.46.03.000')}, 'LAMRO': {'coordinates': ('N057.01.51.000', 'E003.33.09.000')}, 'LAMSO': {'coordinates': ('N052.43.58.000', 'E002.59.40.000')}, 'LANAK': {'coordinates': ('N055.42.01.000', 'W003.56.19.000')}, 'LANON': {'coordinates': ('N052.25.52.000', 'W004.22.21.000')}, 'LANPI': {'coordinates': ('N052.11.15.000', 'W005.23.30.000')}, 'LANVA': {'coordinates': ('N053.43.48.000', 'W004.26.38.000')}, 'LAPRA': {'coordinates': ('N052.07.07.000', 'E001.12.36.000')}, 'LARCK': {'coordinates': ('N050.54.42.000', 'E000.26.48.000')}, 'LARDI': {'coordinates': ('N053.36.16.000', 'E003.30.57.000')}, 'LARGA': {'coordinates': ('N054.51.50.000', 'E004.09.12.000')}, 'LARLA': {'coordinates': ('N049.22.41.000', 'W007.07.15.000')}, 'LASNO': {'coordinates': ('N048.35.54.000', 'W009.00.00.000')}, 'LATMU': {'coordinates': ('N053.15.25.000', 'E002.05.22.000')}, 'LAVRI': {'coordinates': ('N051.15.49.000', 'E000.34.49.000')}, 'LAZTU': {'coordinates': ('N051.42.16.000', 'W004.52.00.000')}, 'LEDBO': {'coordinates': ('N052.32.35.000', 'E002.06.08.000')}, 'LEDGO': {'coordinates': ('N051.14.24.000', 'W007.34.05.000')}, 'LEGRO': {'coordinates': ('N053.19.35.000', 'E001.30.46.000')}, 'LELDO': {'coordinates': ('N053.43.00.000', 'W004.05.07.000')}, 'LELNA': {'coordinates': ('N049.58.57.000', 'W002.06.23.000')}, 'LEMGU': {'coordinates': ('N052.24.22.000', 'W005.22.36.000')}, 'LESLU': {'coordinates': ('N051.00.00.000', 'W008.00.00.000')}, 'LESNI': {'coordinates': ('N057.32.33.000', 'W001.16.22.000')}, 'LESRA': {'coordinates': ('N055.23.08.000', 'E005.00.00.000')}, 'LESTA': {'coordinates': ('N052.44.27.000', 'W001.04.19.000')}, 'LIBBA': {'coordinates': ('N055.43.26.000', 'W003.44.43.000')}, 'LIBSO': {'coordinates': ('N053.31.29.000', 'E000.05.36.000')}, 'LIFFY': {'coordinates': ('N053.28.48.000', 'W005.30.00.000')}, 'LIMKA': {'coordinates': ('N054.30.26.000', 'W005.13.11.000')}, 'LINDY': {'coordinates': ('N051.28.14.000', 'W001.02.57.000')}, 'LIPGO': {'coordinates': ('N053.03.50.000', 'W005.30.00.000')}, 'LISBO': {'coordinates': ('N054.31.30.000', 'W006.05.24.000')}, 'LISTO': {'coordinates': ('N053.08.36.000', 'W002.11.57.000')}, 'LIVSU': {'coordinates': ('N051.41.49.000', 'E000.50.58.000')}, 'LIZAD': {'coordinates': ('N049.35.25.000', 'W004.19.49.000')}, 'LOGAN': {'coordinates': ('N051.44.51.000', 'E001.36.43.000')}, 'LOMON': {'coordinates': ('N056.03.46.000', 'W004.34.48.000')}, 'LOMVO': {'coordinates': ('N053.16.16.000', 'W005.10.46.000')}, 'LONAM': {'coordinates': ('N053.50.22.000', 'E003.56.33.000')}, 'LOREL': {'coordinates': ('N052.00.50.000', 'W000.03.10.000')}, 'LORKU': {'coordinates': ('N049.56.18.000', 'W002.22.16.000')}, 'LOVEL': {'coordinates': ('N053.15.06.000', 'W002.16.28.000')}, 'LUCCO': {'coordinates': ('N055.20.49.000', 'W004.33.41.000')}, 'LUFTU': {'coordinates': ('N051.51.39.000', 'W003.55.01.000')}, 'LUGIS': {'coordinates': ('N050.00.00.000', 'W000.52.20.000')}, 'LULOX': {'coordinates': ('N050.22.00.000', 'W008.00.00.000')}, 'LUMBA': {'coordinates': ('N050.56.44.000', 'E000.15.42.000')}, 'LUSEN': {'coordinates': ('N060.30.00.000', 'W010.00.00.000')}, 'LUSIV': {'coordinates': ('N055.40.02.000', 'W004.14.41.000')}, 'LUSOD': {'coordinates': ('N053.56.23.000', 'W004.19.26.000')}, 'LUTIP': {'coordinates': ('N053.15.22.000', 'W004.44.16.000')}, 'LUTOV': {'coordinates': ('N055.14.22.000', 'W010.00.00.000')}, 'LUXTO': {'coordinates': ('N052.08.31.000', 'W002.03.38.000')}, 'LYNAS': {'coordinates': ('N053.26.33.000', 'W004.20.00.000')}, 'MABUG': {'coordinates': ('N049.47.05.000', 
'W005.56.12.000')}, 'MADAD': {'coordinates': ('N055.54.47.000', 'W001.44.02.000')}, 'MAGEE': {'coordinates': ('N054.47.30.000', 'W005.36.30.000')}, 'MAKUX': {'coordinates': ('N053.58.30.000', 'W004.52.28.000')}, 'MALBY': {'coordinates': ('N051.35.33.000', 'W002.03.42.000')}, 'MALUD': {'coordinates': ('N053.24.48.000', 'W003.36.30.000')}, 'MAMUL': {'coordinates': ('N053.25.05.000', 'W001.16.48.000')}, 'MANGO': {'coordinates': ('N051.41.34.000', 'E000.47.26.000')}, 'MANIG': {'coordinates': ('N049.47.07.000', 'W003.15.34.000')}, 'MAPLE': {'coordinates': ('N052.23.36.000', 'W001.40.51.000')}, 'MARUK': {'coordinates': ('N050.14.47.000', 'W002.04.20.000')}, 'MASOP': {'coordinates': ('N054.09.21.000', 'W004.21.15.000')}, 'MATCH': {'coordinates': ('N051.46.45.000', 'E000.15.00.000')}, 'MATIK': {'coordinates': ('N061.00.00.000', 'W008.04.00.000')}, 'MATUT': {'coordinates': ('N054.10.17.000', 'W004.38.22.000')}, 'MAVET': {'coordinates': ('N054.02.57.000', 'W003.31.04.000')}, 'MAYLA': {'coordinates': ('N051.37.41.000', 'E000.43.12.000')}, 'MEDOG': {'coordinates': ('N051.57.01.000', 'W003.32.58.000')}, 'MEGEL': {'coordinates': ('N052.20.58.000', 'E002.04.25.000')}, 'MERLY': {'coordinates': ('N051.20.00.000', 'W005.00.00.000')}, 'MIMBI': {'coordinates': ('N051.32.57.000', 'W001.41.36.000')}, 'MIMKU': {'coordinates': ('N056.00.00.000', 'W010.00.00.000')}, 'MIMVA': {'coordinates': ('N053.06.03.000', 'E003.18.13.000')}, 'MIPVO': {'coordinates': ('N052.13.29.000', 'E001.36.09.000')}, 'MIRSI': {'coordinates': ('N053.32.17.000', 'W002.42.42.000')}, 'MITSO': {'coordinates': ('N053.39.07.000', 'W000.53.38.000')}, 'MIVLA': {'coordinates': ('N050.46.30.000', 'W001.15.58.000')}, 'MOCHA': {'coordinates': ('N059.32.56.000', 'W001.21.59.000')}, 'MODGO': {'coordinates': ('N060.26.43.000', 'W007.39.55.000')}, 'MOGLI': {'coordinates': ('N052.18.30.000', 'W000.16.09.000')}, 'MOGLO': {'coordinates': ('N055.30.00.000', 'W014.00.00.000')}, 'MOGMO': {'coordinates': ('N053.24.43.000', 'W001.12.51.000')}, 'MOGTA': {'coordinates': ('N053.10.08.000', 'W002.38.10.000')}, 'MOKBU': {'coordinates': ('N051.16.10.000', 'E001.29.14.000')}, 'MOLAK': {'coordinates': ('N054.36.49.000', 'W009.30.23.000')}, 'MOLIX': {'coordinates': ('N052.49.19.000', 'E003.04.07.000')}, 'MONTY': {'coordinates': ('N052.53.34.000', 'W003.10.26.000')}, 'MOPAT': {'coordinates': ('N051.29.55.000', 'W007.05.37.000')}, 'MORAG': {'coordinates': ('N052.45.10.000', 'W005.30.00.000')}, 'MOSIS': {'coordinates': ('N049.00.26.000', 'W007.12.53.000')}, 'MOSUN': {'coordinates': ('N051.53.38.000', 'W002.26.10.000')}, 'MOTOX': {'coordinates': ('N050.57.28.000', 'E001.28.00.000')}, 'MOVEN': {'coordinates': ('N052.03.43.000', 'W001.48.47.000')}, 'MULIT': {'coordinates': ('N051.00.47.000', 'W003.23.19.000')}, 'NAKID': {'coordinates': ('N049.42.54.000', 'W004.37.23.000')}, 'NALAN': {'coordinates': ('N061.00.00.000', 'W006.00.00.000')}, 'NALAX': {'coordinates': ('N053.29.00.000', 'E000.24.06.000')}, 'NANEV': {'coordinates': ('N057.34.29.000', 'W002.27.14.000')}, 'NANTI': {'coordinates': ('N053.08.15.000', 'W002.34.01.000')}, 'NANUM': {'coordinates': ('N052.01.35.000', 'W001.37.47.000')}, 'NAPEX': {'coordinates': ('N053.12.23.000', 'W000.51.40.000')}, 'NATEB': {'coordinates': ('N055.02.18.000', 'W001.41.54.000')}, 'NATKO': {'coordinates': ('N053.27.12.000', 'W004.38.07.000')}, 'NAVEL': {'coordinates': ('N055.35.13.000', 'E000.50.31.000')}, 'NAVOV': {'coordinates': ('N051.48.31.000', 'W003.15.25.000')}, 'NAVPI': {'coordinates': ('N052.32.50.000', 'E002.50.26.000')}, 'NAVSO': 
{'coordinates': ('N055.13.22.000', 'W003.48.49.000')}, 'NAXAT': {'coordinates': ('N051.47.21.000', 'W001.57.56.000')}, 'NAXIL': {'coordinates': ('N056.57.24.000', 'W002.43.26.000')}, 'NEDEX': {'coordinates': ('N051.52.37.000', 'W001.28.20.000')}, 'NEDUL': {'coordinates': ('N050.39.58.000', 'W001.32.52.000')}, 'NEKAP': {'coordinates': ('N051.43.35.000', 'W005.17.26.000')}, 'NELBO': {'coordinates': ('N054.29.24.000', 'W005.18.10.000')}, 'NELKO': {'coordinates': ('N050.42.14.000', 'W000.57.18.000')}, 'NELSA': {'coordinates': ('N053.51.48.000', 'W002.11.05.000')}, 'NENVO': {'coordinates': ('N052.42.38.000', 'W002.53.15.000')}, 'NEPNA': {'coordinates': ('N051.29.58.000', 'E000.26.57.000')}, 'NERTU': {'coordinates': ('N049.00.00.000', 'W014.00.00.000')}, 'NESDI': {'coordinates': ('N057.07.18.000', 'W004.08.48.000')}, 'NESON': {'coordinates': ('N054.13.46.000', 'W007.07.24.000')}, 'NETKI': {'coordinates': ('N055.00.00.000', 'W014.00.00.000')}, 'NETVU': {'coordinates': ('N050.24.45.000', 'E000.28.54.000')}, 'NEVIL': {'coordinates': ('N050.00.00.000', 'W000.22.06.000')}, 'NEVIS': {'coordinates': ('N056.41.43.000', 'W004.32.41.000')}, 'NEVRI': {'coordinates': ('N054.04.06.000', 'W006.16.11.000')}, 'NEXUS': {'coordinates': ('N056.12.43.000', 'W001.50.41.000')}, 'NEXXU': {'coordinates': ('N051.32.48.000', 'W004.43.38.000')}, 'NEZZA': {'coordinates': ('N051.23.52.000', 'W003.19.16.000')}, 'NIBDA': {'coordinates': ('N051.00.52.000', 'W000.55.01.000')}, 'NIBOG': {'coordinates': ('N055.00.00.000', 'W010.00.00.000')}, 'NIGIT': {'coordinates': ('N051.18.47.000', 'W001.10.15.000')}, 'NILAV': {'coordinates': ('N045.00.00.000', 'W013.25.00.000')}, 'NILGI': {'coordinates': ('N052.48.35.000', 'W003.10.50.000')}, 'NILON': {'coordinates': ('N051.35.46.000', 'E000.38.04.000')}, 'NIMAT': {'coordinates': ('N053.57.54.000', 'W005.44.32.000')}, 'NINEB': {'coordinates': ('N054.03.44.000', 'W004.07.42.000')}, 'NINEX': {'coordinates': ('N058.51.20.000', 'W009.30.00.000')}, 'NIPIT': {'coordinates': ('N054.27.09.000', 'W008.24.10.000')}, 'NISBI': {'coordinates': ('N053.21.29.000', 'W000.39.21.000')}, 'NITON': {'coordinates': ('N052.33.12.000', 'W003.12.01.000')}, 'NIVKO': {'coordinates': ('N050.56.06.000', 'E000.32.28.000')}, 'NIVUN': {'coordinates': ('N057.34.12.000', 'E004.08.13.000')}, 'NOBDO': {'coordinates': ('N055.04.53.000', 'E002.47.48.000')}, 'NOCAD': {'coordinates': ('N053.58.37.000', 'W004.59.13.000')}, 'NOKIN': {'coordinates': ('N053.04.36.000', 'W002.52.58.000')}, 'NOMSU': {'coordinates': ('N053.28.06.000', 'W003.26.04.000')}, 'NONVA': {'coordinates': ('N051.38.46.000', 'E001.21.44.000')}, 'NOPKI': {'coordinates': ('N054.05.16.000', 'W004.26.08.000')}, 'NORBO': {'coordinates': ('N055.35.45.000', 'W004.45.43.000')}, 'NORLA': {'coordinates': ('N051.37.09.000', 'W006.52.11.000')}, 'NORRY': {'coordinates': ('N051.28.47.000', 'W001.07.24.000')}, 'NOSLO': {'coordinates': ('N053.22.54.000', 'W003.38.42.000')}, 'NOTGI': {'coordinates': ('N050.35.10.000', 'W000.25.03.000')}, 'NOTRO': {'coordinates': ('N050.26.12.000', 'W003.10.21.000')}, 'NOVBA': {'coordinates': ('N056.12.00.000', 'W003.10.51.000')}, 'NOVMA': {'coordinates': ('N051.02.12.000', 'W000.45.15.000')}, 'NUBRI': {'coordinates': ('N051.20.17.000', 'W001.13.56.000')}, 'NUDNA': {'coordinates': ('N052.03.55.000', 'E000.50.17.000')}, 'NUGBO': {'coordinates': ('N051.58.22.000', 'W000.04.12.000')}, 'NUGRA': {'coordinates': ('N053.01.46.000', 'W002.18.14.000')}, 'NULGU': {'coordinates': ('N050.52.17.000', 'W001.59.39.000')}, 'NUMPI': {'coordinates': ('N054.08.40.000', 
'W005.35.36.000')}, 'NUMPO': {'coordinates': ('N051.36.36.000', 'W003.17.01.000')}, 'OBOXA': {'coordinates': ('N054.10.36.000', 'W001.54.20.000')}, 'OBUBA': {'coordinates': ('N057.46.40.000', 'W007.33.19.000')}, 'OBUNI': {'coordinates': ('N053.54.40.000', 'W002.44.33.000')}, 'ODADA': {'coordinates': ('N050.54.56.000', 'W005.26.34.000')}, 'ODIGI': {'coordinates': ('N055.11.44.000', 'W003.25.39.000')}, 'ODMIX': {'coordinates': ('N056.58.12.000', 'E001.28.03.000')}, 'ODMOS': {'coordinates': ('N054.45.26.000', 'E003.02.52.000')}, 'ODNEK': {'coordinates': ('N053.34.37.000', 'W000.18.13.000')}, 'ODPEX': {'coordinates': ('N059.24.36.000', 'W009.30.00.000')}, 'ODREP': {'coordinates': ('N050.00.00.000', 'W001.30.07.000')}, 'ODROB': {'coordinates': ('N051.39.15.000', 'E001.54.45.000')}, 'ODUKU': {'coordinates': ('N051.35.32.000', 'E000.17.15.000')}, 'ODVIK': {'coordinates': ('N051.09.57.000', 'E000.29.09.000')}, 'ODVOD': {'coordinates': ('N052.07.56.000', 'E000.08.53.000')}, 'OGAGI': {'coordinates': ('N049.27.22.000', 'W005.01.57.000')}, 'OGTUG': {'coordinates': ('N053.40.12.000', 'W005.08.50.000')}, 'OKESI': {'coordinates': ('N051.26.36.000', 'W002.03.42.000')}, 'OKNOB': {'coordinates': ('N055.21.45.000', 'W004.49.42.000')}, 'OKPAL': {'coordinates': ('N056.49.21.000', 'W002.04.35.000')}, 'OKTAD': {'coordinates': ('N051.49.04.000', 'W002.32.46.000')}, 'OKTEM': {'coordinates': ('N052.43.10.000', 'W003.11.15.000')}, 'OKVAP': {'coordinates': ('N050.57.49.000', 'E001.19.56.000')}, 'OLGUD': {'coordinates': ('N050.43.56.000', 'W001.31.22.000')}, 'OLIVE': {'coordinates': ('N052.24.12.000', 'W001.56.12.000')}, 'OLNEY': {'coordinates': ('N052.07.40.000', 'W000.44.03.000')}, 'OLPOP': {'coordinates': ('N054.05.32.000', 'W002.32.08.000')}, 'OMOKO': {'coordinates': ('N048.50.20.000', 'W012.00.00.000')}, 'ORIST': {'coordinates': ('N050.00.00.000', 'W001.50.42.000')}, 'ORSUM': {'coordinates': ('N057.03.07.000', 'W005.17.41.000')}, 'ORTAC': {'coordinates': ('N049.59.57.000', 'W002.00.18.000')}, 'ORTAV': {'coordinates': ('N059.30.00.000', 'W010.00.00.000')}, 'ORVIK': {'coordinates': ('N059.38.18.000', 'E000.40.09.000')}, 'ORVUL': {'coordinates': ('N055.10.39.000', 'W004.10.07.000')}, 'ORVUX': {'coordinates': ('N050.31.53.000', 'W003.51.07.000')}, 'OSBON': {'coordinates': ('N061.00.00.000', 'W005.00.00.000')}, 'OSBOX': {'coordinates': ('N056.48.23.000', 'W012.48.06.000')}, 'OSGIL': {'coordinates': ('N051.35.20.000', 'W005.07.45.000')}, 'OSKOT': {'coordinates': ('N052.20.30.000', 'W002.03.30.000')}, 'OSLEL': {'coordinates': ('N057.28.58.000', 'W001.31.58.000')}, 'OSMAP': {'coordinates': ('N049.52.11.000', 'W005.42.29.000')}, 'OSMEG': {'coordinates': ('N055.08.26.000', 'W004.06.03.000')}, 'OSNUG': {'coordinates': ('N050.53.36.000', 'W002.00.14.000')}, 'OSPOL': {'coordinates': ('N050.09.00.000', 'W000.11.08.000')}, 'OTBED': {'coordinates': ('N053.17.17.000', 'E000.01.55.000')}, 'OTBUN': {'coordinates': ('N055.16.50.000', 'W002.26.00.000')}, 'OTMET': {'coordinates': ('N050.40.55.000', 'W002.30.53.000')}, 'OTSID': {'coordinates': ('N051.10.23.000', 'W000.13.51.000')}, 'OVDAN': {'coordinates': ('N057.53.21.000', 'E000.18.18.000')}, 'OXLOW': {'coordinates': ('N051.01.55.000', 'W002.00.00.000')}, 'PAAVO': {'coordinates': ('N051.51.49.000', 'E000.51.16.000')}, 'PAVLO': {'coordinates': ('N050.56.05.000', 'W005.53.44.000')}, 'PEDIG': {'coordinates': ('N052.44.48.000', 'W001.43.10.000')}, 'PELET': {'coordinates': ('N054.39.12.000', 'E003.25.52.000')}, 'PEMAK': {'coordinates': ('N049.26.56.000', 'W006.13.05.000')}, 'PEMOB': 
{'coordinates': ('N052.08.38.000', 'W005.09.41.000')}, 'PEMOS': {'coordinates': ('N061.00.00.000', 'W004.00.00.000')}, 'PENIL': {'coordinates': ('N053.36.57.000', 'W003.39.49.000')}, 'PENUN': {'coordinates': ('N057.06.36.000', 'E004.50.11.000')}, 'PENUX': {'coordinates': ('N051.06.32.000', 'E000.34.12.000')}, 'PEPEG': {'coordinates': ('N054.01.06.000', 'W004.31.05.000')}, 'PEPIN': {'coordinates': ('N059.59.31.000', 'E000.00.54.000')}, 'PEPIS': {'coordinates': ('N051.11.48.000', 'W001.14.37.000')}, 'PEPOD': {'coordinates': ('N054.16.16.000', 'W005.34.09.000')}, 'PEPUL': {'coordinates': ('N052.16.03.000', 'W001.53.14.000')}, 'PERUP': {'coordinates': ('N051.42.34.000', 'W002.56.32.000')}, 'PETIL': {'coordinates': ('N055.56.20.000', 'E005.00.00.000')}, 'PETOX': {'coordinates': ('N057.33.33.000', 'W001.49.02.000')}, 'PHILI': {'coordinates': ('N049.28.31.000', 'W007.01.17.000')}, 'PIBUX': {'coordinates': ('N051.34.59.000', 'E001.46.57.000')}, 'PIGOT': {'coordinates': ('N052.29.43.000', 'W001.04.07.000')}, 'PIKIL': {'coordinates': ('N056.00.00.000', 'W015.00.00.000')}, 'PIKOD': {'coordinates': ('N049.24.38.000', 'W005.15.59.000')}, 'PILIP': {'coordinates': ('N050.20.03.000', 'W001.00.26.000')}, 'PIPAR': {'coordinates': ('N056.07.00.000', 'W003.12.06.000')}, 'PIPEM': {'coordinates': ('N058.09.23.000', 'W003.44.19.000')}, 'PIPIN': {'coordinates': ('N052.36.36.000', 'W000.56.30.000')}, 'PIXAM': {'coordinates': ('N052.04.47.000', 'E001.56.50.000')}, 'PIXUP': {'coordinates': ('N052.01.51.000', 'W001.06.40.000')}, 'PLYMO': {'coordinates': ('N050.20.45.000', 'W004.37.37.000')}, 'POMAX': {'coordinates': ('N051.31.38.000', 'W002.04.52.000')}, 'POTON': {'coordinates': ('N052.05.04.000', 'W000.25.38.000')}, 'PUPJO': {'coordinates': ('N051.30.59.000', 'W002.00.00.000')}, 'RAMID': {'coordinates': ('N052.57.28.000', 'E002.30.00.000')}, 'RAMOX': {'coordinates': ('N053.39.37.000', 'W004.50.22.000')}, 'RAPIX': {'coordinates': ('N051.26.38.000', 'E002.00.00.000')}, 'RAPUM': {'coordinates': ('N054.55.18.000', 'W001.52.58.000')}, 'RATKA': {'coordinates': ('N049.30.00.000', 'W008.00.00.000')}, 'RATLO': {'coordinates': ('N051.59.29.000', 'E001.40.55.000')}, 'RATPU': {'coordinates': ('N056.55.32.000', 'W002.06.59.000')}, 'RATSU': {'coordinates': ('N061.00.00.000', 'W010.00.00.000')}, 'RAVDI': {'coordinates': ('N059.29.57.000', 'W001.22.42.000')}, 'RAVLO': {'coordinates': ('N052.55.01.000', 'E003.08.54.000')}, 'REDFA': {'coordinates': ('N052.06.53.000', 'E002.29.17.000')}, 'REGSI': {'coordinates': ('N052.51.19.000', 'E000.01.51.000')}, 'REKNA': {'coordinates': ('N056.10.29.000', 'E003.30.00.000')}, 'REMSI': {'coordinates': ('N053.57.37.000', 'W003.49.32.000')}, 'RESNO': {'coordinates': ('N055.00.00.000', 'W015.00.00.000')}, 'RETSI': {'coordinates': ('N052.30.42.000', 'W002.52.33.000')}, 'REXAM': {'coordinates': ('N053.04.00.000', 'W003.09.37.000')}, 'RIBEL': {'coordinates': ('N054.00.58.000', 'W002.17.23.000')}, 'RIGDI': {'coordinates': ('N050.16.15.000', 'W005.59.29.000')}, 'RIGVU': {'coordinates': ('N058.47.36.000', 'E002.09.18.000')}, 'RILES': {'coordinates': ('N051.56.55.000', 'W002.55.22.000')}, 'RIMOL': {'coordinates': ('N057.32.33.000', 'W004.02.30.000')}, 'RIMTO': {'coordinates': ('N053.43.03.000', 'W001.25.59.000')}, 'RIMVU': {'coordinates': ('N053.45.11.000', 'W005.08.25.000')}, 'RINGA': {'coordinates': ('N054.23.54.000', 'W005.34.16.000')}, 'RINIS': {'coordinates': ('N051.54.30.000', 'E002.19.21.000')}, 'RINTI': {'coordinates': ('N051.01.58.000', 'E001.36.56.000')}, 'RIPNO': {'coordinates': ('N054.56.32.000', 
'W003.44.15.000')}, 'RISDU': {'coordinates': ('N057.36.31.000', 'W001.48.24.000')}, 'RISIN': {'coordinates': ('N051.41.39.000', 'W001.26.28.000')}, 'RISLA': {'coordinates': ('N053.01.51.000', 'W002.53.12.000')}, 'RITSI': {'coordinates': ('N055.42.09.000', 'W001.39.26.000')}, 'RIVOT': {'coordinates': ('N056.24.29.000', 'E000.46.37.000')}, 'RIXUN': {'coordinates': ('N061.00.00.000', 'W003.00.00.000')}, 'ROBBO': {'coordinates': ('N055.53.23.000', 'W004.54.37.000')}, 'ROBEM': {'coordinates': ('N055.27.33.000', 'W001.01.14.000')}, 'ROBOP': {'coordinates': ('N054.27.09.000', 'W005.20.05.000')}, 'ROBVA': {'coordinates': ('N052.48.04.000', 'W002.53.34.000')}, 'RODEL': {'coordinates': ('N050.30.00.000', 'W015.00.00.000')}, 'RODOL': {'coordinates': ('N053.14.17.000', 'W001.51.43.000')}, 'RODSI': {'coordinates': ('N054.47.56.000', 'E002.53.35.000')}, 'ROGAG': {'coordinates': ('N053.14.06.000', 'W000.17.45.000')}, 'ROGBI': {'coordinates': ('N052.17.47.000', 'W001.20.06.000')}, 'ROKAN': {'coordinates': ('N053.39.48.000', 'E003.11.20.000')}, 'ROKKE': {'coordinates': ('N051.04.12.000', 'E000.02.42.000')}, 'ROKSI': {'coordinates': ('N050.30.13.000', 'W002.12.51.000')}, 'ROKUP': {'coordinates': ('N053.01.02.000', 'W001.30.57.000')}, 'ROLEX': {'coordinates': ('N053.25.42.000', 'W003.58.04.000')}, 'ROLUM': {'coordinates': ('N054.18.20.000', 'E003.18.31.000')}, 'ROPAL': {'coordinates': ('N055.08.17.000', 'E003.56.43.000')}, 'ROPMU': {'coordinates': ('N052.06.14.000', 'E000.29.17.000')}, 'ROSUN': {'coordinates': ('N053.40.08.000', 'W002.20.57.000')}, 'ROTEV': {'coordinates': ('N054.01.44.000', 'W006.03.58.000')}, 'ROTNO': {'coordinates': ('N050.51.26.000', 'E000.46.41.000')}, 'ROVNI': {'coordinates': ('N053.53.13.000', 'E000.39.01.000')}, 'ROXAT': {'coordinates': ('N053.24.49.000', 'E003.00.00.000')}, 'ROXOG': {'coordinates': ('N050.15.58.000', 'W001.33.44.000')}, 'RUBEX': {'coordinates': ('N055.19.12.000', 'W006.56.07.000')}, 'RUBMI': {'coordinates': ('N050.16.46.000', 'W002.19.43.000')}, 'RUDMO': {'coordinates': ('N050.46.40.000', 'W001.01.27.000')}, 'RUGER': {'coordinates': ('N053.28.58.000', 'W003.26.52.000')}, 'RUGID': {'coordinates': ('N057.22.09.000', 'W004.53.55.000')}, 'RULAV': {'coordinates': ('N053.26.14.000', 'W005.10.01.000')}, 'SABER': {'coordinates': ('N051.42.14.000', 'E000.56.58.000')}, 'SALCO': {'coordinates': ('N049.44.14.000', 'W003.31.46.000')}, 'SAMON': {'coordinates': ('N051.19.21.000', 'W007.25.04.000')}, 'SANBA': {'coordinates': ('N053.08.22.000', 'W002.20.03.000')}, 'SANDY': {'coordinates': ('N051.03.51.000', 'E001.04.03.000')}, 'SAPCO': {'coordinates': ('N052.32.25.000', 'W001.21.25.000')}, 'SEMMU': {'coordinates': ('N052.46.41.000', 'W004.02.22.000')}, 'SETEL': {'coordinates': ('N054.00.45.000', 'W002.26.09.000')}, 'SHAPP': {'coordinates': ('N054.30.00.000', 'W002.37.38.000')}, 'SIDDI': {'coordinates': ('N050.31.07.000', 'W003.58.02.000')}, 'SILVA': {'coordinates': ('N051.50.51.000', 'W001.00.19.000')}, 'SIREN': {'coordinates': ('N051.34.15.000', 'W001.52.29.000')}, 'SIRGO': {'coordinates': ('N051.37.46.000', 'E000.38.47.000')}, 'SITET': {'coordinates': ('N050.06.00.000', 'E000.00.00.000')}, 'SITKU': {'coordinates': ('N053.44.36.000', 'W004.49.54.000')}, 'SIVBU': {'coordinates': ('N053.27.55.000', 'W000.34.44.000')}, 'SIVDA': {'coordinates': ('N052.21.22.000', 'E000.01.47.000')}, 'SKERY': {'coordinates': ('N050.00.00.000', 'W003.10.23.000')}, 'SKESO': {'coordinates': ('N049.49.29.000', 'W003.02.03.000')}, 'SKINA': {'coordinates': ('N052.48.17.000', 'W001.46.23.000')}, 'SLANY': 
{'coordinates': ('N052.09.31.000', 'W005.50.32.000')}, 'SLYDA': {'coordinates': ('N054.12.04.000', 'W005.05.15.000')}, 'SMOKI': {'coordinates': ('N057.46.37.000', 'W002.35.56.000')}, 'SOBDO': {'coordinates': ('N050.04.26.000', 'W007.07.00.000')}, 'SOKDU': {'coordinates': ('N050.39.39.000', 'W002.01.33.000')}, 'SOMAX': {'coordinates': ('N050.00.00.000', 'W015.00.00.000')}, 'SOMVA': {'coordinates': ('N052.18.26.000', 'E002.38.38.000')}, 'SONDO': {'coordinates': ('N052.05.06.000', 'E001.48.42.000')}, 'SONEX': {'coordinates': ('N053.29.53.000', 'W002.10.21.000')}, 'SONOG': {'coordinates': ('N052.06.20.000', 'E002.16.10.000')}, 'SOPAX': {'coordinates': ('N053.38.49.000', 'W004.27.08.000')}, 'SOPEK': {'coordinates': ('N053.06.32.000', 'E002.30.00.000')}, 'SOPIT': {'coordinates': ('N051.57.30.000', 'W001.06.26.000')}, 'SOPTO': {'coordinates': ('N055.18.20.000', 'E005.00.00.000')}, 'SOPTU': {'coordinates': ('N050.35.54.000', 'W004.09.18.000')}, 'SOSAR': {'coordinates': ('N061.00.00.000', 'W002.00.00.000')}, 'SOSIM': {'coordinates': ('N053.48.56.000', 'W004.30.30.000')}, 'SOTUN': {'coordinates': ('N054.10.36.000', 'E003.37.37.000')}, 'SOVAT': {'coordinates': ('N050.46.46.000', 'E001.28.00.000')}, 'SOVED': {'coordinates': ('N056.00.00.000', 'W014.00.00.000')}, 'SOXON': {'coordinates': ('N058.58.06.000', 'W003.52.22.000')}, 'SOXUX': {'coordinates': ('N050.35.46.000', 'E000.55.45.000')}, 'SPEAR': {'coordinates': ('N051.34.34.000', 'E000.42.01.000')}, 'STAFA': {'coordinates': ('N052.51.41.000', 'W002.14.35.000')}, 'STIRA': {'coordinates': ('N056.08.02.000', 'W003.50.01.000')}, 'STOAT': {'coordinates': ('N052.01.33.000', 'W000.01.15.000')}, 'SUBIP': {'coordinates': ('N050.26.44.000', 'E000.53.06.000')}, 'SUBUK': {'coordinates': ('N054.22.48.000', 'W003.17.11.000')}, 'SUDBY': {'coordinates': ('N055.17.45.000', 'W004.16.06.000')}, 'SUMIN': {'coordinates': ('N055.19.46.000', 'W004.03.18.000')}, 'SUMUM': {'coordinates': ('N051.38.14.000', 'E002.06.28.000')}, 'SUNFI': {'coordinates': ('N050.58.02.000', 'W003.52.02.000')}, 'SUNOT': {'coordinates': ('N057.00.00.000', 'W015.00.00.000')}, 'SUNUP': {'coordinates': ('N051.27.31.000', 'E001.52.27.000')}, 'SUPAP': {'coordinates': ('N049.26.02.000', 'W005.49.22.000')}, 'SUPEL': {'coordinates': ('N053.13.40.000', 'E000.35.28.000')}, 'SUPIT': {'coordinates': ('N057.19.57.000', 'W007.05.06.000')}, 'SURAT': {'coordinates': ('N055.54.56.000', 'E003.30.00.000')}, 'SUSIS': {'coordinates': ('N055.36.17.000', 'W000.21.02.000')}, 'SWANY': {'coordinates': ('N051.33.34.000', 'W004.07.48.000')}, 'TABEN': {'coordinates': ('N051.21.49.000', 'W001.10.22.000')}, 'TABIS': {'coordinates': ('N052.00.32.000', 'E000.26.44.000')}, 'TADAL': {'coordinates': ('N053.35.14.000', 'W003.01.35.000')}, 'TADEX': {'coordinates': ('N054.51.24.000', 'W008.14.01.000')}, 'TAKAS': {'coordinates': ('N049.00.00.000', 'W008.00.00.000')}, 'TALGA': {'coordinates': ('N051.57.30.000', 'W003.14.44.000')}, 'TALIG': {'coordinates': ('N049.30.59.000', 'W005.48.04.000')}, 'TAMEL': {'coordinates': ('N048.43.43.000', 'W010.29.50.000')}, 'TANET': {'coordinates': ('N051.26.58.000', 'E000.55.32.000')}, 'TARTN': {'coordinates': ('N055.43.02.000', 'W003.08.19.000')}, 'TAWNY': {'coordinates': ('N051.38.46.000', 'E000.09.06.000')}, 'TEBRA': {'coordinates': ('N051.29.20.000', 'E001.36.43.000')}, 'TEDSA': {'coordinates': ('N052.03.03.000', 'E001.07.39.000')}, 'TELBA': {'coordinates': ('N052.39.52.000', 'W002.19.07.000')}, 'TELTU': {'coordinates': ('N050.48.40.000', 'W000.45.18.000')}, 'TENDO': {'coordinates': ('N054.01.44.000', 
'E000.14.47.000')}, 'TENSO': {'coordinates': ('N053.36.02.000', 'W002.12.27.000')}, 'TERKO': {'coordinates': ('N051.37.32.000', 'E000.44.48.000')}, 'TESDO': {'coordinates': ('N050.07.26.000', 'W007.07.00.000')}, 'TEVMO': {'coordinates': ('N051.10.25.000', 'E001.24.59.000')}, 'TEVSI': {'coordinates': ('N051.21.01.000', 'W001.03.01.000')}, 'THRED': {'coordinates': ('N050.29.55.000', 'W001.39.50.000')}, 'TIBGA': {'coordinates': ('N052.23.26.000', 'W005.27.05.000')}, 'TIGER': {'coordinates': ('N051.04.02.000', 'E000.26.22.000')}, 'TIGGY': {'coordinates': ('N050.37.01.000', 'W000.51.30.000')}, 'TILNI': {'coordinates': ('N054.32.51.000', 'W001.51.18.000')}, 'TIMBA': {'coordinates': ('N050.56.44.000', 'E000.15.42.000')}, 'TIMIS': {'coordinates': ('N053.57.01.000', 'W004.35.56.000')}, 'TIMPO': {'coordinates': ('N052.33.32.000', 'W001.09.50.000')}, 'TINAC': {'coordinates': ('N056.15.03.000', 'E005.00.00.000')}, 'TINAN': {'coordinates': ('N050.40.00.000', 'W003.26.54.000')}, 'TINDI': {'coordinates': ('N054.02.14.000', 'E003.15.29.000')}, 'TIPAN': {'coordinates': ('N054.35.54.000', 'E004.23.56.000')}, 'TIPIL': {'coordinates': ('N053.10.00.000', 'W001.44.23.000')}, 'TIPOD': {'coordinates': ('N053.26.08.000', 'W003.17.05.000')}, 'TIPTA': {'coordinates': ('N054.48.13.000', 'W001.36.41.000')}, 'TIVER': {'coordinates': ('N050.48.33.000', 'W003.25.26.000')}, 'TIXEX': {'coordinates': ('N052.07.48.000', 'W001.00.43.000')}, 'TOBID': {'coordinates': ('N052.13.00.000', 'W001.27.59.000')}, 'TOBIX': {'coordinates': ('N051.36.17.000', 'E001.16.51.000')}, 'TOBMO': {'coordinates': ('N056.45.38.000', 'W006.28.11.000')}, 'TOFED': {'coordinates': ('N051.17.36.000', 'W002.15.49.000')}, 'TOLSA': {'coordinates': ('N053.07.37.000', 'E003.06.29.000')}, 'TOPPA': {'coordinates': ('N053.24.09.000', 'E003.33.41.000')}, 'TOPRO': {'coordinates': ('N051.54.42.000', 'W003.14.57.000')}, 'TORGO': {'coordinates': ('N053.07.57.000', 'W003.15.59.000')}, 'TOSVA': {'coordinates': ('N051.40.37.000', 'E002.04.26.000')}, 'TOTRI': {'coordinates': ('N051.46.30.000', 'E000.11.48.000')}, 'TOVGU': {'coordinates': ('N051.44.03.000', 'E000.45.38.000')}, 'TRIPO': {'coordinates': ('N051.42.47.000', 'E001.04.58.000')}, 'TUBSU': {'coordinates': ('N052.04.06.000', 'W001.40.28.000')}, 'TUFOZ': {'coordinates': ('N051.01.01.000', 'W000.30.24.000')}, 'TUGPO': {'coordinates': ('N051.37.10.000', 'E000.54.52.000')}, 'TULTA': {'coordinates': ('N048.34.37.000', 'W008.00.00.000')}, 'TUMTI': {'coordinates': ('N053.04.40.000', 'W001.36.15.000')}, 'TUNBY': {'coordinates': ('N051.10.08.000', 'E000.19.29.000')}, 'TUNSO': {'coordinates': ('N055.06.40.000', 'W004.57.41.000')}, 'TUPEM': {'coordinates': ('N053.50.17.000', 'W003.28.55.000')}, 'TURLU': {'coordinates': ('N050.04.35.000', 'W008.00.00.000')}, 'TUXOS': {'coordinates': ('N055.58.24.000', 'W007.31.35.000')}, 'TUZLA': {'coordinates': ('N052.20.41.000', 'E001.00.12.000')}, 'TWEED': {'coordinates': ('N055.40.37.000', 'W003.16.38.000')}, 'UGBET': {'coordinates': ('N051.37.01.000', 'W000.07.05.000')}, 'UGBUD': {'coordinates': ('N051.47.14.000', 'W003.39.10.000')}, 'UGNUS': {'coordinates': ('N051.42.32.000', 'W002.08.23.000')}, 'ULLAP': {'coordinates': ('N057.54.00.000', 'W005.10.09.000')}, 'ULSAX': {'coordinates': ('N053.35.54.000', 'W001.57.00.000')}, 'ULTIB': {'coordinates': ('N051.39.36.000', 'W000.16.44.000')}, 'UMBAG': {'coordinates': ('N052.20.52.000', 'E000.46.57.000')}, 'UMBUR': {'coordinates': ('N051.27.33.000', 'E000.07.08.000')}, 'UMKIL': {'coordinates': ('N053.11.06.000', 'W002.37.23.000')}, 'UMLAT': 
{'coordinates': ('N051.40.20.000', 'W000.41.39.000')}, 'UMLUX': {'coordinates': ('N052.22.37.000', 'W002.00.19.000')}, 'UMOLO': {'coordinates': ('N052.08.03.000', 'W003.13.57.000')}, 'UMTUM': {'coordinates': ('N051.12.27.000', 'E001.01.03.000')}, 'UNBIG': {'coordinates': ('N051.44.27.000', 'W003.35.02.000')}, 'UNDUG': {'coordinates': ('N050.47.24.000', 'E000.25.30.000')}, 'UNDUX': {'coordinates': ('N053.38.30.000', 'W004.18.39.000')}, 'UNGAP': {'coordinates': ('N052.21.29.000', 'W001.23.39.000')}, 'UNIGO': {'coordinates': ('N053.33.40.000', 'W001.33.39.000')}, 'UNPUP': {'coordinates': ('N050.32.24.000', 'W004.34.30.000')}, 'UNSAD': {'coordinates': ('N051.33.40.000', 'E000.57.46.000')}, 'UNTAL': {'coordinates': ('N054.54.35.000', 'W001.44.23.000')}, 'UNURO': {'coordinates': ('N055.21.11.000', 'W003.21.02.000')}, 'UPDUK': {'coordinates': ('N052.34.27.000', 'W000.56.30.000')}, 'UPGAS': {'coordinates': ('N055.14.41.000', 'E005.00.00.000')}, 'UPGET': {'coordinates': ('N056.39.35.000', 'W002.00.50.000')}, 'UPTON': {'coordinates': ('N053.35.13.000', 'W001.18.03.000')}, 'USEKA': {'coordinates': ('N053.42.02.000', 'W000.26.45.000')}, 'UTAVA': {'coordinates': ('N051.58.47.000', 'W000.04.19.000')}, 'UTOGU': {'coordinates': ('N055.07.40.000', 'W003.04.43.000')}, 'UTUXA': {'coordinates': ('N053.00.30.000', 'W002.13.13.000')}, 'UVAVU': {'coordinates': ('N054.25.00.000', 'W001.53.46.000')}, 'UVPOK': {'coordinates': ('N053.59.40.000', 'W004.12.35.000')}, 'VABIK': {'coordinates': ('N051.14.47.000', 'E002.00.00.000')}, 'VABKA': {'coordinates': ('N053.18.08.000', 'W002.44.10.000')}, 'VADNO': {'coordinates': ('N057.27.11.000', 'W003.37.38.000')}, 'VAKPO': {'coordinates': ('N054.11.25.000', 'W005.22.39.000')}, 'VALBO': {'coordinates': ('N055.07.43.000', 'E005.00.00.000')}, 'VAMEB': {'coordinates': ('N054.00.10.000', 'W002.48.32.000')}, 'VAPID': {'coordinates': ('N051.15.14.000', 'W001.02.42.000')}, 'VASUX': {'coordinates': ('N050.30.09.000', 'W001.15.17.000')}, 'VATON': {'coordinates': ('N051.26.04.000', 'W000.20.56.000')}, 'VATRY': {'coordinates': ('N052.33.16.000', 'W005.30.00.000')}, 'VAXIT': {'coordinates': ('N056.32.15.000', 'E005.00.00.000')}, 'VEGAR': {'coordinates': ('N053.02.34.000', 'W001.58.23.000')}, 'VEGUS': {'coordinates': ('N053.24.01.000', 'W000.21.11.000')}, 'VELAG': {'coordinates': ('N052.24.36.000', 'W001.00.50.000')}, 'VENAS': {'coordinates': ('N054.18.20.000', 'E003.39.08.000')}, 'VENER': {'coordinates': ('N054.30.00.000', 'W015.00.00.000')}, 'VEXEN': {'coordinates': ('N050.08.22.000', 'W001.49.34.000')}, 'VIDOK': {'coordinates': ('N052.24.39.000', 'W002.02.31.000')}, 'VOLFI': {'coordinates': ('N053.20.35.000', 'W003.21.51.000')}, 'VOUGA': {'coordinates': ('N051.03.33.000', 'W000.57.50.000')}, 'WAFFU': {'coordinates': ('N050.34.57.000', 'E000.20.59.000')}, 'WEALD': {'coordinates': ('N051.19.51.000', 'E000.02.05.000')}, 'WELIN': {'coordinates': ('N052.14.50.000', 'W000.51.08.000')}, 'WESUL': {'coordinates': ('N051.40.15.000', 'E000.29.09.000')}, 'WILLO': {'coordinates': ('N050.59.06.000', 'W000.11.30.000')}, 'WIZAD': {'coordinates': ('N051.07.00.000', 'E000.57.11.000')}, 'WOBUN': {'coordinates': ('N052.01.10.000', 'W000.44.00.000')}, 'WOTAN': {'coordinates': ('N051.37.26.000', 'W002.20.06.000')}, 'XAMAB': {'coordinates': ('N050.12.16.000', 'E000.15.53.000')}, 'XAMAN': {'coordinates': ('N051.47.05.000', 'E002.13.27.000')}, 'XAPOS': {'coordinates': ('N053.17.29.000', 'W001.37.51.000')}, 'XAVAP': {'coordinates': ('N049.14.02.000', 'W006.08.35.000')}, 'XIDIL': {'coordinates': ('N050.21.06.000', 
'E000.38.29.000')}, 'ZEDDO': {'coordinates': ('N051.31.42.000', 'W004.33.35.000')}})
+
if __name__ == '__main__':
unittest.main()
| Create UK AIP Parser Tooling
# What are the suggested changes?
Create the AIP API and use that API to automatically update incorrect parts of the sector file.
# Sources (if applicable)
AIP
# Files to be changed (if known)
_data/Tools/
| 2023-07-28T08:43:31Z | [] | [] |
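The expected parseENR4_4() output above maps each five-letter significant-point designator to a pair of degrees.minutes.seconds strings such as 'N052.06.10.000' and 'W004.48.12.000'. As an illustration only — the helper below is a hypothetical sketch, not part of the repository's tooling — converting that string format into signed decimal degrees could look like this:

# Hypothetical helper (not from the repo): converts the DMS strings seen in the
# expected parseENR4_4() output into signed decimal degrees.
def dms_to_decimal(value):
    hemisphere, rest = value[0], value[1:]              # e.g. 'N', '052.06.10.000'
    degrees, minutes, seconds = (float(part) for part in rest.split('.', 2))
    decimal = degrees + minutes / 60 + seconds / 3600
    return -decimal if hemisphere in ('S', 'W') else decimal

# The ABAPO fix from the expected output above:
lat = dms_to_decimal('N052.06.10.000')   # ≈ 52.1028
lon = dms_to_decimal('W004.48.12.000')   # ≈ -4.8033

A conversion like this is one way the parsed fixes could be compared against existing sector file data when deciding which entries need updating.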
|
arviz-devs/arviz | 131 | arviz-devs__arviz-131 | [
"125"
] | 0e03c99008cf5a98cc09c1a15a67b922b2405702 | diff --git a/arviz/plots/jointplot.py b/arviz/plots/jointplot.py
--- a/arviz/plots/jointplot.py
+++ b/arviz/plots/jointplot.py
@@ -2,38 +2,33 @@
from matplotlib.ticker import NullFormatter
from .kdeplot import kdeplot
-from ..utils import trace_to_dataframe, get_varnames
-from .plot_utils import _scale_text, get_bins
+from ..utils import convert_to_xarray
+from .plot_utils import _scale_text, get_bins, xarray_var_iter, make_label
-def jointplot(trace, varnames=None, figsize=None, textsize=None, kind='scatter', gridsize='auto',
- skip_first=0, joint_kwargs=None, marginal_kwargs=None):
+def jointplot(data, var_names=None, coords=None, figsize=None, textsize=None, kind='scatter',
+ gridsize='auto', joint_kwargs=None, marginal_kwargs=None):
"""
Plot a scatter or hexbin of two variables with their respective marginals distributions.
Parameters
----------
- trace : Pandas DataFrame or PyMC3 trace
+ data : xarray, or object that can be converted (pystan or pymc3 draws)
Posterior samples
- varnames : list of variable names
+ var_names : list of variable names
Variables to be plotted, two variables are required.
+ coords : mapping, optional
+ Coordinates of var_names to be plotted. Passed to `Dataset.sel`
figsize : figure size tuple
If None, size is (8, 8)
textsize: int
Text size for labels
kind : str
Type of plot to display (scatter of hexbin)
- hexbin : Boolean
- If True draws an hexbin plot
gridsize : int or (int, int), optional.
- Only works when hexbin is True.
- The number of hexagons in the x-direction. The corresponding number of hexagons in the
- y-direction is chosen such that the hexagons are approximately regular.
- Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons
- in the x-direction and the y-direction.
- skip_first : int
- Number of first samples not shown in plots (burn-in)
+ The number of hexagons in the x-direction. Ignored when hexbin is False. See `plt.hexbin`
+ for details
joint_shade : dicts, optional
Additional keywords modifying the join distribution (central subplot)
marginal_shade : dicts, optional
@@ -45,17 +40,21 @@ def jointplot(trace, varnames=None, figsize=None, textsize=None, kind='scatter',
ax_hist_x : matplotlib axes, x (top) distribution
ax_hist_y : matplotlib axes, y (right) distribution
"""
- trace = trace_to_dataframe(trace[skip_first:], combined=True)
- varnames = get_varnames(trace, varnames)
+
+ data = convert_to_xarray(data)
+ if coords is None:
+ coords = {}
+
+ plotters = list(xarray_var_iter(data.sel(**coords), var_names=var_names, combined=True))
+
+ if len(plotters) != 2:
+ raise Exception(f'Number of variables to be plotted must 2 (you supplied {len(plotters)})')
if figsize is None:
figsize = (6, 6)
textsize, linewidth, _ = _scale_text(figsize, textsize=textsize)
- if len(varnames) != 2:
- raise Exception('Number of variables to be plotted must 2')
-
if joint_kwargs is None:
joint_kwargs = {}
@@ -66,11 +65,11 @@ def jointplot(trace, varnames=None, figsize=None, textsize=None, kind='scatter',
axjoin, ax_hist_x, ax_hist_y = _define_axes()
- x_var_name = varnames[0]
- y_var_name = varnames[1]
+ x_var_name = make_label(*plotters[0][:2])
+ y_var_name = make_label(*plotters[1][:2])
- x = trace[x_var_name].values
- y = trace[y_var_name].values
+ x = plotters[0][2].flatten()
+ y = plotters[1][2].flatten()
axjoin.set_xlabel(x_var_name, fontsize=textsize)
axjoin.set_ylabel(y_var_name, fontsize=textsize)
@@ -80,26 +79,22 @@ def jointplot(trace, varnames=None, figsize=None, textsize=None, kind='scatter',
axjoin.scatter(x, y, **joint_kwargs)
elif kind == 'hexbin':
if gridsize == 'auto':
- gridsize = int(len(trace)**0.35)
+ gridsize = int(len(x)**0.35)
axjoin.hexbin(x, y, mincnt=1, gridsize=gridsize, **joint_kwargs)
axjoin.grid(False)
else:
- raise ValueError('Plot type {} not recognized.'.format(kind))
-
- if x.dtype.kind == 'i':
- bins = get_bins(x)
- ax_hist_x.hist(x, bins=bins, align='left', density=True,
- **marginal_kwargs)
- else:
- kdeplot(x, ax=ax_hist_x, **marginal_kwargs)
- if y.dtype.kind == 'i':
- bins = get_bins(y)
- ax_hist_y.hist(y, bins=bins, align='left', density=True, orientation='horizontal',
- **marginal_kwargs)
- else:
- marginal_kwargs.setdefault('plot_kwargs', {})
- marginal_kwargs['plot_kwargs']['linewidth'] = linewidth
- kdeplot(y, ax=ax_hist_y, rotated=True, **marginal_kwargs)
+ raise ValueError(f'Plot type {kind} not recognized.')
+
+ for val, ax, orient, rotate in ((x, ax_hist_x, 'vertical', False),
+ (y, ax_hist_y, 'horizontal', True)):
+ if val.dtype.kind == 'i':
+ bins = get_bins(val)
+ ax.hist(val, bins=bins, align='left', density=True,
+ orientation=orient, **marginal_kwargs)
+ else:
+ marginal_kwargs.setdefault('plot_kwargs', {})
+ marginal_kwargs['plot_kwargs']['linewidth'] = linewidth
+ kdeplot(val, rotated=rotate, ax=ax, **marginal_kwargs)
ax_hist_x.set_xlim(axjoin.get_xlim())
ax_hist_y.set_ylim(axjoin.get_ylim())
diff --git a/examples/jointplot.py b/examples/jointplot.py
--- a/examples/jointplot.py
+++ b/examples/jointplot.py
@@ -8,5 +8,11 @@
az.style.use('arviz-darkgrid')
-trace = az.utils.load_trace('data/non_centered_eight_trace.gzip')
-az.jointplot(trace, kind='hexbin', varnames=('tau', 'mu'))
+
+data = az.load_data('data/non_centered_eight.nc')
+
+az.jointplot(data,
+ var_names=['theta'],
+ coords={'school': ['Choate', 'Phillips Andover']},
+ kind='hexbin',
+ figsize=(10, 10))
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -2,7 +2,7 @@
from pandas import DataFrame
import numpy as np
import pymc3 as pm
-from pytest import raises
+import pytest
from .helpers import eight_schools_params, load_cached_models
from ..plots import (densityplot, traceplot, energyplot, posteriorplot, autocorrplot, forestplot,
@@ -51,17 +51,19 @@ def test_forestplot(self):
assert axes.shape == (1,)
def test_energyplot(self):
- with raises(AttributeError):
+ with pytest.raises(AttributeError):
energyplot(self.df_trace)
assert energyplot(self.short_trace)
def test_parallelplot(self):
- with raises(ValueError):
+ with pytest.raises(ValueError):
parallelplot(self.df_trace)
assert parallelplot(self.short_trace)
- def test_jointplot(self):
- jointplot(self.short_trace, varnames=['mu', 'tau'])
+ @pytest.mark.parametrize('kind', ['scatter', 'hexbin'])
+ def test_jointplot(self, kind):
+ for obj in (self.short_trace, self.fit):
+ jointplot(obj, var_names=('mu', 'tau'), kind=kind)
def test_pairplot(self):
pairplot(self.short_trace, varnames=['theta__0', 'theta__1'], divergences=True,
| Jointplot needs update to work with xarray
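For context, the intended call pattern after this update, copied from the `examples/jointplot.py` change in the patch above (the dataset path and school names are those of that example, not a general requirement):
```python
import arviz as az

# Posterior samples are now passed as an InferenceData / xarray-convertible
# object and selected via var_names/coords, instead of a trace DataFrame with
# flattened names such as 'theta__0'.
data = az.load_data('data/non_centered_eight.nc')
az.jointplot(data,
             var_names=['theta'],
             coords={'school': ['Choate', 'Phillips Andover']},
             kind='hexbin',
             figsize=(10, 10))
```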
| This is nearly done (so no one else starts on it!) | 2018-07-16T22:55:17Z | [] | [] |
arviz-devs/arviz | 198 | arviz-devs__arviz-198 | [
"194"
] | d848a97d47e6b50ecf309b9cb2d4e1774efe9e1c | diff --git a/arviz/__init__.py b/arviz/__init__.py
--- a/arviz/__init__.py
+++ b/arviz/__init__.py
@@ -1,4 +1,5 @@
# pylint: disable=wildcard-import,invalid-name,wrong-import-position
+"""ArviZ is a library for exploratory analysis of Bayesian models."""
__version__ = '0.1.0'
from matplotlib.pyplot import style
diff --git a/arviz/compat/__init__.py b/arviz/compat/__init__.py
--- a/arviz/compat/__init__.py
+++ b/arviz/compat/__init__.py
@@ -1,3 +1,4 @@
+"""Provide optional dependencies."""
#pylint: disable=invalid-name
from .optional_dep import OptionalDep
pymc3 = OptionalDep('pymc3')
diff --git a/arviz/compat/optional_dep.py b/arviz/compat/optional_dep.py
--- a/arviz/compat/optional_dep.py
+++ b/arviz/compat/optional_dep.py
@@ -1,3 +1,4 @@
+"""Wrapper for optional library dependencies."""
import importlib
import sys
@@ -18,11 +19,13 @@ def trace_to_dataframe(*args, **kwargs):
pm.trace_to_dataframe(trace) # calls the OptionalDep method
pm.Normal('x', 0, 1) # calls pymc3.Normal
"""
+
def __init__(self, name):
self.__name = name
self.__module = None
def __getattr__(self, name):
+ """Try to import selected module in a lazy manner."""
if self.__module is None:
try:
self.__module = importlib.import_module(self.__name)
diff --git a/arviz/inference_data.py b/arviz/inference_data.py
--- a/arviz/inference_data.py
+++ b/arviz/inference_data.py
@@ -1,3 +1,4 @@
+"""Data structure for using netcdf groups with xarray."""
import netCDF4 as nc
import xarray as xr
@@ -6,15 +7,16 @@ class InferenceData():
"""Container for accessing netCDF files using xarray."""
def __init__(self, *_, **kwargs):
- """Attach to a netcdf file.
+ """Initialize InferenceData object from keyword xarray datasets.
- This will inspect the netcdf for the available groups, so that they can be
- later loaded into memory.
+ Examples
+ --------
+ InferenceData(posterior=posterior, prior=prior)
- Parameters:
- -----------
- filename : str
- netcdf4 file that contains groups for accessing with xarray.
+ Parameters
+ ----------
+ kwargs :
+ Keyword arguments of xarray datasets
"""
self._groups = []
for key, dataset in kwargs.items():
@@ -27,18 +29,44 @@ def __init__(self, *_, **kwargs):
self._groups.append(key)
def __repr__(self):
+ """Make string representation of object."""
return 'Inference data with groups:\n\t> {options}'.format(
options='\n\t> '.join(self._groups)
)
@staticmethod
def from_netcdf(filename):
+ """Initialize object from a netcdf file.
+
+ Expects that the file will have groups, each of which can be loaded by xarray.
+
+ Parameters
+ ----------
+ filename : str
+ location of netcdf file
+
+ Returns
+ -------
+ InferenceData object
+ """
groups = {}
for group in nc.Dataset(filename, mode='r').groups:
groups[group] = xr.open_dataset(filename, group=group)
return InferenceData(**groups)
def to_netcdf(self, filename):
+ """Write InferenceData to file using netcdf4.
+
+ Parameters
+ ----------
+ filename : str
+ Location to write to
+
+ Returns
+ -------
+ str
+ Location of netcdf file
+ """
mode = 'w' # overwrite first, then append
for group in self._groups:
data = getattr(self, group)
diff --git a/arviz/plots/__init__.py b/arviz/plots/__init__.py
--- a/arviz/plots/__init__.py
+++ b/arviz/plots/__init__.py
@@ -1,3 +1,4 @@
+"""Plotting functions."""
from .autocorrplot import autocorrplot
from .compareplot import compareplot
from .densityplot import densityplot
diff --git a/arviz/plots/autocorrplot.py b/arviz/plots/autocorrplot.py
--- a/arviz/plots/autocorrplot.py
+++ b/arviz/plots/autocorrplot.py
@@ -1,3 +1,4 @@
+"""Autocorrelation plot of data."""
import numpy as np
from .plot_utils import _scale_text, default_grid, make_label, xarray_var_iter, _create_axes_grid
@@ -7,8 +8,9 @@
def autocorrplot(data, var_names=None, max_lag=100, combined=False,
figsize=None, textsize=None):
- """
- Bar plot of the autocorrelation function for a posterior.
+ """Bar plot of the autocorrelation function for a sequence of data.
+
+ Useful in particular for posteriors from MCMC samples which may display correlation.
Parameters
----------
diff --git a/arviz/plots/compareplot.py b/arviz/plots/compareplot.py
--- a/arviz/plots/compareplot.py
+++ b/arviz/plots/compareplot.py
@@ -1,3 +1,4 @@
+"""Summary plot for model comparison."""
import numpy as np
import matplotlib.pyplot as plt
from .plot_utils import _scale_text
@@ -5,8 +6,7 @@
def compareplot(comp_df, insample_dev=True, plot_standard_error=True, plot_ic_diff=True,
figsize=None, textsize=None, plot_kwargs=None, ax=None):
- """
- Summary plot for model comparison.
+ """Summary plot for model comparison.
This plot is in the style of the one used in the book Statistical Rethinking
by Richard McElreath.
@@ -38,7 +38,6 @@ def compareplot(comp_df, insample_dev=True, plot_standard_error=True, plot_ic_di
-------
ax : matplotlib axes
"""
-
if figsize is None:
figsize = (6, len(comp_df))
diff --git a/arviz/plots/densityplot.py b/arviz/plots/densityplot.py
--- a/arviz/plots/densityplot.py
+++ b/arviz/plots/densityplot.py
@@ -1,3 +1,4 @@
+"""KDE and histogram plots for multiple variables."""
import numpy as np
import matplotlib.pyplot as plt
@@ -10,8 +11,8 @@
def densityplot(data, data_labels=None, var_names=None, credible_interval=0.94,
point_estimate='mean', colors='cycle', outline=True, hpd_markers='', shade=0.,
bw=4.5, figsize=None, textsize=None):
- """
- Generates KDE plots for continuous variables and histograms for discretes ones.
+ """Generate KDE plots for continuous variables and histograms for discretes ones.
+
Plots are truncated at their 100*(1-alpha)% credible intervals. Plots are grouped per variable
and colors assigned to models.
@@ -54,9 +55,7 @@ def densityplot(data, data_labels=None, var_names=None, credible_interval=0.94,
Returns
-------
-
ax : Matplotlib axes
-
"""
if not isinstance(data, (list, tuple)):
datasets = [convert_to_dataset(data, group='posterior')]
@@ -124,7 +123,10 @@ def densityplot(data, data_labels=None, var_names=None, credible_interval=0.94,
def _d_helper(vec, vname, color, bw, textsize, linewidth, markersize, credible_interval,
point_estimate, hpd_markers, outline, shade, ax):
- """
+ """Plot an individual dimension.
+
+ Parameters
+ ----------
vec : array
1D array from trace
vname : str
diff --git a/arviz/plots/energyplot.py b/arviz/plots/energyplot.py
--- a/arviz/plots/energyplot.py
+++ b/arviz/plots/energyplot.py
@@ -1,3 +1,4 @@
+"""Plot energy transition distribution in HMC inference."""
import numpy as np
import matplotlib.pyplot as plt
from .kdeplot import kdeplot
@@ -8,8 +9,9 @@
def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alpha=(1, .75),
fill_color=('C0', 'C5'), bw=4.5, skip_first=0, fill_kwargs=None, ax=None,
**kwargs):
- """Plot energy transition distribution and marginal energy distribution in
- order to diagnose poor exploration by HMC algorithms.
+ """Plot energy transition distribution and marginal energy distribution in HMC algorithms.
+
+ This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.
Parameters
----------
@@ -44,7 +46,6 @@ def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alp
-------
ax : matplotlib axes
"""
-
energy = get_stats(trace[skip_first:], 'energy')
if figsize is None:
diff --git a/arviz/plots/forestplot.py b/arviz/plots/forestplot.py
--- a/arviz/plots/forestplot.py
+++ b/arviz/plots/forestplot.py
@@ -1,3 +1,4 @@
+"""Forest plot."""
from collections import defaultdict
from itertools import tee
@@ -22,8 +23,7 @@ def forestplot(data, kind='forestplot', model_names=None, var_names=None, combin
credible_interval=0.94, quartiles=True, r_hat=True, n_eff=True, colors='cycle',
textsize=None, linewidth=None, markersize=None, ridgeplot_alpha=None,
ridgeplot_overlap=2, figsize=None):
- """
- Forest plot
+ """Forest plot to compare credible intervals from a number of distributions.
Generates a forest plot of 100*(credible_interval)% credible intervals from
a trace or list of traces.
@@ -76,7 +76,6 @@ def forestplot(data, kind='forestplot', model_names=None, var_names=None, combin
gridspec : matplotlib GridSpec
"""
-
ncols, width_ratios = 1, [3]
if n_eff:
@@ -153,6 +152,8 @@ def forestplot(data, kind='forestplot', model_names=None, var_names=None, combin
class PlotHandler():
+ """Class to handle logic from ForestPlot."""
+
def __init__(self, data, var_names, model_names, combined, colors):
if not isinstance(data, (list, tuple)):
data = [data]
@@ -187,6 +188,7 @@ def __init__(self, data, var_names, model_names, combined, colors):
self.plotters = self.make_plotters()
def make_plotters(self):
+ """Initialize an object for each variable to be plotted."""
plotters, y = {}, 0
for var_name in self.var_names:
plotters[var_name] = VarHandler(var_name, self.data, y,
@@ -198,6 +200,7 @@ def make_plotters(self):
return plotters
def labels_and_ticks(self):
+ """Collect labels and ticks from plotters."""
labels, idxs = [], []
for plotter in self.plotters.values():
sub_labels, sub_idxs, _, _ = plotter.labels_ticks_and_vals()
@@ -206,6 +209,21 @@ def labels_and_ticks(self):
return np.concatenate(labels), np.concatenate(idxs)
def ridgeplot(self, mult, textsize, linewidth, alpha, ax):
+ """Draw ridgeplot for each plotter.
+
+ Parameters
+ ----------
+ mult : float
+ How much to multiply height by. Set this to greater than 1 to have some overlap.
+ textsize : float
+ Size of tick text
+ linewidth : float
+ Width of line on border of ridges
+ alpha : float
+ Transparency of ridges
+ ax : Axes
+ Axes to draw on
+ """
if alpha is None:
alpha = 1.
zorder = 0
@@ -225,6 +243,23 @@ def ridgeplot(self, mult, textsize, linewidth, alpha, ax):
return ax
def forestplot(self, credible_interval, quartiles, textsize, linewidth, markersize, ax):
+ """Draw forestplot for each plotter.
+
+ Parameters
+ ----------
+ credible_interval : float
+ How wide each line should be
+ quartiles : bool
+ Whether to mark quartiles
+ textsize : float
+ Size of tick text
+ linewidth : float
+ Width of forestplot line
+ markersize : float
+ Size of marker in center of forestplot line
+ ax : Axes
+ Axes to draw on
+ """
# Quantiles to be calculated
endpoint = 100 * (1 - credible_interval) / 2
if quartiles:
@@ -250,6 +285,7 @@ def forestplot(self, credible_interval, quartiles, textsize, linewidth, markersi
return ax
def plot_neff(self, ax, textsize, markersize):
+ """Draw effective n for each plotter."""
for plotter in self.plotters.values():
for y, n_eff, color in plotter.n_eff():
if n_eff is not None:
@@ -260,6 +296,7 @@ def plot_neff(self, ax, textsize, markersize):
return ax
def plot_rhat(self, ax, textsize, markersize):
+ """Draw r-hat for each plotter."""
for plotter in self.plotters.values():
for y, r_hat, color in plotter.r_hat():
if r_hat is not None:
@@ -271,6 +308,7 @@ def plot_rhat(self, ax, textsize, markersize):
return ax
def make_bands(self, ax):
+ """Draw shaded horizontal bands for each plotter."""
y_vals, y_prev, is_zero = [0], None, False
prev_color_index = 0
plotter = None # To make sure it is defined
@@ -296,16 +334,20 @@ def make_bands(self, ax):
return ax
def fig_height(self):
+ """Figure out the height of this plot."""
# hand-tuned
return (len(self.data) * len(self.var_names) - 1 +
0.25 * sum(1 for j in self.plotters.values() for _ in j.iterator())
)
def y_max(self):
+ """Get maximum y value for the plot."""
return max(p.y_max() for p in self.plotters.values())
class VarHandler():
+ """Handle individual variable logic."""
+
def __init__(self, var_name, data, y_start, model_names, combined, colors):
self.var_name = var_name
self.data = data
@@ -319,6 +361,7 @@ def __init__(self, var_name, data, y_start, model_names, combined, colors):
self.group_offset = 2 * self.var_offset
def iterator(self):
+ """Iterate over models and chains for each variable."""
if self.combined:
grouped_data = [[(0, datum)] for datum in self.data]
skip_dims = {'chain'}
@@ -355,6 +398,7 @@ def iterator(self):
y += self.group_offset
def labels_ticks_and_vals(self):
+ """Get labels, ticks, values, and colors for the variable."""
y_ticks = defaultdict(list)
for y, label, vals, color in self.iterator():
y_ticks[label].append((y, vals, color))
@@ -367,12 +411,14 @@ def labels_ticks_and_vals(self):
return labels, ticks, vals, colors
def treeplot(self, qlist, credible_interval):
+ """Get data for each treeplot for the variable."""
for y, _, values, color in self.iterator():
ntiles = np.percentile(values.flatten(), qlist)
ntiles[0], ntiles[-1] = hpd(values.flatten(), credible_interval)
yield y, ntiles, color
def ridgeplot(self, mult):
+ """Get data for each ridgeplot for the variable."""
xvals, yvals, pdfs, colors = [], [], [], []
for y, _, values, color in self.iterator():
yvals.append(y)
@@ -388,6 +434,7 @@ def ridgeplot(self, mult):
yield x, y, mult * pdf / scaling + y, color
def n_eff(self):
+ """Get effective n data for the variable."""
_, y_vals, values, colors = self.labels_ticks_and_vals()
for y, value, color in zip(y_vals, values, colors):
if value.ndim != 2 or value.shape[0] < 2:
@@ -396,6 +443,7 @@ def n_eff(self):
yield y, _get_neff(value), color
def r_hat(self):
+ """Get rhat data for the variable."""
_, y_vals, values, colors = self.labels_ticks_and_vals()
for y, value, color in zip(y_vals, values, colors):
if value.ndim != 2 or value.shape[0] < 2:
@@ -404,6 +452,7 @@ def r_hat(self):
yield y, _get_rhat(value), color
def y_max(self):
+ """Get max y value for the variable."""
end_y = max(y for y, *_ in self.iterator())
if self.combined:
diff --git a/arviz/plots/jointplot.py b/arviz/plots/jointplot.py
--- a/arviz/plots/jointplot.py
+++ b/arviz/plots/jointplot.py
@@ -1,3 +1,4 @@
+"""Joint scatter plot of two variables."""
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
@@ -13,7 +14,6 @@ def jointplot(data, var_names=None, coords=None, figsize=None, textsize=None, ki
Parameters
----------
-
data : xarray, or object that can be converted (pystan or pymc3 draws)
Posterior samples
var_names : list of variable names
@@ -34,13 +34,13 @@ def jointplot(data, var_names=None, coords=None, figsize=None, textsize=None, ki
marginal_shade : dicts, optional
Additional keywords modifying the marginals distributions (top and right subplot)
(to control the shade)
+
Returns
-------
axjoin : matplotlib axes, join (central) distribution
ax_hist_x : matplotlib axes, x (top) distribution
ax_hist_y : matplotlib axes, y (right) distribution
"""
-
data = convert_to_dataset(data, group='posterior')
if coords is None:
coords = {}
diff --git a/arviz/plots/kdeplot.py b/arviz/plots/kdeplot.py
--- a/arviz/plots/kdeplot.py
+++ b/arviz/plots/kdeplot.py
@@ -1,3 +1,4 @@
+"""One-dimensional kernel density estimate plots."""
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import gaussian, convolve # pylint: disable=no-name-in-module
@@ -9,8 +10,7 @@
def kdeplot(values, cumulative=False, rug=False, label=None, bw=4.5, rotated=False,
figsize=None, textsize=None, plot_kwargs=None, fill_kwargs=None,
rug_kwargs=None, ax=None):
- """
- 1D KDE plot taking into account boundary conditions
+ """1D KDE plot taking into account boundary conditions.
Parameters
----------
@@ -98,8 +98,8 @@ def kdeplot(values, cumulative=False, rug=False, label=None, bw=4.5, rotated=Fal
def fast_kde(x, cumulative=False, bw=4.5):
- """
- A fft-based Gaussian kernel density estimate (KDE)
+ """Fast Fourier transform-based Gaussian kernel density estimate (KDE).
+
The code was adapted from https://github.com/mfouesneau/faststats
Parameters
diff --git a/arviz/plots/khatplot.py b/arviz/plots/khatplot.py
--- a/arviz/plots/khatplot.py
+++ b/arviz/plots/khatplot.py
@@ -1,5 +1,7 @@
+"""Pareto tail indices plot."""
import matplotlib.pyplot as plt
import numpy as np
+
from .plot_utils import _scale_text
@@ -27,7 +29,6 @@ def khatplot(khats, figsize=None, textsize=None, ax=None, hlines_kwargs=None, **
ax : axes
Matplotlib axes.
"""
-
if figsize is None:
figsize = (8, 5)
diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py
--- a/arviz/plots/pairplot.py
+++ b/arviz/plots/pairplot.py
@@ -1,3 +1,4 @@
+"""Plot a scatter or hexbin of sampled parameters."""
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import NullFormatter
diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py
--- a/arviz/plots/parallelplot.py
+++ b/arviz/plots/parallelplot.py
@@ -1,13 +1,16 @@
+"""Parallel coordinates plot showing posterior points with and without divergences marked."""
import matplotlib.pyplot as plt
import numpy as np
+
from ..utils import trace_to_dataframe, get_varnames, get_stats
from .plot_utils import _scale_text
def parallelplot(trace, varnames=None, figsize=None, textsize=None, legend=True, colornd='k',
colord='C1', shadend=.025, skip_first=0, ax=None):
- """
- A parallel coordinates plot showing posterior points with and without divergences
+ """Parallel coordinates plot showing posterior points with and without divergences marked.
+
+ Described by https://arxiv.org/abs/1709.01449, suggested by Ari Hartikainen
Parameters
----------
diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -1,3 +1,4 @@
+"""Utilities for plotting."""
import itertools
import numpy as np
@@ -31,7 +32,6 @@ def _scale_text(figsize, textsize, scale_ratio=2):
scale_ratio : float (default: 2)
Ratio of size of elements compared to figsize. Larger is bigger.
"""
-
if textsize is None and figsize is not None:
textsize = figsize[0] * scale_ratio
@@ -41,8 +41,7 @@ def _scale_text(figsize, textsize, scale_ratio=2):
def get_bins(ary, max_bins=50, fenceposts=2):
- """
- Compute number of bins (or ticks)
+ """Compute number of bins (or ticks).
Parameters
----------
@@ -63,7 +62,7 @@ def get_bins(ary, max_bins=50, fenceposts=2):
def default_grid(n_items, max_cols=6, min_cols=3):
- """Makes a grid for subplots
+ """Make a grid for subplots.
Tries to get as close to sqrt(n_items) x sqrt(n_items) as it can,
but allows for custom logic
@@ -98,7 +97,8 @@ def in_bounds(val):
def _create_axes_grid(length_plotters, rows, cols, **kwargs):
- """
+ """Create figure and axes for grids with multiple plots.
+
Parameters
----------
n_items : int
@@ -139,7 +139,7 @@ def selection_to_string(selection):
def make_label(var_name, selection):
- """Consistent labelling for plots
+ """Consistent labelling for plots.
Parameters
----------
@@ -160,10 +160,10 @@ def make_label(var_name, selection):
def xarray_var_iter(data, var_names=None, combined=False, skip_dims=None, reverse_selections=False):
- """Converts xarray data to an iterator over vectors
+ """Convert xarray data to an iterator over vectors.
Iterates over each var_name and all of its coordinates, returning the 1d
- data
+ data.
Parameters
----------
diff --git a/arviz/plots/posteriorplot.py b/arviz/plots/posteriorplot.py
--- a/arviz/plots/posteriorplot.py
+++ b/arviz/plots/posteriorplot.py
@@ -1,3 +1,4 @@
+"""Plot posterior densities."""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import mode
@@ -10,8 +11,7 @@
def posteriorplot(data, var_names=None, coords=None, figsize=None, textsize=None,
credible_interval=0.94, round_to=1, point_estimate='mean', rope=None,
ref_val=None, kind='kde', bw=4.5, bins=None, ax=None, **kwargs):
- """
- Plot Posterior densities in the style of John K. Kruschke's book.
+ """Plot Posterior densities in the style of John K. Kruschke's book.
Parameters
----------
@@ -60,7 +60,6 @@ def posteriorplot(data, var_names=None, coords=None, figsize=None, textsize=None
Examples
--------
-
Show a default kernel density plot following style of John Kruschke
.. plot::
@@ -159,9 +158,7 @@ def posteriorplot(data, var_names=None, coords=None, figsize=None, textsize=None
def _plot_posterior_op(values, var_name, selection, ax, bw, linewidth, bins, kind, point_estimate,
round_to, credible_interval, ref_val, rope, textsize, **kwargs):
- """
- Artist to draw posterior.
- """
+ """Artist to draw posterior."""
def format_as_percent(x, round_to=0):
return '{0:.{1:d}f}%'.format(100 * x, round_to)
diff --git a/arviz/plots/ppcplot.py b/arviz/plots/ppcplot.py
--- a/arviz/plots/ppcplot.py
+++ b/arviz/plots/ppcplot.py
@@ -1,3 +1,4 @@
+"""Posterior predictive plot."""
import numpy as np
from .kdeplot import kdeplot
from .plot_utils import _scale_text, _create_axes_grid, default_grid
@@ -5,7 +6,7 @@
def ppcplot(data, ppc_sample, kind='kde', mean=True, figsize=None, textsize=None, ax=None):
"""
- Plot for Posterior Predictive Checks
+ Plot for Posterior Predictive checks.
Parameters
----------
@@ -28,7 +29,6 @@ def ppcplot(data, ppc_sample, kind='kde', mean=True, figsize=None, textsize=None
-------
ax : matplotlib axes
"""
-
rows, cols = default_grid(len(ppc_sample))
if figsize is None:
diff --git a/arviz/plots/traceplot.py b/arviz/plots/traceplot.py
--- a/arviz/plots/traceplot.py
+++ b/arviz/plots/traceplot.py
@@ -1,3 +1,4 @@
+"""Plot histogram and values from MCMC samples."""
import matplotlib.pyplot as plt
import numpy as np
diff --git a/arviz/plots/violintraceplot.py b/arviz/plots/violintraceplot.py
--- a/arviz/plots/violintraceplot.py
+++ b/arviz/plots/violintraceplot.py
@@ -1,3 +1,4 @@
+"""Plot posterior traces as violin plot."""
import numpy as np
import matplotlib.pyplot as plt
@@ -9,8 +10,7 @@
def violintraceplot(data, var_names=None, quartiles=True, credible_interval=0.94, shade=0.35,
bw=4.5, sharey=True, figsize=None, textsize=None, ax=None, kwargs_shade=None):
- """
- Plot posterior of traces as Violinplot
+ """Plot posterior of traces as violin plot.
Notes
-----
@@ -44,12 +44,11 @@ def violintraceplot(data, var_names=None, quartiles=True, credible_interval=0.94
ax : matplotlib axes
kwargs_shade : dicts, optional
Additional keywords passed to `fill_between`, or `barh` to control the shade
+
Returns
- ----------
+ -------
ax : matplotlib axes
-
"""
-
data = convert_to_dataset(data, group='posterior')
plotters = list(xarray_var_iter(data, var_names=var_names, combined=True))
@@ -93,9 +92,7 @@ def violintraceplot(data, var_names=None, quartiles=True, credible_interval=0.94
def _violinplot(val, shade, bw, ax, **kwargs_shade):
- """
- Auxiliary function to plot violinplots
- """
+ """Auxiliary function to plot violinplots."""
density, low_b, up_b = fast_kde(val, bw=bw)
x = np.linspace(low_b, up_b, len(density))
@@ -106,9 +103,7 @@ def _violinplot(val, shade, bw, ax, **kwargs_shade):
def cat_hist(val, shade, ax, **kwargs_shade):
- """
- Auxiliary function to plot discrete-violinplots
- """
+ """Auxiliary function to plot discrete-violinplots."""
bins = get_bins(val)
binned_d, _ = np.histogram(val, bins=bins, normed=True)
diff --git a/arviz/stats/__init__.py b/arviz/stats/__init__.py
--- a/arviz/stats/__init__.py
+++ b/arviz/stats/__init__.py
@@ -1,2 +1,3 @@
+"""Statistical tests and diagnostics for ArviZ."""
from .stats import bfmi, compare, hpd, loo, r2_score, summary, waic, psislw
from .diagnostics import effective_n, gelman_rubin, geweke
diff --git a/arviz/stats/diagnostics.py b/arviz/stats/diagnostics.py
--- a/arviz/stats/diagnostics.py
+++ b/arviz/stats/diagnostics.py
@@ -1,3 +1,4 @@
+"""Diagnostic functions for ArviZ."""
import warnings
import numpy as np
@@ -12,8 +13,7 @@
def effective_n(trace, varnames=None, round_to=2):
- R"""
- Returns estimate of the effective sample size of a set of traces.
+ r"""Calculate estimate of the effective sample size.
Parameters
----------
@@ -47,7 +47,6 @@ def effective_n(trace, varnames=None, round_to=2):
----------
Gelman et al. BDA (2014)
"""
-
trace = trace_to_dataframe(trace, combined=False)
varnames = get_varnames(trace, varnames)
@@ -64,9 +63,7 @@ def effective_n(trace, varnames=None, round_to=2):
def _get_neff(trace_value):
- """
- Compute the effective sample size for a 2D array
- """
+ """Compute the effective sample size for a 2D array."""
nchain, n_samples = trace_value.shape
acov = np.asarray([_autocov(trace_value[chain]) for chain in range(nchain)])
@@ -107,9 +104,9 @@ def _get_neff(trace_value):
def autocorr(x):
- """
- Compute autocorrelation using FFT for every lag for the input array
- https://en.wikipedia.org/wiki/autocorrelation#Efficient_computation
+ """Compute autocorrelation using FFT for every lag for the input array.
+
+ See https://en.wikipedia.org/wiki/autocorrelation#Efficient_computation
Parameters
----------
@@ -130,8 +127,7 @@ def autocorr(x):
def _autocov(x):
- """
- Compute autocovariance estimates for every lag for the input array
+ """Compute autocovariance estimates for every lag for the input array.
Parameters
----------
@@ -149,8 +145,7 @@ def _autocov(x):
def gelman_rubin(trace, varnames=None, round_to=2):
- R"""
- Returns estimate of R for a set of traces.
+ r"""Compute estimate of R-hat for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing the variance between
multiple chains to the variance within each chain. If convergence has been achieved, the
@@ -174,7 +169,6 @@ def gelman_rubin(trace, varnames=None, round_to=2):
Notes
-----
-
The diagnostic is computed by:
.. math:: \hat{R} = \frac{\hat{V}}{W}
@@ -189,7 +183,6 @@ def gelman_rubin(trace, varnames=None, round_to=2):
Brooks and Gelman (1998)
Gelman and Rubin (1992)
"""
-
trace = trace_to_dataframe(trace, combined=False)
varnames = get_varnames(trace, varnames)
@@ -204,8 +197,7 @@ def gelman_rubin(trace, varnames=None, round_to=2):
def _get_rhat(values, round_to=2):
- """Compute the rhat for a 2d array
- """
+ """Compute the rhat for a 2d array."""
num_samples = values.shape[1]
# Calculate between-chain variance
between_chain_variance = num_samples * np.var(np.mean(values, axis=1), axis=0, ddof=1)
@@ -219,8 +211,7 @@ def _get_rhat(values, round_to=2):
def geweke(trace, varnames=None, first=.1, last=.5, intervals=20):
- R"""
- Return z-scores for convergence diagnostics.
+ r"""Compute z-scores for convergence diagnostics.
Compare the mean of the first % of series with the mean of the last % of series. x is divided
into a number of segments for which this difference is computed. If the series is converged,
@@ -246,7 +237,6 @@ def geweke(trace, varnames=None, first=.1, last=.5, intervals=20):
Notes
-----
-
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
@@ -259,7 +249,6 @@ def geweke(trace, varnames=None, first=.1, last=.5, intervals=20):
----------
Geweke (1992)
"""
-
trace = trace_to_dataframe(trace, combined=False)
varnames = get_varnames(trace, varnames)
diff --git a/arviz/stats/stats.py b/arviz/stats/stats.py
--- a/arviz/stats/stats.py
+++ b/arviz/stats/stats.py
@@ -1,3 +1,4 @@
+"""Statistical functions in ArviZ."""
import warnings
import numpy as np
@@ -39,12 +40,11 @@ def bfmi(trace):
def compare(model_dict, ic='waic', method='stacking', b_samples=1000, alpha=1,
seed=None, round_to=2):
- R"""
- Compare models based on the widely applicable information criterion (WAIC) or leave-one-out
- (LOO) cross-validation.
+ R"""Compare models based on WAIC or LOO cross validation.
- Read more theory here - in a paper by some of the leading authorities on model selection
- - dx.doi.org/10.1111/1467-9868.00353
+ WAIC is Widely applicable information criterion, and LOO is leave-one-out
+ (LOO) cross-validation. Read more theory here - in a paper by some of the
+ leading authorities on model selection - dx.doi.org/10.1111/1467-9868.00353
Parameters
----------
@@ -97,7 +97,6 @@ def compare(model_dict, ic='waic', method='stacking', b_samples=1000, alpha=1,
warning : A value of 1 indicates that the computation of the IC may not be reliable. This could
be indication of WAIC/LOO starting to fail see http://arxiv.org/abs/1507.04544 for details.
"""
-
names = [model.name for model in model_dict if model.name]
if not names:
names = np.arange(len(model_dict))
@@ -213,9 +212,7 @@ def gradient(weights):
def _ic_matrix(ics, ic_i):
- """
- Store the previously computed pointwise predictive accuracy values (ics) in a 2D matrix array.
- """
+ """Store the previously computed pointwise predictive accuracy values (ics) in a 2D matrix."""
cols, _ = ics.shape
rows = len(ics[ic_i].iloc[0])
ic_i_val = np.zeros((rows, cols))
@@ -287,8 +284,7 @@ def hpd(x, credible_interval=0.94, transform=lambda x: x, circular=False):
def loo(trace, model, pointwise=False, reff=None):
- """
- Pareto-smoothed importance sampling leave-one-out cross-validation
+ """Pareto-smoothed importance sampling leave-one-out cross-validation.
Calculates leave-one-out (LOO) cross-validation for out of sample predictive model fit,
following Vehtari et al. (2015). Cross-validation is computed using Pareto-smoothed
@@ -315,7 +311,6 @@ def loo(trace, model, pointwise=False, reff=None):
Pareto distribution is greater than 0.7 for one or more samples
loo_i: array of pointwise predictive accuracy, only if pointwise True
"""
-
if reff is None:
df = trace_to_dataframe(trace, combined=False)
nchains = df.columns.value_counts()[0]
@@ -428,8 +423,8 @@ def psislw(log_weights, reff=1.):
def _gpdfit(x):
- """
- Estimate the parameters for the Generalized Pareto Distribution (GPD)
+ """Estimate the parameters for the Generalized Pareto Distribution (GPD).
+
Empirical Bayes estimate for the parameters of the generalized Pareto
distribution given the data.
@@ -478,7 +473,7 @@ def _gpdfit(x):
def _gpinv(probs, kappa, sigma):
- """Inverse Generalized Pareto distribution function"""
+ """Inverse Generalized Pareto distribution function."""
x = np.full_like(probs, np.nan)
if sigma <= 0:
return x
@@ -505,8 +500,7 @@ def _gpinv(probs, kappa, sigma):
def r2_score(y_true, y_pred, round_to=2):
- """
- R² for Bayesian regression models. Only valid for linear models.
+ """R² for Bayesian regression models. Only valid for linear models.
Parameters
----------
@@ -516,6 +510,7 @@ def r2_score(y_true, y_pred, round_to=2):
Estimated target values.
round_to : int
Number of decimals used to round results. Defaults to 2.
+
Returns
-------
Pandas Series with the following indices:
@@ -537,8 +532,7 @@ def r2_score(y_true, y_pred, round_to=2):
def summary(trace, varnames=None, round_to=2, transform=lambda x: x, circ_varnames=None,
stat_funcs=None, extend=False, credible_interval=0.94, skip_first=0, batches=None):
- R"""
- Create a data frame with summary statistics.
+ R"""Create a data frame with summary statistics.
Parameters
----------
@@ -587,7 +581,6 @@ def summary(trace, varnames=None, round_to=2, transform=lambda x: x, circ_varnam
Examples
--------
-
.. code:: ipython
>>> az.summary(trace, ['mu'])
@@ -667,10 +660,10 @@ def summary(trace, varnames=None, round_to=2, transform=lambda x: x, circ_varnam
def _mc_error(x, batches=5, circular=False):
- """
- Calculates the simulation standard error, accounting for non-independent
- samples. The trace is divided into batches, and the standard deviation of
- the batch means is calculated.
+ """Calculate the simulation standard error, accounting for non-independent samples.
+
+ The trace is divided into batches, and the standard deviation of the batch
+ means is calculated.
Parameters
----------
@@ -721,10 +714,11 @@ def _mc_error(x, batches=5, circular=False):
def waic(trace, model, pointwise=False):
- """
- Calculate the widely available information criterion, its standard error and the effective
- number of parameters of the samples in trace from model.
- Read more theory here - in a paper by some of the leading authorities on model selection
+ """Calculate the widely available information criterion.
+
+ Also calculates the WAIC's standard error and the effective number of
+ parameters of the samples in trace from model. Read more theory here - in
+ a paper by some of the leading authorities on model selection
dx.doi.org/10.1111/1467-9868.00353
Parameters
@@ -745,7 +739,6 @@ def waic(trace, model, pointwise=False):
densities exceeds 0.4
waic_i: and array of the pointwise predictive accuracy, only if pointwise True
"""
-
log_py = log_post_trace(trace, model)
lppd_i = logsumexp(log_py, axis=0, b=1.0 / log_py.shape[0])
diff --git a/arviz/utils/__init__.py b/arviz/utils/__init__.py
--- a/arviz/utils/__init__.py
+++ b/arviz/utils/__init__.py
@@ -1,3 +1,4 @@
+"""Utility module for ArviZ."""
from .utils import (trace_to_dataframe, get_stats, expand_variable_names, get_varnames,
_create_flat_names, log_post_trace, load_data,
save_trace, load_trace, untransform_varnames, load_arviz_data)
diff --git a/arviz/utils/utils.py b/arviz/utils/utils.py
--- a/arviz/utils/utils.py
+++ b/arviz/utils/utils.py
@@ -1,3 +1,4 @@
+"""Miscellaneous utilities for supporting ArviZ."""
import bz2
import gzip
import importlib
@@ -28,14 +29,14 @@ def _has_type(object_, typename, module_path):
Is fed to `importlib.import_module`.
Returns
- ----------
+ -------
has_type : bool
`True` if `isinstance(object, getattr(importlib.import_module(module_path), typename))`.
`False` if this condition does not hold or the module under
`module_path` is not installed.
Examples
- ----------
+ --------
A string is not a real number:
>>> _has_type("aha", typename="Real", module_path="numbers")
@@ -102,9 +103,7 @@ def untransform_varnames(varnames):
def expand_variable_names(trace, varnames):
- """
- Expand the name of variables to include multidimensional variables
- """
+ """Expand the name of variables to include multidimensional variables."""
tmp = []
for vtrace in pd.unique(trace.columns):
for varname in varnames:
@@ -114,8 +113,7 @@ def expand_variable_names(trace, varnames):
def get_stats(trace, stat=None, combined=True):
- """
- Get sampling statistics from trace
+ """Get sampling statistics from trace.
Parameters
----------
@@ -125,8 +123,9 @@ def get_stats(trace, stat=None, combined=True):
Statistics
combined : Bool
If True multiple statistics from different chains will be combined together.
+
Returns
- ----------
+ -------
stat: array with the chosen statistic
"""
if _has_type(trace, typename="MultiTrace", module_path="pymc3.backends.base"):
@@ -146,6 +145,7 @@ def get_stats(trace, stat=None, combined=True):
def get_varnames(trace, varnames):
+ """Extract variable names from a trace."""
if varnames is None:
return np.unique(trace.columns)
else:
@@ -153,8 +153,8 @@ def get_varnames(trace, varnames):
def log_post_trace(trace, model):
- """
- Calculate the elementwise log-posterior for the sampled trace.
+ """Calculate the elementwise log-posterior for the sampled trace.
+
Currently only supports trace and models from PyMC3.
Parameters
@@ -241,6 +241,7 @@ def trace_to_dataframe(trace, combined=True):
def _create_flat_names(varname, shape):
"""Return flat variable names for `varname` of `shape`.
+
Examples
--------
>>> create_flat_names('x', (5,))
@@ -256,8 +257,7 @@ def _create_flat_names(varname, shape):
def _is_transformed_name(name):
- """
- Quickly check if a name was transformed with `get_transformed_name`
+ """Quickly check if a name was transformed with `get_transformed_name`.
Parameters
----------
@@ -345,8 +345,7 @@ def load_trace(filepath, combined=False):
def load_data(filename):
- """
- Load netcdf file back into an arviz.InferenceData
+ """Load netcdf file back into an arviz.InferenceData.
Parameters
----------
@@ -357,7 +356,7 @@ def load_data(filename):
def load_arviz_data(dataset):
- """Load built-in arviz dataset into memory
+ """Load built-in arviz dataset into memory.
Will print out available datasets in case of error.
diff --git a/arviz/utils/xarray_utils.py b/arviz/utils/xarray_utils.py
--- a/arviz/utils/xarray_utils.py
+++ b/arviz/utils/xarray_utils.py
@@ -1,3 +1,4 @@
+"""Utilities for converting and working with netcdf and xarray data."""
import re
import warnings
@@ -9,7 +10,7 @@
def convert_to_inference_data(obj, *_, group='posterior', coords=None, dims=None):
- """Convert a supported object to an InferenceData object
+ """Convert a supported object to an InferenceData object.
This function sends `obj` to the right conversion function. It is idempotent,
in that it will return arviz.InferenceData objects unchanged.
@@ -72,7 +73,7 @@ def convert_to_inference_data(obj, *_, group='posterior', coords=None, dims=None
def convert_to_dataset(obj, *_, group='posterior', coords=None, dims=None):
- """Convert a supported object to an xarray dataset
+ """Convert a supported object to an xarray dataset.
This function is idempotent, in that it will return xarray.Dataset functions
unchanged. Raises `ValueError` if the desired group can not be extracted.
@@ -115,12 +116,15 @@ def convert_to_dataset(obj, *_, group='posterior', coords=None, dims=None):
class requires: # pylint: disable=invalid-name
- """Decorator to return None if an object does not have the required attribute"""
+ """Decorator to return None if an object does not have the required attribute."""
+
def __init__(self, *props):
self.props = props
def __call__(self, func):
+ """Wrap the decorated function."""
def wrapped(cls, *args, **kwargs):
+ """Return None if not all props are available."""
for prop in self.props:
if getattr(cls, prop) is None:
return None
@@ -194,6 +198,12 @@ def numpy_to_data_array(ary, *_, var_name='data', coords=None, dims=None):
def dict_to_dataset(data, *_, coords=None, dims=None):
+ """Convert a dictionary of numpy arrays to an xarray.Dataset.
+
+ Examples
+ --------
+ dict_to_dataset({'x': np.random.randn(4, 100), 'y', np.random.rand(4, 100)})
+ """
if dims is None:
dims = {}
@@ -207,6 +217,8 @@ def dict_to_dataset(data, *_, coords=None, dims=None):
class PyMC3Converter:
+ """Encapsulate PyMC3 specific logic."""
+
def __init__(self, *_, trace=None, prior=None, posterior_predictive=None,
coords=None, dims=None):
self.trace = trace
@@ -217,8 +229,7 @@ def __init__(self, *_, trace=None, prior=None, posterior_predictive=None,
@requires('trace')
def posterior_to_xarray(self):
- """Convert the posterior to an xarray dataset
- """
+ """Convert the posterior to an xarray dataset."""
var_names = pm.utils.get_default_varnames(self.trace.varnames, include_transformed=False)
data = {}
for var_name in var_names:
@@ -227,6 +238,7 @@ def posterior_to_xarray(self):
@requires('trace')
def sample_stats_to_xarray(self):
+ """Extract sample_stats from PyMC3 trace."""
data = {}
for stat in self.trace.stat_names:
data[stat] = np.array(self.trace.get_sampler_stats(stat, combine=False))
@@ -234,16 +246,24 @@ def sample_stats_to_xarray(self):
@requires('posterior_predictive')
def posterior_predictive_to_xarray(self):
+ """Convert posterior_predictive samples to xarray."""
data = {k: np.expand_dims(v, 0) for k, v in self.posterior_predictive.items()}
return dict_to_dataset(data, coords=self.coords, dims=self.dims)
@requires('prior')
def prior_to_xarray(self):
+ """Convert prior samples to xarray."""
return dict_to_dataset({k: np.expand_dims(v, 0) for k, v in self.prior.items()},
coords=self.coords,
dims=self.dims)
def to_inference_data(self):
+ """Convert all available data to an InferenceData object.
+
+ Note that if groups can not be created (i.e., there is no `trace`, so
+ the `posterior` and `sample_stats` can not be extracted), then the InferenceData
+ will not have those groups.
+ """
return InferenceData(**{
'posterior': self.posterior_to_xarray(),
'sample_stats': self.sample_stats_to_xarray(),
@@ -253,6 +273,8 @@ def to_inference_data(self):
class PyStanConverter:
+ """Encapsulate PyStan specific logic."""
+
def __init__(self, *_, fit=None, coords=None, dims=None):
self.fit = fit
self.coords = coords
@@ -261,6 +283,7 @@ def __init__(self, *_, fit=None, coords=None, dims=None):
@requires('fit')
def posterior_to_xarray(self):
+ """Extract posterior samples from fit."""
dtypes = self.infer_dtypes()
data = {}
var_dict = self.fit.extract(self._var_names, dtypes=dtypes, permuted=False)
@@ -281,6 +304,7 @@ def posterior_to_xarray(self):
@requires('fit')
def sample_stats_to_xarray(self):
+ """Extract sample_stats from fit."""
dtypes = {
'divergent__' : bool,
'n_leapfrog__' : np.int64,
@@ -323,9 +347,10 @@ def sample_stats_to_xarray(self):
@requires('fit')
def infer_dtypes(self):
- """Infer dtypes from Stan model code. Function strips out generated quantities block
- and searchs for `int` dtypes after stripping out comments inside the block.
+ """Infer dtypes from Stan model code.
+ Function strips out generated quantities block and searchs for `int`
+ dtypes after stripping out comments inside the block.
"""
pattern_remove_comments = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
@@ -351,7 +376,7 @@ def infer_dtypes(self):
return dtypes
def unpermute(self, ary, idx, nchain):
- """Unpermute permuted sample
+ """Unpermute permuted sample.
Returns output compatible with PyStan 2.18+
fit.extract(par, permuted=False)[par]
@@ -378,6 +403,12 @@ def unpermute(self, ary, idx, nchain):
return ary
def to_inference_data(self):
+ """Convert all available data to an InferenceData object.
+
+ Note that if groups can not be created (i.e., there is no `trace`, so
+ the `posterior` and `sample_stats` can not be extracted), then the InferenceData
+ will not have those groups.
+ """
return InferenceData(**{
'posterior': self.posterior_to_xarray(),
'sample_stats': self.sample_stats_to_xarray(),
@@ -386,6 +417,7 @@ def to_inference_data(self):
def pymc3_to_inference_data(*_, trace=None, prior=None, posterior_predictive=None,
coords=None, dims=None):
+ """Convert pymc3 data into an InferenceData object."""
return PyMC3Converter(
trace=trace,
prior=prior,
@@ -395,6 +427,7 @@ def pymc3_to_inference_data(*_, trace=None, prior=None, posterior_predictive=Non
def pystan_to_inference_data(*_, fit=None, coords=None, dims=None):
+ """Convert pystan data into an InferenceData object."""
return PyStanConverter(
fit=fit,
coords=coords,
| diff --git a/arviz/tests/__init__.py b/arviz/tests/__init__.py
--- a/arviz/tests/__init__.py
+++ b/arviz/tests/__init__.py
@@ -0,0 +1 @@
+"""Test suite."""
diff --git a/arviz/tests/helpers.py b/arviz/tests/helpers.py
--- a/arviz/tests/helpers.py
+++ b/arviz/tests/helpers.py
@@ -1,3 +1,4 @@
+"""Test helper functions."""
import os
import pickle
import sys
@@ -9,24 +10,35 @@
class BaseArvizTest():
+ """Base class for running arviz tests."""
@classmethod
def setup_class(cls):
+ """Run once for the class.
+
+ This has to exist so subclasses can inherit.
+ """
pass
@classmethod
def teardown_class(cls):
+ """Teardown at end of tests.
+
+ This has to exist so subclasses can inherit.
+ """
pass
def setup_method(self):
+ """Run for every test."""
np.random.seed(1)
def teardown_method(self):
+ """Run for every test."""
plt.close('all')
def eight_schools_params():
- """Share setup for eight schools"""
+ """Share setup for eight schools."""
return {
'J': 8,
'y': np.array([28., 8., -3., 7., -1., 1., 18., 12.]),
@@ -35,6 +47,7 @@ def eight_schools_params():
def pystan_noncentered_schools(data, draws, chains):
+ """Non-centered eight schools implementation for pystan."""
schools_code = '''
data {
int<lower=0> J;
@@ -69,6 +82,7 @@ def pystan_noncentered_schools(data, draws, chains):
def pymc3_noncentered_schools(data, draws, chains):
+ """Non-centered eight schools implementation for pymc3."""
with pm.Model() as model:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
@@ -80,6 +94,7 @@ def pymc3_noncentered_schools(data, draws, chains):
def load_cached_models(draws, chains):
+ """Load pymc3 and pystan models from pickle."""
here = os.path.dirname(os.path.abspath(__file__))
data = eight_schools_params()
supported = (
| Consistent, good documentation
A good way to track this is
```bash
$ pydocstyle --convention=numpy arviz | wc -l
352
```
This check could be added to the Travis build as well. Note that right now each violation takes 2 lines of output, so there are "only" 176 violations.
I'm working on this now, but it should not affect any code.
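For reference, a short made-up example (not taken from the codebase) of the general docstring shape that `pydocstyle --convention=numpy` checks for — a one-line imperative summary ending in a period, plus underlined `Parameters`/`Returns` sections — which is the style the patch above applies across the modules:
```python
"""Provide an example of a numpy-style docstring layout."""


def interval_width(values, credible_interval=0.94):
    """Compute the width of a credible interval.

    Parameters
    ----------
    values : array-like
        Posterior samples to summarize.
    credible_interval : float
        Width of the credible interval, between 0 and 1.

    Returns
    -------
    float
        Difference between the upper and lower interval bounds.
    """
```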
| 2018-09-03T23:44:29Z | [] | [] |
|
arviz-devs/arviz | 205 | arviz-devs__arviz-205 | [
"124"
] | be2cf3b89b67d77628df02106c2e07c3ce38dafb | diff --git a/arviz/plots/energyplot.py b/arviz/plots/energyplot.py
--- a/arviz/plots/energyplot.py
+++ b/arviz/plots/energyplot.py
@@ -3,20 +3,21 @@
import matplotlib.pyplot as plt
from .kdeplot import kdeplot
from ..stats import bfmi as e_bfmi
-from ..utils import get_stats
+from .plot_utils import _scale_text
+from ..utils import convert_to_dataset
-def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alpha=(1, .75),
- fill_color=('C0', 'C5'), bw=4.5, skip_first=0, fill_kwargs=None, ax=None,
- **kwargs):
+def energyplot(data, kind='kde', bfmi=True, figsize=None, legend=True, fill_alpha=(1, .75),
+ fill_color=('C0', 'C5'), bw=4.5, textsize=None, fill_kwargs=None, plot_kwargs=None,
+ ax=None):
"""Plot energy transition distribution and marginal energy distribution in HMC algorithms.
This may help to diagnose poor exploration by gradient-based algorithms like HMC or NUTS.
Parameters
----------
- trace : Pandas DataFrame or PyMC3 trace
- Posterior samples
+ data : xarray dataset, or object that can be converted (must represent
+ `sample_stats` and have an `energy` variable)
kind : str
Type of plot to display (kde or histogram)
bfmi : bool
@@ -35,10 +36,12 @@ def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alp
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy). Only works if `kind='kde'`
- skip_first : int
- Number of first samples not shown in plots (burn-in).
+ textsize: int
+ Text size for labels
fill_kwargs : dicts, optional
- Additional keywords passed to `fill_between` (to control the shade)
+ Additional keywords passed to `arviz.kdeplot` (to control the shade)
+ plot_kwargs : dicts, optional
+ Additional keywords passed to `arviz.kdeplot` or `plt.hist` (if type='hist')
ax : axes
Matplotlib axes.
@@ -46,10 +49,10 @@ def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alp
-------
ax : matplotlib axes
"""
- energy = get_stats(trace[skip_first:], 'energy')
+ energy = convert_to_dataset(data, group='sample_stats').energy.values
if figsize is None:
- figsize = (6, 6)
+ figsize = (8, 6)
if ax is None:
_, ax = plt.subplots(figsize=figsize)
@@ -57,6 +60,10 @@ def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alp
if fill_kwargs is None:
fill_kwargs = {}
+ if plot_kwargs is None:
+ plot_kwargs = {}
+ textsize, linewidth, _ = _scale_text(figsize, textsize=textsize, scale_ratio=1)
+
series = zip(
fill_alpha,
fill_color,
@@ -68,24 +75,23 @@ def energyplot(trace, kind='kde', bfmi=True, figsize=None, legend=True, fill_alp
for alpha, color, label, value in series:
fill_kwargs['alpha'] = alpha
fill_kwargs['color'] = color
- plot_kwargs = {
- 'color': color,
- 'alpha': 0
- }
- plot_kwargs.update(kwargs)
- kdeplot(value, bw=bw, label=label,
+ plot_kwargs.setdefault('color', color)
+ plot_kwargs.setdefault('alpha', 0)
+ plot_kwargs.setdefault('linewidth', linewidth)
+ kdeplot(value, bw=bw, label=label, textsize=textsize,
plot_kwargs=plot_kwargs, fill_kwargs=fill_kwargs, ax=ax)
elif kind == 'hist':
for alpha, color, label, value in series:
- ax.hist(value, alpha=alpha, label=label, color=color, **kwargs)
+ ax.hist(value.flatten(), bins='auto', density=True, alpha=alpha,
+ label=label, color=color, **plot_kwargs)
else:
raise ValueError('Plot type {} not recognized.'.format(kind))
if bfmi:
- for idx, val in enumerate(e_bfmi(trace)):
- plt.plot([], label='chain {:>2} BFMI = {:.2f}'.format(idx, val), alpha=0)
+ for idx, val in enumerate(e_bfmi(energy)):
+ ax.plot([], label='chain {:>2} BFMI = {:.2f}'.format(idx, val), alpha=0)
ax.set_xticks([])
ax.set_yticks([])
diff --git a/arviz/stats/stats.py b/arviz/stats/stats.py
--- a/arviz/stats/stats.py
+++ b/arviz/stats/stats.py
@@ -7,13 +7,13 @@
from scipy.stats import dirichlet, circmean, circstd
from scipy.optimize import minimize
-from ..utils import get_stats, get_varnames, trace_to_dataframe, log_post_trace
+from ..utils import get_varnames, trace_to_dataframe, log_post_trace
from .diagnostics import effective_n, gelman_rubin
__all__ = ['bfmi', 'compare', 'hpd', 'loo', 'psislw', 'r2_score', 'summary', 'waic']
-def bfmi(trace):
+def bfmi(energy):
R"""Calculate the estimated Bayesian fraction of missing information (BFMI).
BFMI quantifies how well momentum resampling matches the marginal energy distribution. For more
@@ -24,8 +24,10 @@ def bfmi(trace):
Parameters
----------
- trace : Pandas DataFrame or PyMC3 trace
- Result of an HMC/NUTS run, must contain energy information
+ energy : NumPy array
+ Should be extracted from a gradient based sampler, such as in Stan or PyMC3. Typically,
+ after converting a trace or fit to InferenceData, the energy will be in
+ `data.sample_stats.energy`.
Returns
-------
@@ -33,9 +35,8 @@ def bfmi(trace):
The Bayesian fraction of missing information of the model and trace. One element per
chain in the trace.
"""
- energy = np.atleast_2d(get_stats(trace, 'energy', combined=False))
-
- return np.square(np.diff(energy, axis=1)).mean(axis=1) / np.var(energy, axis=1)
+ energy_mat = np.atleast_2d(energy)
+ return np.square(np.diff(energy_mat, axis=1)).mean(axis=1) / np.var(energy_mat, axis=1)
def compare(model_dict, ic='waic', method='stacking', b_samples=1000, alpha=1,
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -54,10 +54,10 @@ def test_forestplot(self):
_, axes = forestplot(obj, kind='ridgeplot', r_hat=False, n_eff=False)
assert axes.shape == (1,)
- def test_energyplot(self):
- with pytest.raises(AttributeError):
- energyplot(self.df_trace)
- assert energyplot(self.short_trace)
+ @pytest.mark.parametrize('kind', ['kde', 'hist'])
+ def test_energyplot(self, kind):
+ for obj in (self.short_trace, self.fit):
+ assert energyplot(obj, kind=kind)
def test_parallelplot(self):
with pytest.raises(ValueError):
diff --git a/arviz/tests/test_stats.py b/arviz/tests/test_stats.py
--- a/arviz/tests/test_stats.py
+++ b/arviz/tests/test_stats.py
@@ -11,8 +11,8 @@
def test_bfmi():
- trace = pd.DataFrame([1, 2, 3, 4], columns=['energy'])
- assert_almost_equal(bfmi(trace), 0.8)
+ energy = np.array([1, 2, 3, 4])
+ assert_almost_equal(bfmi(energy), 0.8)
def test_hpd():
| Energyplot needs update to work with xarray
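A small usage sketch of the reworked signature, mirroring the patch and test change above; the short NUTS run is only there to produce a trace whose `sample_stats` contain `energy` (any PyMC3 trace or PyStan fit from a gradient-based sampler would do):
```python
import pymc3 as pm
import arviz as az

# Minimal model sampled with NUTS so that energy statistics are recorded;
# energyplot now converts the trace and reads sample_stats.energy itself.
with pm.Model():
    pm.Normal('x', 0, 1)
    trace = pm.sample(500, chains=2)

az.energyplot(trace, kind='kde')              # marginal vs. transition energy KDEs
az.energyplot(trace, kind='hist', bfmi=True)  # histogram variant with per-chain BFMI
```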
| 2018-09-06T02:04:09Z | [] | [] |
|
arviz-devs/arviz | 221 | arviz-devs__arviz-221 | [
"181"
] | f4f7dd7cf38327c2a2eca3587ad7c7df81d52bc9 | diff --git a/arviz/inference_data.py b/arviz/inference_data.py
--- a/arviz/inference_data.py
+++ b/arviz/inference_data.py
@@ -58,13 +58,16 @@ def from_netcdf(filename):
groups[group] = data
return InferenceData(**groups)
- def to_netcdf(self, filename):
+ def to_netcdf(self, filename, compress=True):
"""Write InferenceData to file using netcdf4.
Parameters
----------
filename : str
Location to write to
+ compress : bool
+ Whether to compress result. Note this saves disk space, but may make
+ saving and loading somewhat slower (default: True).
Returns
-------
@@ -74,7 +77,10 @@ def to_netcdf(self, filename):
mode = 'w' # overwrite first, then append
for group in self._groups:
data = getattr(self, group)
- data.to_netcdf(filename, mode=mode, group=group)
+ kwargs = {}
+ if compress:
+ kwargs['encoding'] = {var_name: {'zlib': True} for var_name in data.variables}
+ data.to_netcdf(filename, mode=mode, group=group, **kwargs)
data.close()
mode = 'a'
return filename
diff --git a/arviz/plots/autocorrplot.py b/arviz/plots/autocorrplot.py
--- a/arviz/plots/autocorrplot.py
+++ b/arviz/plots/autocorrplot.py
@@ -6,8 +6,7 @@
from ..stats.diagnostics import autocorr
-def autocorrplot(data, var_names=None, max_lag=100, combined=False,
- figsize=None, textsize=None):
+def autocorrplot(data, var_names=None, max_lag=100, combined=False, figsize=None, textsize=None):
"""Bar plot of the autocorrelation function for a sequence of data.
Useful in particular for posteriors from MCMC samples which may display correlation.
diff --git a/arviz/plots/ppcplot.py b/arviz/plots/ppcplot.py
--- a/arviz/plots/ppcplot.py
+++ b/arviz/plots/ppcplot.py
@@ -4,81 +4,104 @@
from .plot_utils import _scale_text, _create_axes_grid, default_grid
-def ppcplot(data, ppc_sample, kind='kde', mean=True, figsize=None, textsize=None, ax=None):
+def ppcplot(data, kind='kde', alpha=0.2, mean=True, figsize=None, textsize=None):
"""
Plot for Posterior Predictive checks.
+ Note that this plot will flatten out any dimensions in the posterior predictive variables.
+
Parameters
----------
data : Array-like
Observed values
- ppc_samples : dict
- Posterior predictive check samples
kind : str
Type of plot to display (kde or cumulative)
+ alpha : float
+ Opacity of posterior predictive density curves
mean : bool
- Whether or not to plot the mean ppc distribution. Defaults to True
+ Whether or not to plot the mean posterior predictive distribution. Defaults to True
figsize : figure size tuple
If None, size is (6, 5)
textsize: int
Text size for labels. If None it will be auto-scaled based on figsize.
- ax: axes
- Matplotlib axes
Returns
-------
- ax : matplotlib axes
+ axes : matplotlib axes
"""
- rows, cols = default_grid(len(ppc_sample))
-
- if figsize is None:
- figsize = (7, 5)
+ for group in ('posterior_predictive', 'observed_data'):
+ if not hasattr(data, group):
+ raise TypeError(
+ '`data` argument must have the group "{group}" for ppcplot'.format(group=group))
- _, ax = _create_axes_grid(len(ppc_sample), rows, cols, figsize=figsize)
+ observed = data.observed_data
+ posterior_predictive = data.posterior_predictive
- textsize, linewidth, _ = _scale_text(figsize, textsize, 2)
+ rows, cols = default_grid(len(observed.data_vars))
+ if figsize is None:
+ figsize = (7 * cols, 5 * rows)
+ _, axes = _create_axes_grid(len(observed.data_vars), rows, cols, figsize=figsize)
- for ax_, (var, ppss) in zip(np.atleast_1d(ax), ppc_sample.items()):
+ textsize, linewidth, _ = _scale_text(figsize, textsize)
+ for ax, var_name in zip(np.atleast_1d(axes), observed.data_vars):
if kind == 'kde':
- kdeplot(data, label='{}'.format(var),
+ kdeplot(observed[var_name].values.flatten(), label='Observed {}'.format(var_name),
plot_kwargs={'color': 'k', 'linewidth': linewidth, 'zorder': 3},
fill_kwargs={'alpha': 0},
- ax=ax_)
- for pps in ppss:
- kdeplot(pps,
- plot_kwargs={'color': 'C5', 'linewidth': 0.5 * linewidth},
- fill_kwargs={'alpha': 0},
- ax=ax_)
- ax_.plot([], color='C5', label='{}_pps'.format(var))
+ ax=ax)
+ for _, chain_vals in posterior_predictive[var_name].groupby('chain'):
+ for _, vals in chain_vals.groupby('draw'):
+ kdeplot(vals,
+ plot_kwargs={'color': 'C4',
+ 'alpha': alpha,
+ 'linewidth': 0.5 * linewidth},
+ fill_kwargs={'alpha': 0},
+ ax=ax)
+ ax.plot([], color='C4', label='Posterior predictive {}'.format(var_name))
if mean:
- kdeplot(ppss,
+ kdeplot(posterior_predictive[var_name].values.flatten(),
plot_kwargs={'color': 'C0',
'linestyle': '--',
'linewidth': linewidth,
'zorder': 2},
- label='mean {}_pps'.format(var),
- ax=ax_)
- ax_.set_xlabel(var, fontsize=textsize)
- ax_.set_yticks([])
+ label='Posterior predictive mean {}'.format(var_name),
+ ax=ax)
+ ax.set_xlabel(var_name, fontsize=textsize)
+ ax.set_yticks([])
elif kind == 'cumulative':
- ax_.plot(*_ecdf(data), color='k', lw=linewidth, label='{}'.format(var), zorder=3)
- for pps in ppss:
- ax_.plot(*_ecdf(pps), alpha=0.2, color='C5', lw=linewidth)
- ax_.plot([], color='C5', label='{}_pps'.format(var))
+ ax.plot(*_empirical_cdf(observed[var_name].values.flatten()),
+ color='k',
+ linewidth=linewidth,
+ label='Observed {}'.format(var_name),
+ zorder=3)
+ for _, chain_vals in posterior_predictive[var_name].groupby('chain'):
+ for _, vals in chain_vals.groupby('draw'):
+ ax.plot(*_empirical_cdf(vals), alpha=alpha, color='C4', linewidth=linewidth)
+ ax.plot([], color='C4', label='Posterior predictive {}'.format(var_name))
if mean:
- ax_.plot(*_ecdf(ppss.flatten()), color='C0', ls='--', lw=linewidth,
- label='mean {}_pps'.format(var))
- ax_.set_xlabel(var, fontsize=textsize)
- ax_.set_yticks([0, 0.5, 1])
- ax_.legend(fontsize=textsize)
- ax_.tick_params(labelsize=textsize)
+ ax.plot(*_empirical_cdf(posterior_predictive[var_name].values.flatten()),
+ color='C0',
+ linestyle='--',
+ linewidth=linewidth,
+ label='Posterior predictive mean {}'.format(var_name))
+ ax.set_xlabel(var_name, fontsize=textsize)
+ ax.set_yticks([0, 0.5, 1])
+ ax.legend(fontsize=textsize)
+ return axes
+
- return ax
+def _empirical_cdf(data):
+ """Compute empirical cdf of a numpy array.
+ Parameters
+ ----------
+ data : np.array
+ 1d array
-def _ecdf(data):
- len_data = len(data)
- data_s = np.sort(data)
- cdf = np.arange(1, len_data + 1) / len_data
- return data_s, cdf
+ Returns
+ -------
+ np.array, np.array
+ x and y coordinates for the empirical cdf of the data
+ """
+ return np.sort(data), np.linspace(0, 1, len(data))
diff --git a/arviz/utils/utils.py b/arviz/utils/utils.py
--- a/arviz/utils/utils.py
+++ b/arviz/utils/utils.py
@@ -385,7 +385,7 @@ def load_arviz_data(dataset):
NUTS in PyMC3. Features named coordinates for each of the eight schools.
''',
'path': os.path.join(data_path, 'non_centered_eight.nc')
- }
+ },
}
if dataset in datasets_available:
return InferenceData.from_netcdf(datasets_available[dataset]['path'])
diff --git a/arviz/utils/xarray_utils.py b/arviz/utils/xarray_utils.py
--- a/arviz/utils/xarray_utils.py
+++ b/arviz/utils/xarray_utils.py
@@ -282,9 +282,13 @@ def posterior_to_xarray(self):
@requires('trace')
def sample_stats_to_xarray(self):
"""Extract sample_stats from PyMC3 trace."""
+ rename_key = {
+ 'model_logp' : 'lp',
+ }
data = {}
for stat in self.trace.stat_names:
- data[stat] = np.array(self.trace.get_sampler_stats(stat, combine=False))
+ name = rename_key.get(stat, stat)
+ data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False))
return dict_to_dataset(data)
@requires('posterior_predictive')
diff --git a/examples/ppcplot.py b/examples/ppcplot.py
--- a/examples/ppcplot.py
+++ b/examples/ppcplot.py
@@ -5,25 +5,8 @@
_thumb: .6, .5
"""
import arviz as az
-import numpy as np
-import pymc3 as pm
az.style.use('arviz-darkgrid')
-# Data of the Eight Schools Model
-J = 8
-y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
-sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
-
-
-with pm.Model() as centered_eight:
- mu = pm.Normal('mu', mu=0, sd=5)
- tau = pm.HalfCauchy('tau', beta=5)
- theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
- obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
- centered_eight_trace = pm.sample()
-
-with centered_eight:
- ppc_samples = pm.sample_ppc(centered_eight_trace, samples=100)
-
-az.ppcplot(y, ppc_samples)
+data = az.load_arviz_data('non_centered_eight')
+az.ppcplot(data, alpha=0.03, figsize=(12, 6), textsize=14);
diff --git a/examples/ppcplot_cumulative.py b/examples/ppcplot_cumulative.py
new file mode 100644
--- /dev/null
+++ b/examples/ppcplot_cumulative.py
@@ -0,0 +1,12 @@
+"""
+Posterior Predictive Check Cumulative Plot
+==========================================
+
+_thumb: .6, .5
+"""
+import arviz as az
+
+az.style.use('arviz-darkgrid')
+
+data = az.load_arviz_data('non_centered_eight')
+az.ppcplot(data, alpha=0.03, kind='cumulative', figsize=(12, 6), textsize=14)
\ No newline at end of file
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -5,6 +5,7 @@
import pytest
from .helpers import eight_schools_params, load_cached_models, BaseArvizTest
+from ..utils import pymc3_to_inference_data
from ..plots import (densityplot, traceplot, energyplot, posteriorplot, autocorrplot, forestplot,
parallelplot, pairplot, jointplot, ppcplot, violintraceplot)
@@ -80,8 +81,10 @@ def test_pairplot(self):
pairplot(self.short_trace, kind='hexbin', var_names=['theta'],
coords={'theta_dim_0': [0, 1]}, plot_kwargs={'cmap': 'viridis'}, textsize=20)
- def test_ppcplot(self):
- ppcplot(self.data['y'], self.sample_ppc)
+ @pytest.mark.parametrize('kind', ['kde', 'cumulative'])
+ def test_ppcplot(self, kind):
+ data = pymc3_to_inference_data(trace=self.short_trace, posterior_predictive=self.sample_ppc)
+ ppcplot(data, kind=kind)
def test_violintraceplot(self):
for obj in (self.short_trace, self.fit):
| Make `ppcplot` work with `InferenceData`
After #180 this will probably be the last plot to get to parity with pymc3 plotting.
| nitpicking: `ppcplot` is not available in PyMC3, it is ArviZ-native :-)
In that case, congrats on pymc3 feature parity! 🎉
I'll still probably tackle this next so we can remove all the pandas scaffolding before renaming/reorganizing everything. | 2018-09-09T21:20:04Z | [] | [] |
arviz-devs/arviz | 315 | arviz-devs__arviz-315 | [
"243"
] | 9c5363318ed4b79a75326fa865acb9a1d0ecb5ba | diff --git a/arviz/data/base.py b/arviz/data/base.py
--- a/arviz/data/base.py
+++ b/arviz/data/base.py
@@ -1,8 +1,10 @@
"""Low level converters usually used by other functions."""
-import warnings
from copy import deepcopy
+import datetime
+import warnings
import numpy as np
+import pkg_resources
import xarray as xr
@@ -134,12 +136,31 @@ def numpy_to_data_array(ary, *, var_name='data', coords=None, dims=None):
return xr.DataArray(ary, coords=coords, dims=dims)
-def dict_to_dataset(data, *, coords=None, dims=None):
+def dict_to_dataset(data, *, attrs=None, library=None, coords=None, dims=None):
"""Convert a dictionary of numpy arrays to an xarray.Dataset.
+ Parameters
+ ----------
+ data : dict[str] -> ndarray
+ Data to convert. Keys are variable names.
+ attrs : dict
+ Json serializable metadata to attach to the dataset, in addition to defaults.
+ library : module
+ Library used for performing inference. Will be attached to the attrs metadata.
+ coords : dict[str] -> ndarray
+ Coordinates for the dataset
+ dims : dict[str] -> list[str]
+ Dimensions of each variable. The keys are variable names, values are lists of
+ coordinates.
+
+ Returns
+ -------
+ xr.Dataset
+
Examples
--------
dict_to_dataset({'x': np.random.randn(4, 100), 'y', np.random.rand(4, 100)})
+
"""
if dims is None:
dims = {}
@@ -150,4 +171,34 @@ def dict_to_dataset(data, *, coords=None, dims=None):
var_name=key,
coords=coords,
dims=dims.get(key))
- return xr.Dataset(data_vars=data_vars)
+ return xr.Dataset(data_vars=data_vars, attrs=make_attrs(attrs=attrs, library=library))
+
+
+def make_attrs(attrs=None, library=None):
+ """Make standard attributes to attach to xarray datasets.
+
+ Parameters
+ ----------
+ attrs : dict (optional)
+ Additional attributes to add or overwrite
+
+ Returns
+ -------
+ dict
+ attrs
+ """
+ default_attrs = {
+ 'created_at': datetime.datetime.utcnow().isoformat(),
+ }
+ if library is not None:
+ library_name = library.__name__
+ default_attrs['inference_library'] = library_name
+ try:
+ version = pkg_resources.get_distribution(library_name).version
+ default_attrs['inference_library_version'] = version
+ except pkg_resources.DistributionNotFound:
+ pass
+
+ if attrs is not None:
+ default_attrs.update(attrs)
+ return default_attrs
diff --git a/arviz/data/io_emcee.py b/arviz/data/io_emcee.py
--- a/arviz/data/io_emcee.py
+++ b/arviz/data/io_emcee.py
@@ -54,20 +54,22 @@ def __init__(self, sampler, *_, var_names=None, arg_names=None, coords=None, dim
self.arg_names = arg_names
self.coords = coords
self.dims = dims
+ import emcee
+ self.emcee = emcee
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
data = {}
for idx, var_name in enumerate(self.var_names):
data[var_name] = self.sampler.chain[(..., idx)]
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
data = {}
for idx, var_name in enumerate(self.arg_names):
data[var_name] = self.sampler.args[idx]
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.emcee, coords=self.coords, dims=self.dims)
def to_inference_data(self):
"""Convert all available data to an InferenceData object."""
diff --git a/arviz/data/io_pymc3.py b/arviz/data/io_pymc3.py
--- a/arviz/data/io_pymc3.py
+++ b/arviz/data/io_pymc3.py
@@ -3,7 +3,7 @@
import xarray as xr
from .inference_data import InferenceData
-from .base import requires, dict_to_dataset, generate_dims_coords
+from .base import requires, dict_to_dataset, generate_dims_coords, make_attrs
class PyMC3Converter:
@@ -16,6 +16,8 @@ def __init__(self, *_, trace=None, prior=None, posterior_predictive=None,
self.posterior_predictive = posterior_predictive
self.coords = coords
self.dims = dims
+ import pymc3
+ self.pymc3 = pymc3
@requires('trace')
def _extract_log_likelihood(self):
@@ -55,14 +57,14 @@ def log_likelihood_vals_point(point):
@requires('trace')
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
- import pymc3 as pm
- var_names = pm.utils.get_default_varnames(self.trace.varnames, # pylint: disable=no-member
- include_transformed=False)
+ var_names = self.pymc3.utils.get_default_varnames( # pylint: disable=no-member
+ self.trace.varnames,
+ include_transformed=False)
data = {}
for var_name in var_names:
data[var_name] = np.array(self.trace.get_values(var_name, combine=False,
squeeze=False))
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)
@requires('trace')
def sample_stats_to_xarray(self):
@@ -81,18 +83,19 @@ def sample_stats_to_xarray(self):
else:
dims = None
- return dict_to_dataset(data, dims=dims, coords=self.coords)
+ return dict_to_dataset(data, library=self.pymc3, dims=dims, coords=self.coords)
@requires('posterior_predictive')
def posterior_predictive_to_xarray(self):
"""Convert posterior_predictive samples to xarray."""
data = {k: np.expand_dims(v, 0) for k, v in self.posterior_predictive.items()}
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)
@requires('prior')
def prior_to_xarray(self):
"""Convert prior samples to xarray."""
return dict_to_dataset({k: np.expand_dims(v, 0) for k, v in self.prior.items()},
+ library=self.pymc3,
coords=self.coords,
dims=self.dims)
@@ -117,7 +120,7 @@ def observed_data_to_xarray(self):
# filter coords based on the dims
coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}
observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)
- return xr.Dataset(data_vars=observed_data)
+ return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))
def to_inference_data(self):
"""Convert all available data to an InferenceData object.
diff --git a/arviz/data/io_pyro.py b/arviz/data/io_pyro.py
--- a/arviz/data/io_pyro.py
+++ b/arviz/data/io_pyro.py
@@ -44,6 +44,8 @@ def __init__(self, posterior, *_, coords=None, dims=None):
self.observed_vars, self.latent_vars = _get_var_names(posterior)
self.coords = coords
self.dims = dims
+ import pyro
+ self.pyro = pyro
def posterior_to_xarray(self):
"""Convert the posterior to an xarray dataset."""
@@ -53,7 +55,7 @@ def posterior_to_xarray(self):
for var_name in self.latent_vars:
samples = EmpiricalMarginal(self.posterior, sites=var_name).get_samples_and_weights()[0]
data[var_name] = np.expand_dims(samples.numpy().squeeze(), 0)
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=self.dims)
def observed_data_to_xarray(self):
"""Convert observed data to xarray."""
@@ -62,7 +64,7 @@ def observed_data_to_xarray(self):
for var_name in self.observed_vars:
samples = EmpiricalMarginal(self.posterior, sites=var_name).get_samples_and_weights()[0]
data[var_name] = np.expand_dims(samples.numpy().squeeze(), 0)
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pyro, coords=self.coords, dims=self.dims)
def to_inference_data(self):
"""Convert all available data to an InferenceData object."""
diff --git a/arviz/data/io_pystan.py b/arviz/data/io_pystan.py
--- a/arviz/data/io_pystan.py
+++ b/arviz/data/io_pystan.py
@@ -6,7 +6,7 @@
import xarray as xr
from .inference_data import InferenceData
-from .base import requires, dict_to_dataset, generate_dims_coords
+from .base import requires, dict_to_dataset, generate_dims_coords, make_attrs
class PyStanConverter:
@@ -22,6 +22,8 @@ def __init__(self, *_, fit=None, prior=None, posterior_predictive=None,
self.coords = coords
self.dims = dims
self._var_names = fit.model_pars
+ import pystan
+ self.pystan = pystan
@requires('fit')
def posterior_to_xarray(self):
@@ -64,7 +66,7 @@ def posterior_to_xarray(self):
else:
values = np.expand_dims(values, 0)
data[var_name] = np.swapaxes(values, 0, 1)
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
@requires('fit')
def sample_stats_to_xarray(self):
@@ -143,7 +145,7 @@ def sample_stats_to_xarray(self):
name = re.sub('__$', "", key)
name = "diverging" if name == 'divergent' else name
data[name] = np.vstack([j[key].astype(dtypes.get(key)) for j in sampler_params])
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
@requires('fit')
@requires('posterior_predictive')
@@ -186,7 +188,7 @@ def posterior_predictive_to_xarray(self):
else:
values = np.expand_dims(values, 0)
data[var_name] = np.swapaxes(values, 0, 1)
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
@requires('fit')
@requires('prior')
@@ -204,7 +206,7 @@ def prior_to_xarray(self):
values = np.expand_dims(values, 0)
values = np.swapaxes(values, 0, 1)
data[key] = values
- return dict_to_dataset(data, coords=self.coords, dims=self.dims)
+ return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
@requires('fit')
@requires('observed_data')
@@ -234,7 +236,7 @@ def observed_data_to_xarray(self):
val_dims, coords = generate_dims_coords(vals.shape, key,
dims=val_dims, coords=self.coords)
observed_data[key] = xr.DataArray(vals, dims=val_dims, coords=coords)
- return xr.Dataset(data_vars=observed_data)
+ return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pystan))
@requires('fit')
def infer_dtypes(self):
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -192,9 +192,10 @@ def test_plot_khat():
"divergences_kwargs": {'marker': '*', 'c': 'C'}},
{"divergences": True, "plot_kwargs": {'marker': 'x'},
- "divergences_kwargs": {'marker': '*', 'c': 'C'}},
+ "divergences_kwargs": {'marker': '*', 'c': 'C'},
+ "var_names": ['theta']},
- {"kind": "kde"},
+ {"kind": "kde", "var_names": ['theta']},
{"kind": 'hexbin', "var_names": ['theta'],
"coords":{'theta_dim_0': [0, 1]}, "colorbar": True,
| Actually use `attrs` to describe xarray datasets in convert functions
`attrs` are json metadata attached to xarray datasets - we could even put whole stan programs in there, and maybe pymc3 ones, using `inspect` if we're clever.
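For readers unfamiliar with the mechanism, here is a minimal sketch of attaching such metadata to a dataset; the attribute names mirror the `make_attrs` helper added in the patch above, while the values are purely illustrative:
```python
# Minimal sketch: JSON-serializable metadata rides along with an xarray Dataset
# via its `attrs` dict and survives a netCDF round trip.
import datetime
import numpy as np
import xarray as xr

posterior = xr.Dataset(
    {"mu": (("chain", "draw"), np.random.randn(4, 500))},
    attrs={
        "created_at": datetime.datetime.utcnow().isoformat(),
        "inference_library": "pystan",           # same keys as make_attrs above
        "inference_library_version": "2.18.1",   # illustrative value
    },
)
print(posterior.attrs["inference_library"])
```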
| Whatever settles this should also have a special mark for ensemble samplers - `from_emcee` puts non-independent chains into the `posterior` group, which may be misleading.
Also, cmdstan/pystan have sampler stats that are not currently parsed/extracted. E.g. inits, seeds, etc. should live in there, right?
arviz-devs/arviz | 387 | arviz-devs__arviz-387 | [
"385"
] | 18ee1a84bce34fbb4c1c8cf2c112ffcdb3a41d4f | diff --git a/arviz/plots/traceplot.py b/arviz/plots/traceplot.py
--- a/arviz/plots/traceplot.py
+++ b/arviz/plots/traceplot.py
@@ -12,6 +12,7 @@ def plot_trace(
data,
var_names=None,
coords=None,
+ divergences="bottom",
figsize=None,
textsize=None,
lines=None,
@@ -22,6 +23,9 @@ def plot_trace(
):
"""Plot samples histograms and values.
+ If `divergences` data is available in `sample_stats`, will plot the location of divergences as
+ dashed vertical lines.
+
Parameters
----------
data : obj
@@ -31,6 +35,8 @@ def plot_trace(
Variables to be plotted, two variables are required.
coords : mapping, optional
Coordinates of var_names to be plotted. Passed to `Dataset.sel`
+ divergences : {"bottom", "top", None, False}
+ Plot location of divergences on the traceplots. Options are "bottom", "top", or False-y.
figsize : figure size tuple
If None, size is (12, variables * 2)
textsize: float
@@ -83,6 +89,12 @@ def plot_trace(
>>> coords = {'theta_t_dim_0': [0, 1], 'school':['Lawrenceville']}
>>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines)
"""
+ if divergences:
+ try:
+ divergence_data = convert_to_dataset(data, group="sample_stats").diverging
+ except (ValueError, AttributeError): # No sample_stats, or no `.diverging`
+ divergences = False
+
data = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names)
@@ -111,50 +123,104 @@ def plot_trace(
hist_kwargs.setdefault("alpha", 0.35)
figsize, _, titlesize, xt_labelsize, linewidth, _ = _scale_fig_size(
- figsize, textsize, len(plotters), 2
+ figsize, textsize, rows=len(plotters), cols=2
)
trace_kwargs.setdefault("linewidth", linewidth)
kde_kwargs.setdefault("plot_kwargs", {"linewidth": linewidth})
- fig, axes = plt.subplots(len(plotters), 2, squeeze=False, figsize=figsize)
+ _, axes = plt.subplots(
+ len(plotters), 2, squeeze=False, figsize=figsize, constrained_layout=True
+ )
- for i, (var_name, selection, value) in enumerate(plotters):
+ colors = {}
+ for idx, (var_name, selection, value) in enumerate(plotters):
+ colors[idx] = []
if combined:
value = value.flatten()
value = np.atleast_2d(value)
- colors = []
+
for row in value:
- axes[i, 1].plot(np.arange(len(row)), row, **trace_kwargs)
- colors.append(axes[i, 1].get_lines()[-1].get_color())
+ axes[idx, 1].plot(np.arange(len(row)), row, **trace_kwargs)
+
+ colors[idx].append(axes[idx, 1].get_lines()[-1].get_color())
kde_kwargs.setdefault("plot_kwargs", {})
- kde_kwargs["plot_kwargs"]["color"] = colors[-1]
+ kde_kwargs["plot_kwargs"]["color"] = colors[idx][-1]
if row.dtype.kind == "i":
- _histplot_op(axes[i, 0], row, **hist_kwargs)
+ _histplot_op(axes[idx, 0], row, **hist_kwargs)
else:
- plot_kde(row, textsize=xt_labelsize, ax=axes[i, 0], **kde_kwargs)
+ plot_kde(row, textsize=xt_labelsize, ax=axes[idx, 0], **kde_kwargs)
+
+ axes[idx, 0].set_yticks([])
+ for col in (0, 1):
+ axes[idx, col].set_title(make_label(var_name, selection), fontsize=titlesize)
+ axes[idx, col].tick_params(labelsize=xt_labelsize)
+
+ for idx, (var_name, selection, value) in enumerate(plotters):
+ if combined:
+ value = value.flatten()
+ value = np.atleast_2d(value)
- axes[i, 0].set_yticks([])
- for idx in (0, 1):
- axes[i, idx].set_title(make_label(var_name, selection), fontsize=titlesize)
- axes[i, idx].tick_params(labelsize=xt_labelsize)
+ xlims = [ax.get_xlim() for ax in axes[idx, :]]
+ ylims = [ax.get_ylim() for ax in axes[idx, :]]
+
+ if divergences:
+ div_selection = {k: v for k, v in selection.items() if k in divergence_data.dims}
+ divs = divergence_data.sel(**div_selection).values
+ if combined:
+ divs = divs.flatten()
+ divs = np.atleast_2d(divs)
+
+ for chain, chain_divs in enumerate(divs):
+ div_idxs = np.arange(len(chain_divs))[chain_divs]
+ if div_idxs.size > 0:
+ if divergences == "top":
+ ylocs = [ylim[1] for ylim in ylims]
+ else:
+ ylocs = [ylim[0] for ylim in ylims]
+ values = value[chain, div_idxs]
+ axes[idx, 1].plot(
+ div_idxs,
+ np.zeros_like(div_idxs) + ylocs[1],
+ marker="|",
+ color="black",
+ markeredgewidth=1.5,
+ markersize=30,
+ linestyle="None",
+ alpha=hist_kwargs["alpha"],
+ zorder=-5,
+ )
+ axes[idx, 1].set_ylim(*ylims[1])
+ axes[idx, 0].plot(
+ values,
+ np.zeros_like(values) + ylocs[0],
+ marker="|",
+ color="black",
+ markeredgewidth=1.5,
+ markersize=30,
+ linestyle="None",
+ alpha=trace_kwargs["alpha"],
+ zorder=-5,
+ )
+ axes[idx, 0].set_ylim(*ylims[0])
for _, _, vlines in (j for j in lines if j[0] == var_name and j[1] == selection):
if isinstance(vlines, (float, int)):
line_values = [vlines]
else:
line_values = np.atleast_1d(vlines).ravel()
- axes[i, 0].vlines(
- line_values, *axes[i, 0].get_ylim(), colors=colors, linewidth=1.5, alpha=0.75
+ axes[idx, 0].vlines(
+ line_values, *ylims[0], colors=colors[idx][0], linewidth=1.5, alpha=0.75
)
- axes[i, 1].hlines(
+ axes[idx, 1].hlines(
line_values,
- *axes[i, 1].get_xlim(),
- colors=colors,
+ *xlims[1],
+ colors=colors[idx][0],
linewidth=1.5,
alpha=trace_kwargs["alpha"]
)
- axes[i, 0].set_ylim(bottom=0)
- fig.tight_layout()
+ axes[idx, 0].set_ylim(bottom=0, top=ylims[0][1])
+ axes[idx, 1].set_xlim(left=0, right=data.draw.max())
+ axes[idx, 1].set_ylim(*ylims[1])
return axes
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -131,6 +131,8 @@ def test_plot_density_discrete(discrete_model):
{"var_names": "mu"},
{"var_names": ["mu", "tau"]},
{"combined": True},
+ {"divergences": "top"},
+ {"divergences": False},
{"lines": [("mu", {}, [1, 2])]},
{"lines": [("mu", 0)]},
],
| Add divergences to traceplot
A flag to add vertical and/or horizontal lines to the traceplot whenever there are divergences would be neat. That is `az.plot_trace(data, divergences=True)`.
I made a quick mockup here:
![image](https://user-images.githubusercontent.com/2295568/47872447-ffe0ba80-dde4-11e8-81be-48b2bd920541.png)
Inspired by [this slide](https://youtu.be/E8vdXoJId8M?t=685) from Jonah Gabry + Dan Simpson's recent talk, where Dan is saying that a traceplot "tells you *nothing*."
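A rough sketch of the idea in plain matplotlib (this is a mockup like the image above, not the code that ended up in the PR); the arrays stand in for one chain of one variable and its `diverging` flags:
```python
# Hypothetical illustration: tick-mark the divergent iterations along the trace.
import matplotlib.pyplot as plt
import numpy as np

draws = np.cumsum(np.random.randn(500)) * 0.05   # stand-in for sampled values
diverging = np.random.rand(500) < 0.02           # stand-in for sample_stats "diverging"

fig, ax = plt.subplots()
ax.plot(np.arange(len(draws)), draws, alpha=0.35)
div_idx = np.nonzero(diverging)[0]
ax.plot(div_idx, np.full(div_idx.size, ax.get_ylim()[0]),
        marker="|", linestyle="None", color="black")
plt.show()
```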
| 2018-11-02T14:19:29Z | [] | [] |
|
arviz-devs/arviz | 557 | arviz-devs__arviz-557 | [
"554"
] | 33774a10e2ab77448685032abae86539a9a828b6 | diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -260,7 +260,7 @@ def xarray_var_iter(data, var_names=None, combined=False, skip_dims=None, revers
for var_name in var_names:
if var_name in data:
new_dims = [dim for dim in data[var_name].dims if dim not in skip_dims]
- vals = [set(data[var_name][dim].values) for dim in new_dims]
+ vals = [np.unique(data[var_name][dim].values) for dim in new_dims]
dims = [{k: v for k, v in zip(new_dims, prod)} for prod in product(*vals)]
if reverse_selections:
dims = reversed(dims)
| diff --git a/arviz/tests/test_plot_utils.py b/arviz/tests/test_plot_utils.py
--- a/arviz/tests/test_plot_utils.py
+++ b/arviz/tests/test_plot_utils.py
@@ -3,6 +3,7 @@
import xarray as xr
import pytest
+from ..data import from_dict
from ..plots.plot_utils import make_2d, xarray_to_ndarray, xarray_var_iter, get_bins, get_coords
@@ -54,6 +55,19 @@ def test_dataset_to_numpy_combined(sample_dataset):
assert (data[var_names.index("tau")] == tau.reshape(1, 6)).all()
+def test_xarray_var_iter_ordering():
+ """Assert that coordinate names stay the provided order"""
+ coords = list("abcd")
+ data = from_dict( # pylint: disable=no-member
+ {"x": np.random.randn(1, 100, len(coords))},
+ coords={"in_order": coords},
+ dims={"x": ["in_order"]},
+ ).posterior
+
+ coord_names = [sel["in_order"] for _, sel, _ in xarray_var_iter(data)]
+ assert coord_names == coords
+
+
def test_xarray_var_iter_ordering_combined(sample_dataset): # pylint: disable=invalid-name
"""Assert that varname order stays consistent when chains are combined"""
_, _, data = sample_dataset
| Ordering of variables after coords set
## Short Description
I'm trying to use coords/dims to set labels on posterior variables from a Stan fit, and the order of the dimensions gets shuffled when I set the labels (not sure what determines the order). Is there a way to (1) retain the existing order (A, B, C, ...) and (2) sort the order according to a custom index that I provide (e.g. pass an index array like [2, 1, 0, 3, ...] to have the plot appear in the order [C, B, A, D, ...])?
Sorry if this is covered in the documentation somewhere, I didn't see it.
Without setting any coords/dims, the variables appear in the same order as one would expect from the stan code.
![test1](https://user-images.githubusercontent.com/12652166/51454598-838ab300-1d13-11e9-9d8d-fd42e3be07b2.png)
Setting the coords/dims with the code below changes the order to this.
![test2](https://user-images.githubusercontent.com/12652166/51454545-3b6b9080-1d13-11e9-8417-6e9a029e8803.png)
## Code Example or link
```
azdata = az.from_pystan(posterior=fit, observed_data=['Y'], coords={'betas': ['A', 'B', 'C', 'D', 'E', 'F', 'G']}, dims={'b': ['betas']})
axes = az.plot_forest(azdata, var_names=('b'))
```
Using arviz 0.3.2
| Thanks, this is definitely a bug. How is the order in `azdata`?
Are you on python 3.5 or 3.6+?
Same behavior on python 3.6 and 3.7 (numpy 1.15.4 in both cases). The order in the azdata variable looks fine as far as I can tell (let me know if that's not what you mean).
```
<xarray.Dataset>
Dimensions: (b_Intercept_dim_0: 4, betas: 7, chain: 2, draw: 500, r_1_1_dim_0: 29, r_2_1_dim_0: 96, sd_1_dim_0: 1, sd_2_dim_0: 1, temp_Intercept_dim_0: 4, z_1_dim_0: 1, z_1_dim_1: 29, z_2_dim_0: 1, z_2_dim_1: 96)
Coordinates:
* chain (chain) int64 0 1
* draw (draw) int64 0 1 2 3 4 5 6 ... 494 495 496 497 498 499
* betas (betas) <U1 'A' 'B' 'C' 'D' 'E' 'F' 'G'
* temp_Intercept_dim_0 (temp_Intercept_dim_0) int64 0 1 2 3
* sd_1_dim_0 (sd_1_dim_0) int64 0
* z_1_dim_0 (z_1_dim_0) int64 0
* z_1_dim_1 (z_1_dim_1) int64 0 1 2 3 4 5 6 ... 23 24 25 26 27 28
* sd_2_dim_0 (sd_2_dim_0) int64 0
* z_2_dim_0 (z_2_dim_0) int64 0
* z_2_dim_1 (z_2_dim_1) int64 0 1 2 3 4 5 6 ... 90 91 92 93 94 95
* r_1_1_dim_0 (r_1_1_dim_0) int64 0 1 2 3 4 5 ... 23 24 25 26 27 28
* r_2_1_dim_0 (r_2_1_dim_0) int64 0 1 2 3 4 5 ... 90 91 92 93 94 95
* b_Intercept_dim_0 (b_Intercept_dim_0) int64 0 1 2 3
Data variables:
b (chain, draw, betas) float64 -1.356 0.6095 ... 0.5485
temp_Intercept (chain, draw, temp_Intercept_dim_0) float64 -1.338 ... 5.715
sd_1 (chain, draw, sd_1_dim_0) float64 2.041 ... 1.669
z_1 (chain, draw, z_1_dim_0, z_1_dim_1) float64 0.4641 ... 0.4383
sd_2 (chain, draw, sd_2_dim_0) float64 3.567 ... 3.731
z_2 (chain, draw, z_2_dim_0, z_2_dim_1) float64 -0.06457 ... 0.2636
r_1_1 (chain, draw, r_1_1_dim_0) float64 0.9473 ... 0.7318
r_2_1 (chain, draw, r_2_1_dim_0) float64 -0.2303 ... 0.9832
b_Intercept (chain, draw, b_Intercept_dim_0) float64 -1.401 ... 4.859
Attributes:
created_at: 2019-01-21T08:20:13.242255
inference_library: pystan
inference_library_version: 2.18.1.0
```
@daeh Is the InferenceData object something you can share? Although not strictly necessary it would help me help you if I could get a copy of the input data
Thank you for reporting this
Just to clarify: when you plot
```python
az.plot_forest(fit, var_names='b')
```
you get a different result from
```python
azdata = az.from_pystan(posterior=fit, observed_data=['Y'], coords={'betas': ['A', 'B', 'C', 'D', 'E', 'F', 'G']}, dims={'b': ['betas']})
az.plot_forest(azdata, var_names='b')
```
and specifically the coordinates show up out of order?
@canyon289 pickled inference objects attached [inferenceobjects.zip](https://github.com/arviz-devs/arviz/files/2781219/inferenceobjects.zip).
@ColCarroll correct.
Code that generated both below:
```
axes = az.plot_forest(fit, var_names=('b'))
plt.savefig('fit_raw.png')
##
azdata_base = az.from_pystan(posterior=fit, observed_data=['Y'], dims={'b': ['betas']})
axes = az.plot_forest(azdata_base, var_names=('b'))
plt.savefig('azdata_base.png')
with open('azdata_base.pkl', 'wb') as f:
pickle.dump(azdata_base, f)
##
azdata = az.from_pystan(posterior=fit, observed_data=['Y'], coords={'betas': ['A', 'B', 'C', 'D', 'E', 'F', 'G']}, dims={'b': ['betas']})
axes = az.plot_forest(azdata, var_names=('b'))
plt.savefig('azdata.png')
with open('azdata.pkl', 'wb') as f:
pickle.dump(azdata, f)
```
| raw stan fit | azdata_base.png | azdata.png |
| ------------- | ------------- | ------------- |
| ![fit_raw](https://user-images.githubusercontent.com/12652166/51511509-91057300-1dcf-11e9-876e-66bef2c60f7a.png) | ![azdata_base](https://user-images.githubusercontent.com/12652166/51511112-f9535500-1dcd-11e9-95a3-539991dcbfe6.png) | ![azdata](https://user-images.githubusercontent.com/12652166/51511110-f8babe80-1dcd-11e9-98fa-7d6b7907b92b.png) |
Thanks for the prompt reply @daeh! (also, kudos for the sweet `pathlib` use!)
This can be recreated with the eight schools model in pymc3. I should be able to dig deeper tomorrow morning. Definitely a bug, but thank goodness that the dimensions map correctly (0 -> 'a', 1 -> 'b', etc)!
```python
import arviz as az
import numpy as np
import pymc3 as pm
data = {
"J": 8,
"y": np.array([28.0, 8.0, -3.0, 7.0, -1.0, 1.0, 18.0, 12.0]),
"sigma": np.array([15.0, 10.0, 16.0, 11.0, 9.0, 11.0, 10.0, 18.0]),
}
with pm.Model() as model:
mu = pm.Normal("mu", mu=0, sd=5)
tau = pm.HalfCauchy("tau", beta=5)
eta = pm.Normal("eta", mu=0, sd=1, shape=data["J"])
theta = pm.Deterministic("theta", mu + tau * eta)
pm.Normal("obs", mu=theta, sd=data["sigma"], observed=data["y"])
trace = pm.sample()
az.plot_forest(trace, var_names='theta')
```
![image](https://user-images.githubusercontent.com/2295568/51511913-3e2cbb00-1dd1-11e9-8a69-6f65e42b9cde.png)
```python
data = az.from_pymc3(trace=trace, coords={'schools': list('abcdefgh')}, dims={'theta': ['schools']})
az.plot_forest(data, var_names='theta')
```
![image](https://user-images.githubusercontent.com/2295568/51511920-45ec5f80-1dd1-11e9-8984-f8c19edbfbb1.png)
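The patch above swaps `set(...)` for `np.unique(...)` inside `xarray_var_iter`; a minimal sketch of why that matters (illustrative only, not arviz code):
```python
# Iterating a Python set of strings is not insertion-ordered and can differ
# between runs (string hashing is randomized per process), so coordinate
# labels built from it may come out shuffled. np.unique is deterministic.
import numpy as np

coords = ["A", "B", "C", "D", "E", "F", "G"]
print(list(set(coords)))        # arbitrary order, e.g. ['D', 'A', 'G', ...]
print(list(np.unique(coords)))  # always ['A', 'B', 'C', 'D', 'E', 'F', 'G']
```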
| 2019-01-22T13:51:02Z | [] | [] |
arviz-devs/arviz | 582 | arviz-devs__arviz-582 | [
"581"
] | 7908a609ff8cdd8f2fe9482966c8dfd348af22be | diff --git a/arviz/plots/kdeplot.py b/arviz/plots/kdeplot.py
--- a/arviz/plots/kdeplot.py
+++ b/arviz/plots/kdeplot.py
@@ -69,7 +69,7 @@ def plot_kde(
Use `space` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot.
contour_kwargs : dict
- Keywords passed to the contourplot. Ignored for 1D KDE
+ Keywords passed to the contourplot. Ignored for 1D KDE.
ax : matplotlib axes
legend : bool
Add legend to the figure. By default True.
@@ -176,7 +176,7 @@ def plot_kde(
if cumulative:
density_q = density
else:
- density_q = np.cumsum(density)
+ density_q = density.cumsum() / density.sum()
fill_func = ax.fill_between
fill_x, fill_y = x, density
if rotated:
@@ -240,7 +240,7 @@ def plot_kde(
return ax
-def _fast_kde(x, cumulative=False, bw=4.5):
+def _fast_kde(x, cumulative=False, bw=4.5, xmin=None, xmax=None):
"""Fast Fourier transform-based Gaussian kernel density estimate (KDE).
The code was adapted from https://github.com/mfouesneau/faststats
@@ -254,6 +254,10 @@ def _fast_kde(x, cumulative=False, bw=4.5):
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy).
+ xmin : float
+ Manually set lower limit.
+ xmax : float
+ Manually set upper limit.
Returns
-------
@@ -264,12 +268,21 @@ def _fast_kde(x, cumulative=False, bw=4.5):
x = np.asarray(x, dtype=float)
x = x[np.isfinite(x)]
len_x = len(x)
- xmin, xmax = np.min(x), np.max(x)
+ n_points = 200 if (xmin or xmax) is None else 500
+
+ if xmin is None:
+ xmin = np.min(x)
+ if xmax is None:
+ xmax = np.max(x)
+
+ assert np.min(x) >= xmin
+ assert np.max(x) <= xmax
std_x = entropy(x - xmin) * bw
- n_bins = min(int(len_x ** (1 / 3) * std_x * 2), 200)
- grid = _histogram(x, n_bins)
+ n_bins = min(int(len_x ** (1 / 3) * std_x * 2), n_points)
+ d_x = (xmax - xmin) / (n_bins - 1)
+ grid = _histogram(x, n_bins, range_hist=(xmin, xmax))
scotts_factor = len_x ** (-0.2)
kern_nx = int(scotts_factor * 2 * np.pi * std_x)
@@ -278,18 +291,19 @@ def _fast_kde(x, cumulative=False, bw=4.5):
npad = min(n_bins, 2 * kern_nx)
grid = np.concatenate([grid[npad:0:-1], grid, grid[n_bins : n_bins - npad : -1]])
density = convolve(grid, kernel, mode="same", method="direct")[npad : npad + n_bins]
+ norm_factor = len_x * d_x * (2 * np.pi * std_x ** 2 * scotts_factor ** 2) ** 0.5
- density /= np.sum(density)
+ density /= norm_factor
if cumulative:
- density = np.cumsum(density)
+ density = density.cumsum() / density.sum()
return density, xmin, xmax
@conditional_jit
-def _histogram(x, n_bins):
- grid, _ = np.histogram(x, bins=n_bins)
+def _histogram(x, n_bins, range_hist=None):
+ grid, _ = np.histogram(x, bins=n_bins, range=range_hist)
return grid
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -2,6 +2,7 @@
import os
import matplotlib.pyplot as plt
from pandas import DataFrame
+from scipy.stats import gaussian_kde
import numpy as np
import pytest
import pymc3 as pm
@@ -24,6 +25,7 @@
plot_violin,
plot_compare,
plot_kde,
+ _fast_kde,
plot_khat,
plot_hpd,
)
@@ -665,3 +667,23 @@ def test_plot_compare_no_ic(models):
def test_plot_hpd(models, model_fit, data, kwargs):
obj = getattr(models, model_fit)
plot_hpd(data["y"], obj["theta"], **kwargs)
+
+
[email protected]("limits", [(-10.0, 10.0), (-5, 5), (None, None)])
+def test_fast_kde_scipy(limits):
+ data = np.random.normal(0, 1, 1000)
+ if limits[0] is None:
+ x = np.linspace(data.min(), data.max(), 200)
+ else:
+ x = np.linspace(*limits, 500)
+ density = gaussian_kde(data).evaluate(x)
+ density_fast = _fast_kde(data, xmin=limits[0], xmax=limits[1])[0]
+
+ np.testing.assert_almost_equal(density_fast.sum(), density.sum(), 1)
+
+
[email protected]("limits", [(-10.0, 10.0), (-5, 5), (None, None)])
+def test_fast_kde_cumulative(limits):
+ data = np.random.normal(0, 1, 1000)
+ density_fast = _fast_kde(data, xmin=limits[0], xmax=limits[1], cumulative=True)[0]
+ np.testing.assert_almost_equal(round(density_fast[-1], 3), 1)
| KDE plot produces strange results
**Description**
arviz's `plot_kde` produces a plot whose y-axis doesn't make sense.
**Reproduce**
https://gist.github.com/absolutelyNoWarranty/f417a052c4b8d05fe3da3b95542f8a0a
**Expected behavior**
The peak of the KDE should be around 0.4 for the standard Gaussian, just like Pandas's KDE plot.
**Additional context**
Version of `arviz`: 0.3.2
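For reference, the ~0.4 peak the report expects can be checked directly: the standard normal density at 0 is 1/sqrt(2*pi) ≈ 0.399, and SciPy's `gaussian_kde` recovers it on a large sample (a quick sanity check, not arviz code):
```python
import numpy as np
from scipy.stats import gaussian_kde, norm

data = np.random.normal(0, 1, 100_000)
print(norm.pdf(0))                         # 0.3989..., i.e. 1 / sqrt(2 * pi)
print(gaussian_kde(data).evaluate([0.0]))  # close to 0.4 for a standard Gaussian
```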
| Thanks for the reproducible example - it made it easy to check on this.
It looks like the difference is due to a difference in the algorithm being used - pandas is doing something different at the edge of the data, but arviz stops plotting at the edge. Rescaling the x-axis shows that the plots are quite similar.
I do not know much about the algorithms for a KDE, but @ahartikainen might have some opinions about whether we want to pad out the left and right edges of the data with 0's.
![image](https://user-images.githubusercontent.com/2295568/52276857-43395080-2921-11e9-86b9-d9fea4d47572.png)
@absolutelyNoWarranty thanks for taking the time to report this issue.
My opinion on this issue is that ArviZ is doing a good job here: Pandas/SciPy's KDE does not respect the boundaries. In this example the result is over-smoothing the tails. It is not right to plot the density at the tails if you do not have data there, at least not for a tool like ArviZ; this would show information about the posterior that is not there, and if you want nicer tails, you need more samples. Of course, for other tools people might argue that, since a KDE is a quick way to perform inference about the density of the data, the Pandas result is better given that it looks closer to the _true_ distribution. But for ArviZ this would be misleading.
Yeah, I added a WIP branch to manually set limits.
There is currently a scaling issue. (We normalize by sum, we probably need to normalize by integration.) | 2019-02-06T12:25:36Z | [] | [] |
arviz-devs/arviz | 592 | arviz-devs__arviz-592 | [
"146"
] | 27a0cef15b4965a8040721629fa19a63567f7458 | diff --git a/arviz/plots/__init__.py b/arviz/plots/__init__.py
--- a/arviz/plots/__init__.py
+++ b/arviz/plots/__init__.py
@@ -14,6 +14,7 @@
from .ppcplot import plot_ppc
from .violinplot import plot_violin
from .hpdplot import plot_hpd
+from .distplot import plot_dist
__all__ = [
@@ -34,4 +35,5 @@
"plot_ppc",
"plot_violin",
"plot_hpd",
+ "plot_dist",
]
diff --git a/arviz/plots/distplot.py b/arviz/plots/distplot.py
new file mode 100644
--- /dev/null
+++ b/arviz/plots/distplot.py
@@ -0,0 +1,155 @@
+"""Plot distribution as histogram or kernel density estimates."""
+import matplotlib.pyplot as plt
+
+from .kdeplot import plot_kde
+from .plot_utils import get_bins
+
+
+def plot_dist(
+ values,
+ values2=None,
+ color="C0",
+ kind="auto",
+ cumulative=False,
+ label=None,
+ rotated=False,
+ rug=False,
+ bw=4.5,
+ quantiles=None,
+ contour=True,
+ fill_last=True,
+ textsize=None,
+ plot_kwargs=None,
+ fill_kwargs=None,
+ rug_kwargs=None,
+ contour_kwargs=None,
+ hist_kwargs=None,
+ ax=None,
+):
+ """Plot distribution as histogram or kernel density estimates.
+
+ By default continuous variables are plotted using KDEs and discrete ones using histograms
+
+ Parameters
+ ----------
+ values : array-like
+ Values to plot
+ values2 : array-like, optional
+ Values to plot. If present, a 2D KDE or a hexbin will be estimated
+ color : string
+ valid matplotlib color
+ kind : string
+ By default ("auto") continuous variables are plotted using KDEs and discrete ones using
+ histograms. To override this use "hist" to plot histograms and "density" for KDEs
+ cumulative : bool
+ If true plot the estimated cumulative distribution function. Defaults to False.
+ Ignored for 2D KDE
+ label : string
+ Text to include as part of the legend
+ rotated : bool
+ Whether to rotate the 1D KDE plot 90 degrees.
+ rug : bool
+ If True adds a rugplot. Defaults to False. Ignored for 2D KDE
+ bw : float
+ Bandwidth scaling factor for 1D KDE. Should be larger than 0. The higher this number the
+ smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's
+ rule of thumb (the default rule used by SciPy).
+ quantiles : list
+ Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
+ Defaults to None.
+ contour : bool
+ If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
+ fill_last : bool
+ If True fill the last contour of the 2D KDE plot. Defaults to True.
+ textsize: float
+ Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
+ on figsize.
+ plot_kwargs : dict
+ Keywords passed to the pdf line of a 1D KDE.
+ fill_kwargs : dict
+ Keywords passed to the fill under the line (use fill_kwargs={'alpha': 0} to disable fill).
+ Ignored for 2D KDE
+ rug_kwargs : dict
+ Keywords passed to the rug plot. Ignored if rug=False or for 2D KDE
+ Use `space` keyword (float) to control the position of the rugplot. The larger this number
+ the lower the rugplot.
+ contour_kwargs : dict
+ Keywords passed to the contourplot. Ignored for 1D KDE.
+ hist_kwargs : dict
+ Keywords passed to the histogram.
+ ax : matplotlib axes
+
+ Returns
+ -------
+ ax : matplotlib axes
+ """
+ if ax is None:
+ ax = plt.gca()
+
+ if hist_kwargs is None:
+ hist_kwargs = {}
+ hist_kwargs.setdefault("bins", None)
+ hist_kwargs.setdefault("cumulative", cumulative)
+ hist_kwargs.setdefault("color", color)
+ hist_kwargs.setdefault("label", label)
+ hist_kwargs.setdefault("rwidth", 0.9)
+ hist_kwargs.setdefault("align", "left")
+ hist_kwargs.setdefault("density", True)
+
+ if plot_kwargs is None:
+ plot_kwargs = {}
+
+ if rotated:
+ hist_kwargs.setdefault("orientation", "horizontal")
+ else:
+ hist_kwargs.setdefault("orientation", "vertical")
+
+ if kind == "auto":
+ kind = "hist" if values.dtype.kind == "i" else "density"
+
+ if kind == "hist":
+ _histplot_op(
+ values=values, values2=values2, rotated=rotated, ax=ax, hist_kwargs=hist_kwargs
+ )
+ elif kind == "density":
+ plot_kwargs.setdefault("color", color)
+ legend = label is not None
+
+ plot_kde(
+ values,
+ values2,
+ cumulative=cumulative,
+ rug=rug,
+ label=label,
+ bw=bw,
+ quantiles=quantiles,
+ rotated=rotated,
+ contour=contour,
+ legend=legend,
+ fill_last=fill_last,
+ textsize=textsize,
+ plot_kwargs=plot_kwargs,
+ fill_kwargs=fill_kwargs,
+ rug_kwargs=rug_kwargs,
+ contour_kwargs=contour_kwargs,
+ ax=ax,
+ )
+ return ax
+
+
+def _histplot_op(values, values2, rotated, ax, hist_kwargs):
+ """Add a histogram for the data to the axes."""
+ if values2 is not None:
+ raise NotImplementedError("Insert hexbin plot here")
+
+ bins = hist_kwargs.pop("bins")
+ if bins is None:
+ bins = get_bins(values)
+ ax.hist(values, bins=bins, **hist_kwargs)
+ if rotated:
+ ax.set_yticks(bins[:-1])
+ else:
+ ax.set_xticks(bins[:-1])
+ if hist_kwargs["label"] is not None:
+ ax.legend()
+ return ax
diff --git a/arviz/plots/jointplot.py b/arviz/plots/jointplot.py
--- a/arviz/plots/jointplot.py
+++ b/arviz/plots/jointplot.py
@@ -2,8 +2,9 @@
import matplotlib.pyplot as plt
from ..data import convert_to_dataset
+from .distplot import plot_dist
from .kdeplot import plot_kde
-from .plot_utils import _scale_fig_size, get_bins, xarray_var_iter, make_label, get_coords
+from .plot_utils import _scale_fig_size, xarray_var_iter, make_label, get_coords
from ..utils import _var_names
@@ -84,6 +85,8 @@ def plot_joint(
if marginal_kwargs is None:
marginal_kwargs = {}
+ marginal_kwargs.setdefault("plot_kwargs", {})
+ marginal_kwargs["plot_kwargs"]["linewidth"] = linewidth
# Instantiate figure and grid
fig, _ = plt.subplots(0, 0, figsize=figsize, constrained_layout=True)
@@ -122,19 +125,8 @@ def plot_joint(
axjoin.hexbin(x, y, mincnt=1, gridsize=gridsize, **joint_kwargs)
axjoin.grid(False)
- for val, ax, orient, rotate in (
- (x, ax_hist_x, "vertical", False),
- (y, ax_hist_y, "horizontal", True),
- ):
- if val.dtype.kind == "i":
- bins = get_bins(val)
- ax.hist(
- val, bins=bins, align="left", density=True, orientation=orient, **marginal_kwargs
- )
- else:
- marginal_kwargs.setdefault("plot_kwargs", {})
- marginal_kwargs["plot_kwargs"]["linewidth"] = linewidth
- plot_kde(val, rotated=rotate, ax=ax, **marginal_kwargs)
+ for val, ax, rotate in ((x, ax_hist_x, False), (y, ax_hist_y, True)):
+ plot_dist(val, textsize=xt_labelsize, rotated=rotate, ax=ax, **marginal_kwargs)
ax_hist_x.set_xlim(axjoin.get_xlim())
ax_hist_y.set_ylim(axjoin.get_ylim())
diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -84,25 +84,46 @@ def _scale_fig_size(figsize, textsize, rows=1, cols=1):
return (width, height), ax_labelsize, titlesize, xt_labelsize, linewidth, markersize
-def get_bins(ary, max_bins=50, fenceposts=2):
- """Compute number of bins (or ticks).
+def get_bins(values):
+ """
+ Automatically compute the number of bins for discrete variables.
Parameters
----------
- ary : numpy.array
- array to be binned
- max_bins : int
- maximum number of bins
- fenceposts : int
- when computing bins, this should be 2, when computing ticks this should be 1.
+ values = numpy array
+ values
+
+ Returns
+ -------
+ array with the bins
+
+ Notes
+ -----
+ Computes the width of the bins by taking the maximum of the Sturges and the Freedman-Diaconis
+ estimators. According to numpy `np.histogram` this provides good all around performance.
+
+ The Sturges is a very simplistic estimator based on the assumption of normality of the data.
+ This estimator has poor performance for non-normal data, which becomes especially obvious for
+ large data sets. The estimate depends only on size of the data.
+
+ The Freedman-Diaconis rule uses interquartile range (IQR) to estimate the binwidth.
+ It is considered a robust version of the Scott rule as the IQR is less affected by outliers
+ than the standard deviation. However, the IQR depends on fewer points than the standard
+ deviation, so it is less accurate, especially for long tailed distributions.
"""
- x_max, x_min = ary.max(), ary.min()
- x_range = x_max - x_min
- if x_range > max_bins:
- bins = range(x_min, x_max + fenceposts, max(1, int(x_range / 10)))
- else:
- bins = range(x_min, x_max + fenceposts)
- return bins
+ x_min = values.min().astype(int)
+ x_max = values.max().astype(int)
+
+ # Sturges histogram bin estimator
+ bins_sturges = (x_max - x_min) / (np.log2(values.size) + 1)
+
+ # The Freedman-Diaconis histogram bin estimator.
+ iqr = np.subtract(*np.percentile(values, [75, 25])) # pylint: disable=assignment-from-no-return
+ bins_fd = 2 * iqr * values.size ** (-1 / 3)
+
+ width = round(np.max([1, bins_sturges, bins_fd])).astype(int)
+
+ return np.arange(x_min, x_max + width + 1, width)
def default_grid(n_items, max_cols=4, min_cols=3): # noqa: D202
diff --git a/arviz/plots/traceplot.py b/arviz/plots/traceplot.py
--- a/arviz/plots/traceplot.py
+++ b/arviz/plots/traceplot.py
@@ -1,9 +1,9 @@
-"""Plot histogram and values from MCMC samples."""
+"""Plot kde or histograms and values from MCMC samples."""
import matplotlib.pyplot as plt
import numpy as np
from ..data import convert_to_dataset
-from .kdeplot import plot_kde
+from .distplot import plot_dist
from .plot_utils import _scale_fig_size, get_bins, xarray_var_iter, make_label, get_coords
from ..utils import _var_names
@@ -17,11 +17,13 @@ def plot_trace(
textsize=None,
lines=None,
combined=False,
- kde_kwargs=None,
+ plot_kwargs=None,
+ fill_kwargs=None,
+ rug_kwargs=None,
hist_kwargs=None,
trace_kwargs=None,
):
- """Plot samples histograms and values.
+ """Plot distribution (histogram or kernel density estimates) and sampled values.
If `divergences` data is available in `sample_stats`, will plot the location of divergences as
dashed vertical lines.
@@ -48,10 +50,14 @@ def plot_trace(
combined : bool
Flag for combining multiple chains into a single line. If False (default), chains will be
plotted separately.
- kde_kwargs : dict
- Extra keyword arguments passed to `arviz.plot_kde`. Only affects continuous variables.
+ plot_kwargs : dict
+ Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.
+ fill_kwargs : dict
+ Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.
+ rug_kwargs : dict
+ Extra keyword arguments passed to `arviz.plot_dist`. Only affects continuous variables.
hist_kwargs : dict
- Extra keyword arguments passed to `plt.hist`. Only affects discrete variables.
+ Extra keyword arguments passed to `arviz.plot_dist`. Only affects discrete variables.
trace_kwargs : dict
Extra keyword arguments passed to `plt.plot`
Returns
@@ -114,11 +120,14 @@ def plot_trace(
trace_kwargs.setdefault("alpha", 0.35)
- if kde_kwargs is None:
- kde_kwargs = {}
-
if hist_kwargs is None:
hist_kwargs = {}
+ if plot_kwargs is None:
+ plot_kwargs = {}
+ if fill_kwargs is None:
+ fill_kwargs = {}
+ if rug_kwargs is None:
+ rug_kwargs = {}
hist_kwargs.setdefault("alpha", 0.35)
@@ -126,7 +135,7 @@ def plot_trace(
figsize, textsize, rows=len(plotters), cols=2
)
trace_kwargs.setdefault("linewidth", linewidth)
- kde_kwargs.setdefault("plot_kwargs", {"linewidth": linewidth})
+ plot_kwargs.setdefault("linewidth", linewidth)
_, axes = plt.subplots(
len(plotters), 2, squeeze=False, figsize=figsize, constrained_layout=True
@@ -143,15 +152,20 @@ def plot_trace(
axes[idx, 1].plot(np.arange(len(row)), row, **trace_kwargs)
colors[idx].append(axes[idx, 1].get_lines()[-1].get_color())
- kde_kwargs["plot_kwargs"]["color"] = colors[idx][-1]
- if row.dtype.kind == "i":
- _histplot_op(axes[idx, 0], row, **hist_kwargs)
- else:
- plot_kde(row, textsize=xt_labelsize, ax=axes[idx, 0], **kde_kwargs)
+ plot_kwargs["color"] = colors[idx][-1]
+ plot_dist(
+ row,
+ textsize=xt_labelsize,
+ ax=axes[idx, 0],
+ hist_kwargs=hist_kwargs,
+ plot_kwargs=plot_kwargs,
+ fill_kwargs=fill_kwargs,
+ rug_kwargs=rug_kwargs,
+ )
if value[0].dtype.kind == "i":
- xticks = get_bins(value, max_bins=10, fenceposts=1)
- axes[idx, 0].set_xticks(xticks)
+ xticks = get_bins(value)
+ axes[idx, 0].set_xticks(xticks[:-1])
axes[idx, 0].set_yticks([])
for col in (0, 1):
axes[idx, col].set_title(make_label(var_name, selection), fontsize=titlesize, wrap=True)
diff --git a/examples/plot_dist.py b/examples/plot_dist.py
new file mode 100644
--- /dev/null
+++ b/examples/plot_dist.py
@@ -0,0 +1,18 @@
+"""
+Dist Plot
+========
+
+_thumb: .2, .8
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+import arviz as az
+
+az.style.use('arviz-darkgrid')
+
+a = np.random.poisson(4, 1000)
+b = np.random.normal(0, 1, 1000)
+
+_, ax = plt.subplots(1, 2, figsize=(10, 4))
+az.plot_dist(a, color='C1', label='Poisson', ax=ax[0])
+az.plot_dist(b, color='C2', label='Gaussian', ax=ax[1])
| diff --git a/arviz/tests/test_plot_utils.py b/arviz/tests/test_plot_utils.py
--- a/arviz/tests/test_plot_utils.py
+++ b/arviz/tests/test_plot_utils.py
@@ -30,7 +30,7 @@ def test_make_2d():
def test_get_bins():
"""Touches code that is hard to reach."""
- assert get_bins(np.array([1, 2, 3, 100]), max_bins=10) is not None
+ assert get_bins(np.array([1, 2, 3, 100])) is not None
def test_dataset_to_numpy_not_combined(sample_dataset): # pylint: disable=invalid-name
diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -28,6 +28,7 @@
_fast_kde,
plot_khat,
plot_hpd,
+ plot_dist,
)
np.random.seed(0)
@@ -285,6 +286,25 @@ def test_plot_kde_cumulative(continuous_model, kwargs):
assert axes
[email protected]("kwargs", [{"kind": "hist"}, {"kind": "dist"}])
+def test_plot_dist(continuous_model, kwargs):
+ axes = plot_dist(continuous_model["x"], **kwargs)
+ assert axes
+
+
[email protected](
+ "kwargs",
+ [
+ {"plot_kwargs": {"linestyle": "-"}},
+ {"contour": True, "fill_last": False},
+ {"contour": False},
+ ],
+)
+def test_plot_dist_2d_kde(continuous_model, kwargs):
+ axes = plot_dist(continuous_model["x"], continuous_model["y"], **kwargs)
+ assert axes
+
+
@pytest.mark.parametrize(
"kwargs", [{"plot_kwargs": {"linestyle": "-"}}, {"cumulative": True}, {"rug": True}]
)
| Implement `distplot` equivalent
There are a few places where we would like to use `arviz.kdeplot` if a variable is continuous, and a histogram if not (traceplots, for example).
This might involve implementing a `histplot` first and then having `distplot` just inspect the `dtype`.
(happy if someone wants to change the names of any of these...)
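A bare-bones sketch of the dtype-based dispatch being proposed; the name, signature, and the SciPy KDE used here are placeholders, not the eventual ArviZ API:
```python
# Hypothetical dispatch: histogram for integer samples, KDE for continuous ones.
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import gaussian_kde


def distplot_sketch(values, ax=None):
    if ax is None:
        ax = plt.gca()
    if values.dtype.kind == "i":                       # discrete -> histogram
        bins = np.arange(values.min(), values.max() + 2) - 0.5
        ax.hist(values, bins=bins, density=True)
    else:                                              # continuous -> KDE
        grid = np.linspace(values.min(), values.max(), 200)
        ax.plot(grid, gaussian_kde(values)(grid))
    return ax


_, axes = plt.subplots(1, 2)
distplot_sketch(np.random.poisson(4, 1000), ax=axes[0])    # integer dtype -> histogram
distplot_sketch(np.random.normal(0, 1, 1000), ax=axes[1])  # float dtype -> KDE
```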
| Right, this will be very helpful; distplot sounds like a good name.
I'd be up to contribute.
Thanks @Dpananos! I think it would mostly be factoring out lines 82-85 [here](https://github.com/arviz-devs/arviz/blob/master/arviz/plots/traceplot.py#L82), so we can replace those with something like
```python
distplot(row, textsize=textsize, ax=axes[i, 0], kde_kwargs=kde_kwargs, hist_kwargs=hist_kwargs)
```
`histplot` will look a lot like `kdeplot`, which accepts a numpy array instead of an xarray dataset, and then `distplot` will be similar, and probably just be an `if...else` statement.
This project still is not super well instrumented with linters/tests or documented, so please feel free to push partial code/ask questions if you'd like input!
Is `plot_density` what you were hoping for?
https://github.com/arviz-devs/arviz/blob/master/arviz/plots/densityplot.py
Not exactly -- much like `plot_kde` accepts a raw numpy array, and lets us call it from other plots, I want a plot that I can call with a numpy array that _either_ calls `plot_kde` or some discrete version, so we can use it everywhere else (I think there are a few corners cut with discrete plots, still!)
if @Dpananos does not have the time to work on this I will be happy to take it.
Take it.
| 2019-02-20T12:57:06Z | [] | [] |
arviz-devs/arviz | 615 | arviz-devs__arviz-615 | [
"453"
] | b32e25139f6c30466c696157c10b4c7a703f8e76 | diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py
--- a/arviz/plots/pairplot.py
+++ b/arviz/plots/pairplot.py
@@ -192,7 +192,6 @@ def plot_pair(
ax.set_xlabel("{}".format(flat_var_names[0]), fontsize=ax_labelsize, wrap=True)
ax.set_ylabel("{}".format(flat_var_names[1]), fontsize=ax_labelsize, wrap=True)
ax.tick_params(labelsize=xt_labelsize)
- axs = ax
else:
(figsize, ax_labelsize, _, xt_labelsize, _, _) = _scale_fig_size(
@@ -203,7 +202,6 @@ def plot_pair(
fig, ax = plt.subplots(
numvars - 1, numvars - 1, figsize=figsize, constrained_layout=True
)
- axs = []
hexbin_values = []
for i in range(0, numvars - 1):
var1 = _posterior[i]
@@ -254,6 +252,5 @@ def plot_pair(
)
ax[j, i].tick_params(labelsize=xt_labelsize)
- axs.append(ax)
- return axs
+ return ax
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -365,7 +365,7 @@ def test_plot_khat():
def test_plot_pair(models, model_fit, kwargs):
obj = getattr(models, model_fit)
ax = plot_pair(obj, **kwargs)
- assert ax
+ assert np.all(ax)
@pytest.mark.parametrize(
| Return value of plot_pair repeats the same subplot array numvars times
The return value of `plot_pair` is a list that contains the array of subplots repeated `numvars` times. I guess this comes from the original version with gridspec.
Line 168 defines `axs` as an empty list, which is no longer needed; line 219 appends `ax` to `axs`, but `ax` already contains all the subplots. Eventually, `axs` is returned instead of `ax`.
In addition, maybe the docstring should be updated to specify that `ax` (both as input and as return value) is actually an array of axes, not a single axes object. It cannot be a single axes because there are many subplots, but I am not sure everyone will see that right away.
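A small illustration of the reported behaviour against the pre-fix code (the dataset name is just one of the bundled examples; any posterior with three or more variables triggers the grid branch):
```python
import arviz as az

data = az.load_arviz_data("non_centered_eight")
axs = az.plot_pair(data)
print(type(axs))                      # list before this fix, ndarray of Axes after
print(all(a is axs[0] for a in axs))  # True before the fix: every entry is the same array
```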
| 2019-03-15T16:32:40Z | [] | [] |
|
arviz-devs/arviz | 625 | arviz-devs__arviz-625 | [
"495"
] | 6557e214e8105bc773be40dec71cf80d00f9e753 | diff --git a/arviz/data/__init__.py b/arviz/data/__init__.py
--- a/arviz/data/__init__.py
+++ b/arviz/data/__init__.py
@@ -1,6 +1,6 @@
"""Code for loading and manipulating data structures."""
from .inference_data import InferenceData, concat
-from .io_netcdf import from_netcdf, to_netcdf, load_data, save_data
+from .io_netcdf import from_netcdf, to_netcdf
from .datasets import load_arviz_data, list_datasets, clear_data_home
from .base import numpy_to_data_array, dict_to_dataset
from .converters import convert_to_dataset, convert_to_inference_data
@@ -31,6 +31,4 @@
"from_tfp",
"from_netcdf",
"to_netcdf",
- "load_data",
- "save_data",
]
diff --git a/arviz/data/io_netcdf.py b/arviz/data/io_netcdf.py
--- a/arviz/data/io_netcdf.py
+++ b/arviz/data/io_netcdf.py
@@ -1,5 +1,5 @@
"""Input and output support for data."""
-import warnings
+
from .inference_data import InferenceData
from .converters import convert_to_inference_data
@@ -41,58 +41,3 @@ def to_netcdf(data, filename, *, group="posterior", coords=None, dims=None):
inference_data = convert_to_inference_data(data, group=group, coords=coords, dims=dims)
file_name = inference_data.to_netcdf(filename)
return file_name
-
-
-def load_data(filename):
- """Load netcdf file back into an arviz.InferenceData.
-
- Parameters
- ----------
- filename : str
- name or path of the file to load trace
-
- Note
- ----
- This function is deprecated and will be removed in 0.4.
- Use `from_netcdf` instead.
- """
- warnings.warn(
- "The 'load_data' function is deprecated as of 0.3.2, use 'from_netcdf' instead",
- DeprecationWarning,
- )
- return from_netcdf(filename=filename)
-
-
-def save_data(data, filename, *, group="posterior", coords=None, dims=None):
- """Save dataset as a netcdf file.
-
- WARNING: Only idempotent in case `data` is InferenceData
-
- Parameters
- ----------
- data : InferenceData, or any object accepted by `convert_to_inference_data`
- Object to be saved
- filename : str
- name or path of the file to load trace
- group : str (optional)
- In case `data` is not InferenceData, this is the group it will be saved to
- coords : dict (optional)
- See `convert_to_inference_data`
- dims : dict (optional)
- See `convert_to_inference_data`
-
- Returns
- -------
- str
- filename saved to
-
- Note
- ----
- This function is deprecated and will be removed in 0.4.
- Use `to_netcdf` instead.
- """
- warnings.warn(
- "The 'save_data' function is deprecated as of 0.3.2, use 'to_netcdf' instead",
- DeprecationWarning,
- )
- return to_netcdf(data=data, filename=filename, group=group, coords=coords, dims=dims)
| diff --git a/arviz/tests/test_data.py b/arviz/tests/test_data.py
--- a/arviz/tests/test_data.py
+++ b/arviz/tests/test_data.py
@@ -22,8 +22,6 @@
from_netcdf,
from_tfp,
to_netcdf,
- load_data,
- save_data,
load_arviz_data,
list_datasets,
clear_data_home,
@@ -664,14 +662,6 @@ def test_io_function(self, data, eight_schools_params):
assert hasattr(inference_data2, "posterior")
os.remove(filepath)
assert not os.path.exists(filepath)
- # Test deprecated functions
- save_data(inference_data, filepath)
- assert os.path.exists(filepath)
- assert os.path.getsize(filepath) > 0
- inference_data3 = load_data(filepath)
- assert hasattr(inference_data3, "posterior")
- os.remove(filepath)
- assert not os.path.exists(filepath)
def test_io_method(self, data, eight_schools_params):
inference_data = self.get_inference_data( # pylint: disable=W0612
| Remove load_data and save_data functions before 0.4
`load_data` and `save_data` are currently deprecated (after 0.3.1 release). They need to be removed after 0.4 (assuming next release is going to be 0.3.2).
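For anyone still calling the deprecated pair, the migration is a one-to-one rename; a quick sketch using the bundled example dataset:
```python
import arviz as az

idata = az.load_arviz_data("centered_eight")

# before (deprecated, removed by this PR):
# az.save_data(idata, "eight_schools.nc")
# idata = az.load_data("eight_schools.nc")

# after:
az.to_netcdf(idata, "eight_schools.nc")
idata = az.from_netcdf("eight_schools.nc")
```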
| 2019-03-21T22:53:06Z | [] | [] |
|
arviz-devs/arviz | 636 | arviz-devs__arviz-636 | [
"419"
] | 63bb78b6e85feecaec66bed9d8b719a39460d85f | diff --git a/arviz/plots/parallelplot.py b/arviz/plots/parallelplot.py
--- a/arviz/plots/parallelplot.py
+++ b/arviz/plots/parallelplot.py
@@ -2,6 +2,7 @@
import matplotlib.pyplot as plt
import numpy as np
+from scipy.stats.mstats import rankdata
from ..data import convert_to_dataset
from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
from ..utils import _var_names
@@ -18,6 +19,7 @@ def plot_parallel(
colord="C1",
shadend=0.025,
ax=None,
+ norm_method=None,
):
"""
Plot parallel coordinates plot showing posterior points with and without divergences.
@@ -50,10 +52,33 @@ def plot_parallel(
Defaults to .025
ax : axes
Matplotlib axes.
+ norm_method : str
+ Method for normalizing the data. Methods include normal, minmax and rank.
+ Defaults to none.
Returns
-------
ax : matplotlib axes
+
+ Examples
+ --------
+ Plot default parallel plot
+
+ .. plot::
+ :context: close-figs
+
+ >>> import arviz as az
+ >>> data = az.load_arviz_data('centered_eight')
+ >>> az.plot_parallel(data, var_names=["mu", "tau"])
+
+
+ Plot parallel plot with normalization
+
+ .. plot::
+ :context: close-figs
+
+ >>> az.plot_parallel(data, var_names=["mu", "tau"], norm_method='normal')
+
"""
if coords is None:
coords = {}
@@ -69,9 +94,23 @@ def plot_parallel(
var_names, _posterior = xarray_to_ndarray(
get_coords(posterior_data, coords), var_names=var_names, combined=True
)
-
if len(var_names) < 2:
raise ValueError("This plot needs at least two variables")
+ if norm_method is not None:
+ if norm_method == "normal":
+ mean = np.mean(_posterior, axis=1)
+ standard_deviation = np.std(_posterior, axis=1)
+ for i in range(0, np.shape(mean)[0]):
+ _posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]
+ elif norm_method == "minmax":
+ min_elem = np.min(_posterior, axis=1)
+ max_elem = np.max(_posterior, axis=1)
+ for i in range(0, np.shape(min_elem)[0]):
+ _posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])
+ elif norm_method == "rank":
+ _posterior = rankdata(_posterior, axis=1)
+ else:
+ raise ValueError("{} is not supported. Use normal, minmax or rank.".format(norm_method))
figsize, _, _, xt_labelsize, _, _ = _scale_fig_size(figsize, textsize, 1, 1)
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -233,15 +233,17 @@ def test_plot_parallel_raises_valueerror(df_trace): # pylint: disable=invalid-n
@pytest.mark.parametrize("model_fit", ["pymc3_fit", "stan_fit"])
-def test_plot_parallel(models, model_fit):
[email protected]("norm_method", [None, "normal", "minmax", "rank"])
+def test_plot_parallel(models, model_fit, norm_method):
obj = getattr(models, model_fit)
- assert plot_parallel(obj, var_names=["mu", "tau"])
+ assert plot_parallel(obj, var_names=["mu", "tau"], norm_method=norm_method)
-def test_plot_parallel_exception(models):
[email protected]("var_names", [None, "mu", ["mu", "tau"]])
+def test_plot_parallel_exception(models, var_names):
"""Ensure that correct exception is raised when one variable is passed."""
with pytest.raises(ValueError):
- assert plot_parallel(models.pymc3_fit, var_names="mu")
+ assert plot_parallel(models.pymc3_fit, var_names=var_names, norm_method="foo")
@pytest.mark.parametrize("model_fit", ["pymc3_fit", "stan_fit", "pyro_fit"])
| Allow normalization in plot_parallel
It would be great if plot_parallel had a keyword arg `normalize` (or `standardize`) that centers and rescales the variables before plotting. That would make it easier to see things when some posteriors are much tighter than others:
![grafik](https://user-images.githubusercontent.com/1882397/48769279-83424d00-ecbb-11e8-8bf3-f1df15455cd0.png)
| normalize --> `(x -np.mean(x))/np.std(x)`
minmax --> `(x-min(x))/(max(x)-min(x))`
uniform --> `ecdf(x)[x]` <-- `inverse_ecdf_x(x)`
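A quick numpy sketch of what those three options do to the stacked draws (one row per variable); the `rank` line matches the `scipy.stats.mstats.rankdata` approach used in the patch and stands in for the uniform/ECDF idea:
```python
import numpy as np
from scipy.stats.mstats import rankdata

# fake posterior draws: 3 variables on very different scales, 1000 draws each
x = np.vstack([
    np.random.normal(0, 1, 1000),
    np.random.normal(5, 0.1, 1000),
    np.random.normal(100, 20, 1000),
])

normal = (x - x.mean(axis=1, keepdims=True)) / x.std(axis=1, keepdims=True)
minmax = (x - x.min(axis=1, keepdims=True)) / (
    x.max(axis=1, keepdims=True) - x.min(axis=1, keepdims=True)
)
rank = rankdata(x, axis=1)  # each draw replaced by its rank within its own variable
```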
So we add a kwarg with those three options as string? What should it be called? Maybe `rescale`? We could use `normalize` if it is `True`.
`bayesplot` supports arbitrary transformations passed as a list of functions, I guess we could do the same. But calling it `transformation` could be confusing because we already have transformed variables, and we might want a log transform *and* a normalization in some cases.
Oh, and maybe instead of a `uniform` we could use `quantile` and also change the y-axis labels appropriately. | 2019-04-04T11:31:24Z | [] | [] |
arviz-devs/arviz | 637 | arviz-devs__arviz-637 | [
"602"
] | 63bb78b6e85feecaec66bed9d8b719a39460d85f | diff --git a/arviz/plots/kdeplot.py b/arviz/plots/kdeplot.py
--- a/arviz/plots/kdeplot.py
+++ b/arviz/plots/kdeplot.py
@@ -27,6 +27,8 @@ def plot_kde(
fill_kwargs=None,
rug_kwargs=None,
contour_kwargs=None,
+ contourf_kwargs=None,
+ pcolormesh_kwargs=None,
ax=None,
legend=True,
):
@@ -71,7 +73,11 @@ def plot_kde(
Use `space` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot.
contour_kwargs : dict
- Keywords passed to the contourplot. Ignored for 1D KDE.
+ Keywords passed to ax.contour. Ignored for 1D KDE.
+ contourf_kwargs : dict
+ Keywords passed to ax.contourf. Ignored for 1D KDE.
+ pcolormesh_kwargs : dict
+ Keywords passed to ax.pcolormesh. Ignored for 1D KDE.
ax : matplotlib axes
legend : bool
Add legend to the figure. By default True.
@@ -227,6 +233,10 @@ def plot_kde(
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs.setdefault("colors", "0.5")
+ if contourf_kwargs is None:
+ contourf_kwargs = {}
+ if pcolormesh_kwargs is None:
+ pcolormesh_kwargs = {}
gridsize = (128, 128) if contour else (256, 256)
@@ -238,14 +248,13 @@ def plot_kde(
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
if contour:
- qcfs = ax.contourf(x_x, y_y, density, antialiased=True)
- if not fill_last:
- qcfs.collections[0].set_alpha(0)
+ qcfs = ax.contourf(x_x, y_y, density, antialiased=True, **contourf_kwargs)
qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
if not fill_last:
+ qcfs.collections[0].set_alpha(0)
qcs.collections[0].set_alpha(0)
else:
- ax.pcolormesh(x_x, y_y, density)
+ ax.pcolormesh(x_x, y_y, density, **pcolormesh_kwargs)
return ax
diff --git a/arviz/plots/pairplot.py b/arviz/plots/pairplot.py
--- a/arviz/plots/pairplot.py
+++ b/arviz/plots/pairplot.py
@@ -1,10 +1,11 @@
"""Plot a scatter or hexbin of sampled parameters."""
+import warnings
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from mpl_toolkits.axes_grid1 import make_axes_locatable
-from ..data import convert_to_dataset
+from ..data import convert_to_dataset, convert_to_inference_data
from .kdeplot import plot_kde
from .plot_utils import _scale_fig_size, xarray_to_ndarray, get_coords
from ..utils import _var_names
@@ -133,6 +134,7 @@ def plot_pair(
divergences_kwargs.setdefault("lw", 0)
# Get posterior draws and combine chains
+ data = convert_to_inference_data(data)
posterior_data = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, posterior_data)
flat_var_names, _posterior = xarray_to_ndarray(
@@ -141,11 +143,21 @@ def plot_pair(
# Get diverging draws and combine chains
if divergences:
- divergent_data = convert_to_dataset(data, group="sample_stats")
- _, diverging_mask = xarray_to_ndarray(
- divergent_data, var_names=("diverging",), combined=True
- )
- diverging_mask = np.squeeze(diverging_mask)
+ if hasattr(data, "sample_stats") and hasattr(data.sample_stats, "diverging"):
+ divergent_data = convert_to_dataset(data, group="sample_stats")
+ _, diverging_mask = xarray_to_ndarray(
+ divergent_data, var_names=("diverging",), combined=True
+ )
+ diverging_mask = np.squeeze(diverging_mask)
+ else:
+ divergences = False
+ warnings.warn(
+ "Divergences data not found, plotting without divergences. "
+ "Make sure the sample method provides divergences data and "
+ "that it is present in the `diverging` field of `sample_stats` "
+ "or set divergences=False",
+ SyntaxWarning,
+ )
if gridsize == "auto":
gridsize = int(len(_posterior[0]) ** 0.35)
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -272,7 +272,13 @@ def test_plot_joint_bad(models, model_fit):
[
{"plot_kwargs": {"linestyle": "-"}},
{"contour": True, "fill_last": False},
+ {
+ "contour": True,
+ "contourf_kwargs": {"cmap": "plasma"},
+ "contour_kwargs": {"linewidths": 1},
+ },
{"contour": False},
+ {"contour": False, "pcolormesh_kwargs": {"cmap": "plasma"}},
],
)
def test_plot_kde(continuous_model, kwargs):
@@ -280,7 +286,15 @@ def test_plot_kde(continuous_model, kwargs):
assert axes
[email protected]("kwargs", [{"cumulative": True}, {"rug": True}])
[email protected](
+ "kwargs",
+ [
+ {"cumulative": True},
+ {"cumulative": True, "plot_kwargs": {"linestyle": "--"}},
+ {"rug": True},
+ {"rug": True, "rug_kwargs": {"alpha": 0.2}},
+ ],
+)
def test_plot_kde_cumulative(continuous_model, kwargs):
axes = plot_kde(continuous_model["x"], quantiles=[0.25, 0.5, 0.75], **kwargs)
assert axes
@@ -387,6 +401,20 @@ def test_plot_pair_bad(models, model_fit):
plot_pair(obj, var_names=["mu"])
[email protected]("has_sample_stats", [True, False])
+def test_plot_pair_divergences_warning(has_sample_stats):
+ data = load_arviz_data("centered_eight")
+ if has_sample_stats:
+ # sample_stats present, diverging field missing
+ data.sample_stats = data.sample_stats.rename({"diverging": "diverging_missing"})
+ else:
+ # sample_stats missing
+ data = data.posterior
+ with pytest.warns(SyntaxWarning):
+ ax = plot_pair(data, divergences=True)
+ assert np.all(ax)
+
+
@pytest.mark.parametrize("kind", ["density", "cumulative", "scatter"])
@pytest.mark.parametrize("alpha", [None, 0.2, 1])
@pytest.mark.parametrize("animated", [False, True])
| Change pair_plot behavior for MCMC methods that do not produce divergences
**Describe the bug**
When using the Metropolis-Hastings sampler in pymc3, then converting to inference data, calling `az.plot_pair(divergences=True)` raises an IndexError
**To Reproduce**
```python
import matplotlib.pyplot as plt
import arviz as az
import pymc3 as pm
x = [1,2]
y = [1,2]
with pm.Model() as model_g:
α = pm.Normal('α', mu=0, sd=10)
β = pm.Normal('β', mu=0, sd=1)
ϵ = pm.HalfCauchy('ϵ', 5)
μ = pm.Deterministic('μ', α + β * x)
y_pred = pm.Normal('y_pred', mu=μ, sd=ϵ, observed=y)
# NUTS sampler works when plotting divergences
"""
with model_g:
trace_nuts_non_centered = pm.sample()
nuts_non_centered_dataset = az.from_pymc3(trace=trace_nuts_non_centered)
az.plot_pair(nuts_non_centered_dataset, var_names=['α', 'β', 'ϵ'], divergences=True)
plt.show()
"""
# Metropolis Hastings raises indexerror
with model_g:
step = pm.Metropolis()
trace_mh_non_centered = pm.sample(step=step)
az.plot_pair(trace_mh_non_centered, var_names=['α', 'β', 'ϵ'], divergences=True)
plt.show()
```
**Expected behavior**
~~Divergences are plotted as expected~~
Show a warning and continue plotting
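A sketch of the guard that gives that behaviour, mirroring the approach taken in the patch above; the helper name here is made up for illustration:
```python
import warnings


def _diverging_mask_or_warn(idata, divergences=True):
    """Return a divergence mask if the data has one, otherwise warn and disable divergences."""
    if divergences and hasattr(idata, "sample_stats") and hasattr(idata.sample_stats, "diverging"):
        return idata.sample_stats.diverging.values.flatten()
    if divergences:
        warnings.warn(
            "Divergences data not found, plotting without divergences. "
            "Set divergences=False to silence this warning.",
            SyntaxWarning,
        )
    return None
```
`plot_pair` can then simply carry on with `divergences=False` whenever the mask comes back as `None`.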
**Additional context**
ArviZ version: '0.3.3'
PyMC3 version: '3.6'
| 2019-04-04T22:03:40Z | [] | [] |
|
arviz-devs/arviz | 677 | arviz-devs__arviz-677 | [
"675"
] | b6007685f985f20823e21edce945a883aa0fc24f | diff --git a/arviz/data/io_pystan.py b/arviz/data/io_pystan.py
--- a/arviz/data/io_pystan.py
+++ b/arviz/data/io_pystan.py
@@ -355,9 +355,11 @@ def get_draws(fit, variables=None, ignore=None):
# check if the values are in 0-based (<=2.17) or 1-based indexing (>=2.18)
shift = 1
- if any(fit.sim["dims_oi"]):
+ if any(dim and np.prod(dim) != 0 for dim in fit.sim["dims_oi"]):
# choose variable with lowest number of dims > 1
- par_idx = min((dim, i) for i, dim in enumerate(fit.sim["dims_oi"]) if dim)[1]
+ par_idx = min(
+ (dim, i) for i, dim in enumerate(fit.sim["dims_oi"]) if (dim and np.prod(dim) != 0)
+ )[1]
offset = int(sum(map(np.product, fit.sim["dims_oi"][:par_idx])))
par_offset = int(np.product(fit.sim["dims_oi"][par_idx]))
par_keys = fit.sim["fnames_oi"][offset : offset + par_offset]
| diff --git a/arviz/tests/test_data.py b/arviz/tests/test_data.py
--- a/arviz/tests/test_data.py
+++ b/arviz/tests/test_data.py
@@ -926,7 +926,9 @@ def test_empty_parameter(self):
model_code = """
parameters {
real y;
- vector[0] z;
+ vector[3] x;
+ vector[0] a;
+ vector[2] z;
}
model {
y ~ normal(0,1);
@@ -937,7 +939,7 @@ def test_empty_parameter(self):
model = StanModel(model_code=model_code)
fit = model.sampling(iter=10, chains=2, check_hmc_diagnostics=False)
posterior = from_pystan(posterior=fit)
- test_dict = {"posterior": ["y"], "sample_stats": ["lp"]}
+ test_dict = {"posterior": ["y", "x", "z"], "sample_stats": ["lp"]}
fails = check_multiple_attrs(test_dict, posterior)
assert not fails
 | from_pystan fails when there is an empty dimension?
**Describe the bug**
`arviz.from_pystan` fails with IndexError when there is an empty-dimension parameter.
```python
az.from_pystan(stanfit)
---------------------------------------------------------------------------
IndexError Traceback (most recent call last)
<ipython-input-8-efb15a5798cb> in <module>
----> 1 az.from_pystan(stanfit)
~/.conda/envs/matrix/lib/python3.7/site-packages/arviz/data/io_pystan.py in from_pystan(posterior, posterior_predictive, prior, prior_predictive, observed_data, log_likelihood, coords, dims, posterior_model, prior_model)
607 log_likelihood=log_likelihood,
608 coords=coords,
--> 609 dims=dims,
610 ).to_inference_data()
~/.conda/envs/matrix/lib/python3.7/site-packages/arviz/data/io_pystan.py in to_inference_data(self)
152 return InferenceData(
153 **{
--> 154 "posterior": self.posterior_to_xarray(),
155 "sample_stats": self.sample_stats_to_xarray(),
156 "posterior_predictive": self.posterior_predictive_to_xarray(),
~/.conda/envs/matrix/lib/python3.7/site-packages/arviz/data/base.py in wrapped(cls, *args, **kwargs)
23 if getattr(cls, prop) is None:
24 return None
---> 25 return func(cls, *args, **kwargs)
26
27 return wrapped
~/.conda/envs/matrix/lib/python3.7/site-packages/arviz/data/io_pystan.py in posterior_to_xarray(self)
57 ignore = posterior_predictive + log_likelihood + ["lp__"]
58
---> 59 data = get_draws(posterior, ignore=ignore)
60
61 return dict_to_dataset(data, library=self.pystan, coords=self.coords, dims=self.dims)
~/.conda/envs/matrix/lib/python3.7/site-packages/arviz/data/io_pystan.py in get_draws(fit, variables, ignore)
403 for key, loc in keys_locs:
404 ary_slice = tuple(axes + loc)
--> 405 ary[ary_slice] = pyholder.chains[key][-ndraw:]
406 data[var] = ary
407
IndexError: index 3 is out of bounds for axis 2 with size 3
```
What I mean by empty-dimension parameter is:
```
>>> stanfit.par_dims
[[200], [3], [], [0, 3, 3], [200, 3], [0], [200, 3], [3, 3], [200, 3], [0]]
```
I think this is related to the fact that I use this trick in my Stan code:
```stan
parameters {
....
matrix[3, 3] T[include_T ? 1 : 0];
}
```
I did this in order to add an optional parameter following [this](https://dev.to/martinmodrak/optional-parametersdata-in-stan-4o33).
**To Reproduce**
Sorry, I cannot provide a self-contained example, but the traceback is above. This only happens when include_T is 0, so T has shape [0, 3, 3]; it may be related to these issues: #576, #578.
I do not know if this is something arviz necessarily wants to support. But even if the answer is no, I was wondering if there is a workaround to easily instantiate InferenceData in this case.
**Expected behavior**
`az.from_pystan(stanfit)` returns the correct InferenceData object.
**Additional context**
arviz==0.4.0, pystan==2.18
 | Hi, thanks for the info. I was sure that I had fixed this, but apparently not.
Ok, my mistake. I have added `np.prod(dim) == 0`, but `np.prod([]) == 1.0`, so I will fix this.
hmm, actually it might be something else.
Yes, I found it.
It was this blunder (actually there was one other mistake): `min((dim, i) for i, dim in enumerate(fit.sim["dims_oi"]) if dim)[1]`, or in simpler terms `min(dim for dim in fit.sim["dims_oi"] if dim)`, which in your case resulted in `[0]`, and then the rest of the code fails.
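A minimal reproduction of that blunder using the `par_dims` list from this issue: `[0]` is a non-empty (truthy) list, so the old filter keeps it and the zero-size parameter wins the `min`, while the extra `np.prod(dim) != 0` check from the patch skips it:
```python
import numpy as np

dims_oi = [[200], [3], [], [0, 3, 3], [200, 3], [0], [200, 3], [3, 3], [200, 3], [0]]

# old logic: only drops empty dim lists, so zero-size parameters like [0] survive
print(min(dim for dim in dims_oi if dim))                        # [0] -> later indexing fails

# patched logic: also require a non-zero number of elements
print(min(dim for dim in dims_oi if dim and np.prod(dim) != 0))  # [3]
```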
I also needed to update one `if`-statement before that part, so everything should be fixed once I create a PR and it gets merged.
Thanks for looking into it! I will wait for that. | 2019-05-22T20:41:37Z | [] | [] |
arviz-devs/arviz | 678 | arviz-devs__arviz-678 | [
"660"
] | f3aa3c53c8529bf055afbc0edf3ae2724bede5f1 | diff --git a/arviz/plots/__init__.py b/arviz/plots/__init__.py
--- a/arviz/plots/__init__.py
+++ b/arviz/plots/__init__.py
@@ -6,6 +6,7 @@
from .forestplot import plot_forest
from .kdeplot import plot_kde, _fast_kde, _fast_kde_2d
from .parallelplot import plot_parallel
+from .elpdplot import plot_elpd
from .posteriorplot import plot_posterior
from .traceplot import plot_trace
from .pairplot import plot_pair
@@ -28,6 +29,7 @@
"_fast_kde",
"_fast_kde_2d",
"plot_parallel",
+ "plot_elpd",
"plot_posterior",
"plot_trace",
"plot_pair",
diff --git a/arviz/plots/elpdplot.py b/arviz/plots/elpdplot.py
new file mode 100644
--- /dev/null
+++ b/arviz/plots/elpdplot.py
@@ -0,0 +1,276 @@
+"""Plot pointwise elpd estimations of inference data."""
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.cm as cm
+from matplotlib.ticker import NullFormatter
+from matplotlib.lines import Line2D
+
+from ..data import convert_to_inference_data
+from .plot_utils import (
+ _scale_fig_size,
+ get_coords,
+ color_from_dim,
+ format_coords_as_labels,
+ set_xticklabels,
+)
+from ..stats import waic, loo, ELPDData
+
+
+def plot_elpd(
+ compare_dict,
+ color=None,
+ xlabels=False,
+ figsize=None,
+ textsize=None,
+ coords=None,
+ legend=False,
+ threshold=None,
+ ax=None,
+ ic="waic",
+ scale="deviance",
+ plot_kwargs=None,
+):
+ """
+ Plot a scatter or hexbin matrix of the sampled parameters.
+
+ Parameters
+ ----------
+ compare_dict : mapping, str -> ELPDData or InferenceData
+ A dictionary mapping the model name to the object containing its inference data or
+ the result of `waic`/`loo` functions.
+ Refer to az.convert_to_inference_data for details on possible dict items
+ color : str or array_like, optional
+ Colors of the scatter plot, if color is a str all dots will have the same color,
+ if it is the size of the observations, each dot will have the specified color,
+ otherwise, it will be interpreted as a list of the dims to be used for the color code
+ xlabels : bool, optional
+ Use coords as xticklabels
+ figsize : figure size tuple, optional
+ If None, size is (8 + numvars, 8 + numvars)
+ textsize: int, optional
+ Text size for labels. If None it will be autoscaled based on figsize.
+ coords : mapping, optional
+ Coordinates of points to plot. **All** values are used for computation, but only a
+ a subset can be plotted for convenience.
+ legend : bool, optional
+ Include a legend to the plot. Only taken into account when color argument is a dim name.
+ threshold : float
+ If some elpd difference is larger than `threshold * elpd.std()`, show its label. If
+ `None`, no observations will be highlighted.
+ ax: axes, optional
+ Matplotlib axes
+ ic : str, optional
+ Information Criterion (WAIC or LOO) used to compare models. Default WAIC. Only taken
+ into account when input is InferenceData.
+ scale : str, optional
+ scale argument passed to az.waic or az.loo, see their docs for details. Only taken
+ into account when input is InferenceData.
+ plot_kwargs : dicts, optional
+ Additional keywords passed to ax.plot
+
+ Returns
+ -------
+ ax : matplotlib axes
+
+ Examples
+ --------
+ Compare pointwise WAIC for centered and non centered models of the 8school problem
+
+ .. plot::
+ :context: close-figs
+
+ >>> import arviz as az
+ >>> idata1 = az.load_arviz_data("centered_eight")
+ >>> idata2 = az.load_arviz_data("non_centered_eight")
+ >>> az.plot_elpd(
+ >>> {"centered model": idata1, "non centered model": idata2},
+ >>> xlabels=True
+ >>> )
+
+ """
+ valid_ics = ["waic", "loo"]
+ ic = ic.lower()
+ if ic not in valid_ics:
+ raise ValueError(
+ ("Information Criteria type {} not recognized." "IC must be in {}").format(
+ ic, valid_ics
+ )
+ )
+ ic_fun = waic if ic == "waic" else loo
+
+ # Make sure all object are ELPDData
+ for k, item in compare_dict.items():
+ if not isinstance(item, ELPDData):
+ compare_dict[k] = ic_fun(convert_to_inference_data(item), pointwise=True, scale=scale)
+ ics = [elpd_data.index[0] for elpd_data in compare_dict.values()]
+ if not all(x == ics[0] for x in ics):
+ raise SyntaxError(
+ "All Information Criteria must be of the same kind, but both loo and waic data present"
+ )
+ ic = ics[0]
+ scales = [elpd_data["{}_scale".format(ic)] for elpd_data in compare_dict.values()]
+ if not all(x == scales[0] for x in scales):
+ raise SyntaxError(
+ "All Information Criteria must be on the same scale, but {} are present".format(
+ set(scales)
+ )
+ )
+ numvars = len(compare_dict)
+ models = list(compare_dict.keys())
+
+ if coords is None:
+ coords = {}
+
+ if plot_kwargs is None:
+ plot_kwargs = {}
+ plot_kwargs.setdefault("marker", "+")
+
+ pointwise_data = [
+ get_coords(compare_dict[model]["{}_i".format(ic)], coords) for model in models
+ ]
+ xdata = np.arange(pointwise_data[0].size)
+
+ if isinstance(color, str):
+ if color in pointwise_data[0].dims:
+ colors, color_mapping = color_from_dim(pointwise_data[0], color)
+ if legend:
+ cmap_name = plot_kwargs.pop("cmap", plt.rcParams["image.cmap"])
+ markersize = plot_kwargs.pop("s", plt.rcParams["lines.markersize"])
+ cmap = getattr(cm, cmap_name)
+ handles = [
+ Line2D(
+ [],
+ [],
+ color=cmap(float_color),
+ label=coord,
+ ms=markersize,
+ lw=0,
+ **plot_kwargs
+ )
+ for coord, float_color in color_mapping.items()
+ ]
+ plot_kwargs.setdefault("cmap", cmap_name)
+ plot_kwargs.setdefault("s", markersize ** 2)
+ plot_kwargs.setdefault("c", colors)
+ else:
+ plot_kwargs.setdefault("c", color)
+ legend = False
+ else:
+ legend = False
+ plot_kwargs.setdefault("c", color)
+
+ if xlabels:
+ coord_labels = format_coords_as_labels(pointwise_data[0])
+
+ if numvars < 2:
+ raise Exception("Number of models to compare must be 2 or greater.")
+
+ if numvars == 2:
+ (figsize, ax_labelsize, titlesize, xt_labelsize, _, markersize) = _scale_fig_size(
+ figsize, textsize, numvars - 1, numvars - 1
+ )
+ plot_kwargs.setdefault("s", markersize ** 2)
+
+ if ax is None:
+ fig, ax = plt.subplots(figsize=figsize, constrained_layout=(not xlabels and not legend))
+
+ ydata = pointwise_data[0] - pointwise_data[1]
+ ax.scatter(xdata, ydata, **plot_kwargs)
+ if threshold is not None:
+ ydata = ydata.values.flatten()
+ diff_abs = np.abs(ydata - ydata.mean())
+ bool_ary = diff_abs > threshold * ydata.std()
+ try:
+ coord_labels
+ except NameError:
+ coord_labels = xdata.astype(str)
+ outliers = np.argwhere(bool_ary).squeeze()
+ for outlier in outliers:
+ label = coord_labels[outlier]
+ ax.text(
+ outlier,
+ ydata[outlier],
+ label,
+ horizontalalignment="center",
+ verticalalignment="bottom" if ydata[outlier] > 0 else "top",
+ fontsize=0.8 * xt_labelsize,
+ )
+
+ ax.set_title("{} - {}".format(*models), fontsize=titlesize, wrap=True)
+ ax.set_ylabel("ELPD difference", fontsize=ax_labelsize, wrap=True)
+ ax.tick_params(labelsize=xt_labelsize)
+ if xlabels:
+ set_xticklabels(ax, coord_labels)
+ fig.autofmt_xdate()
+ if legend:
+ ncols = len(handles) // 6 + 1
+ ax.legend(handles=handles, ncol=ncols, title=color)
+
+ else:
+ (figsize, ax_labelsize, titlesize, xt_labelsize, _, markersize) = _scale_fig_size(
+ figsize, textsize, numvars - 2, numvars - 2
+ )
+ plot_kwargs.setdefault("s", markersize ** 2)
+
+ if ax is None:
+ fig, ax = plt.subplots(
+ numvars - 1,
+ numvars - 1,
+ figsize=figsize,
+ constrained_layout=(not xlabels and not legend),
+ )
+
+ for i in range(0, numvars - 1):
+ var1 = pointwise_data[i]
+
+ for j in range(0, numvars - 1):
+ if j < i:
+ ax[j, i].axis("off")
+ continue
+
+ var2 = pointwise_data[j + 1]
+ ax[j, i].scatter(xdata, var1 - var2, **plot_kwargs)
+ if threshold is not None:
+ ydata = (var1 - var2).values.flatten()
+ diff_abs = np.abs(ydata - ydata.mean())
+ bool_ary = diff_abs > threshold * ydata.std()
+ try:
+ coord_labels
+ except NameError:
+ coord_labels = xdata.astype(str)
+ outliers = np.argwhere(bool_ary).squeeze()
+ for outlier in outliers:
+ label = coord_labels[outlier]
+ ax[j, i].text(
+ outlier,
+ ydata[outlier],
+ label,
+ horizontalalignment="center",
+ verticalalignment="bottom" if ydata[outlier] > 0 else "top",
+ fontsize=0.8 * xt_labelsize,
+ )
+
+ if j + 1 != numvars - 1:
+ ax[j, i].axes.get_xaxis().set_major_formatter(NullFormatter())
+ ax[j, i].set_xticks([])
+ elif xlabels:
+ set_xticklabels(ax[j, i], coord_labels)
+
+ if i != 0:
+ ax[j, i].axes.get_yaxis().set_major_formatter(NullFormatter())
+ ax[j, i].set_yticks([])
+ else:
+ ax[j, i].set_ylabel("ELPD difference", fontsize=ax_labelsize, wrap=True)
+
+ ax[j, i].tick_params(labelsize=xt_labelsize)
+ ax[j, i].set_title(
+ "{} - {}".format(models[i], models[j + 1]), fontsize=titlesize, wrap=True
+ )
+ if xlabels:
+ fig.autofmt_xdate()
+ if legend:
+ ncols = len(handles) // 6 + 1
+ ax[0, 1].legend(
+ handles=handles, ncol=ncols, title=color, bbox_to_anchor=(0, 1), loc="upper left"
+ )
+ return ax
diff --git a/arviz/plots/plot_utils.py b/arviz/plots/plot_utils.py
--- a/arviz/plots/plot_utils.py
+++ b/arviz/plots/plot_utils.py
@@ -375,3 +375,56 @@ def get_coords(data, coords):
" dimensions are valid. {}"
).format(err)
)
+
+
+def color_from_dim(dataarray, dim_name):
+ """Return colors and color mapping of a DataArray using coord values as color code.
+
+ Parameters
+ ----------
+ dataarray : xarray.DataArray
+ dim_name : str
+ dimension whose coordinates will be used as color code.
+
+ Returns
+ -------
+ colors : array of floats
+ Array of colors (as floats for use with a cmap) for each element in the dataarray.
+ color_mapping : mapping coord_value -> float
+ Mapping from coord values to corresponding color
+ """
+ present_dims = dataarray.dims
+ coord_values = dataarray[dim_name].values
+ unique_coords = set(coord_values)
+ color_mapping = {coord: num / len(unique_coords) for num, coord in enumerate(unique_coords)}
+ if len(present_dims) > 1:
+ multi_coords = dataarray.coords.to_index()
+ coord_idx = present_dims.index(dim_name)
+ colors = [color_mapping[coord[coord_idx]] for coord in multi_coords]
+ else:
+ colors = [color_mapping[coord] for coord in coord_values]
+ return colors, color_mapping
+
+
+def format_coords_as_labels(dataarray):
+ """Format 1d or multi-d dataarray coords as strings."""
+ coord_labels = dataarray.coords.to_index().values
+ if isinstance(coord_labels[0], tuple):
+ fmt = ", ".join(["{}" for _ in coord_labels[0]])
+ coord_labels[:] = [fmt.format(*x) for x in coord_labels]
+ else:
+ coord_labels[:] = ["{}".format(s) for s in coord_labels]
+ return coord_labels
+
+
+def set_xticklabels(ax, coord_labels):
+ """Set xticklabels to label list using Matplotlib default formatter."""
+ ax.xaxis.get_major_locator().set_params(nbins=9, steps=[1, 2, 5, 10])
+ xticks = ax.get_xticks().astype(np.int64)
+ xticks = xticks[(xticks >= 0) & (xticks < len(coord_labels))]
+ if len(xticks) > len(coord_labels):
+ ax.set_xticks(np.arange(len(coord_labels)))
+ ax.set_xticklabels(coord_labels)
+ else:
+ ax.set_xticks(xticks)
+ ax.set_xticklabels(coord_labels[xticks])
diff --git a/arviz/stats/__init__.py b/arviz/stats/__init__.py
--- a/arviz/stats/__init__.py
+++ b/arviz/stats/__init__.py
@@ -15,6 +15,7 @@
"summary",
"waic",
"effective_sample_size",
+ "ELPDData",
"ess",
"rhat",
"mcse",
diff --git a/arviz/stats/stats.py b/arviz/stats/stats.py
--- a/arviz/stats/stats.py
+++ b/arviz/stats/stats.py
@@ -12,7 +12,12 @@
from ..data import convert_to_inference_data, convert_to_dataset
from .diagnostics import _multichain_statistics, _mc_error, ess
-from .stats_utils import make_ufunc as _make_ufunc, logsumexp as _logsumexp
+from .stats_utils import (
+ make_ufunc as _make_ufunc,
+ wrap_xarray_ufunc as _wrap_xarray_ufunc,
+ logsumexp as _logsumexp,
+ ELPDData,
+)
from ..utils import _var_names
_log = logging.getLogger(__name__)
@@ -225,9 +230,9 @@ def gradient(weights):
for idx, val in enumerate(ics.index):
res = ics.loc[val]
if scale_value < 0:
- diff = res[ic_i] - min_ic_i_val
+ diff = (res[ic_i] - min_ic_i_val).values
else:
- diff = min_ic_i_val - res[ic_i]
+ diff = (min_ic_i_val - res[ic_i]).values
d_ic = np.sum(diff)
d_std_err = np.sqrt(len(diff) * np.var(diff))
std_err = ses.loc[val]
@@ -360,7 +365,7 @@ def loo(data, pointwise=False, reff=None, scale="deviance"):
Returns
-------
- pandas.Series with the following columns:
+ pandas.Series with the following rows:
loo : approximated Leave-one-out cross-validation
loo_se : standard error of loo
p_loo : effective number of parameters
@@ -371,6 +376,9 @@ def loo(data, pointwise=False, reff=None, scale="deviance"):
pareto_k : array of Pareto shape values, only if pointwise True
loo_scale : scale of the loo results
+ The returned object has a custom print method that overrides pd.Series method. It is
+ specific to expected log pointwise predictive density (elpd) information criteria.
+
Examples
--------
Calculate the LOO-CV of a model:
@@ -379,7 +387,15 @@ def loo(data, pointwise=False, reff=None, scale="deviance"):
In [1]: import arviz as az
...: data = az.load_arviz_data("centered_eight")
- ...: az.loo(data, pointwise=True)
+ ...: az.loo(data)
+
+ The custom print method can be seen here, printing only the relevant information and
+ with a specific organization. ``IC_loo`` stands for information criteria, which is the
+ `deviance` scale, the `log` (and `negative_log`) correspond to ``elpd`` (and ``-elpd``)
+
+ .. ipython::
+
+ In [2]: az.loo(data, pointwise=True, scale="log")
"""
inference_data = convert_to_inference_data(data)
@@ -392,9 +408,10 @@ def loo(data, pointwise=False, reff=None, scale="deviance"):
raise TypeError("Data must include log_likelihood in sample_stats")
posterior = inference_data.posterior
log_likelihood = inference_data.sample_stats.log_likelihood
- n_samples = log_likelihood.chain.size * log_likelihood.draw.size
- new_shape = (n_samples, np.product(log_likelihood.shape[2:]))
- log_likelihood = log_likelihood.values.reshape(*new_shape)
+ log_likelihood = log_likelihood.stack(samples=("chain", "draw"))
+ shape = log_likelihood.shape
+ n_samples = shape[-1]
+ n_data_points = np.product(shape[:-1])
if scale.lower() == "deviance":
scale_value = -2
@@ -430,29 +447,60 @@ def loo(data, pointwise=False, reff=None, scale="deviance"):
)
warn_mg = True
- loo_lppd_i = scale_value * _logsumexp(log_weights, axis=0)
- loo_lppd = loo_lppd_i.sum()
- loo_lppd_se = (len(loo_lppd_i) * np.var(loo_lppd_i)) ** 0.5
-
- lppd = np.sum(_logsumexp(log_likelihood, axis=0, b_inv=log_likelihood.shape[0]))
+ ufunc_kwargs = {"n_dims": 1, "ravel": False}
+ kwargs = {"input_core_dims": [["samples"]]}
+ loo_lppd_i = scale_value * _wrap_xarray_ufunc(
+ _logsumexp, log_weights, ufunc_kwargs=ufunc_kwargs, **kwargs
+ )
+ loo_lppd = loo_lppd_i.values.sum()
+ loo_lppd_se = (n_data_points * np.var(loo_lppd_i.values)) ** 0.5
+
+ lppd = np.sum(
+ _wrap_xarray_ufunc(
+ _logsumexp,
+ log_likelihood,
+ func_kwargs={"b_inv": n_samples},
+ ufunc_kwargs=ufunc_kwargs,
+ **kwargs
+ ).values
+ )
p_loo = lppd - loo_lppd / scale_value
if pointwise:
if np.equal(loo_lppd, loo_lppd_i).all(): # pylint: disable=no-member
warnings.warn(
- """The point-wise LOO is the same with the sum LOO, please double check
- the Observed RV in your model to make sure it returns element-wise logp.
- """
+ "The point-wise LOO is the same with the sum LOO, please double check "
+ "the Observed RV in your model to make sure it returns element-wise logp."
)
- return pd.Series(
- data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, loo_lppd_i, pareto_shape, scale],
- index=["loo", "loo_se", "p_loo", "warning", "loo_i", "pareto_k", "loo_scale"],
+ return ELPDData(
+ data=[
+ loo_lppd,
+ loo_lppd_se,
+ p_loo,
+ n_samples,
+ n_data_points,
+ warn_mg,
+ loo_lppd_i.rename("loo_i"),
+ pareto_shape,
+ scale,
+ ],
+ index=[
+ "loo",
+ "loo_se",
+ "p_loo",
+ "n_samples",
+ "n_data_points",
+ "warning",
+ "loo_i",
+ "pareto_k",
+ "loo_scale",
+ ],
)
else:
- return pd.Series(
- data=[loo_lppd, loo_lppd_se, p_loo, warn_mg, scale],
- index=["loo", "loo_se", "p_loo", "warning", "loo_scale"],
+ return ELPDData(
+ data=[loo_lppd, loo_lppd_se, p_loo, n_samples, n_data_points, warn_mg, scale],
+ index=["loo", "loo_se", "p_loo", "n_samples", "n_data_points", "warning", "loo_scale"],
)
@@ -463,7 +511,7 @@ def psislw(log_weights, reff=1.0):
Parameters
----------
log_weights : array
- Array of size (n_samples, n_observations)
+ Array of size (n_observations, n_samples)
reff : float
relative MCMC efficiency, `ess / n`
@@ -474,57 +522,92 @@ def psislw(log_weights, reff=1.0):
kss : array
Pareto tail indices
"""
- rows, cols = log_weights.shape
-
- log_weights_out = np.copy(log_weights, order="F")
- kss = np.empty(cols)
-
+ if hasattr(log_weights, "samples"):
+ n_samples = len(log_weights.samples)
+ shape = [size for size, dim in zip(log_weights.shape, log_weights.dims) if dim != "samples"]
+ else:
+ n_samples = log_weights.shape[-1]
+ shape = log_weights.shape[:-1]
# precalculate constants
- cutoff_ind = -int(np.ceil(min(rows / 5.0, 3 * (rows / reff) ** 0.5))) - 1
+ cutoff_ind = -int(np.ceil(min(n_samples / 5.0, 3 * (n_samples / reff) ** 0.5))) - 1
cutoffmin = np.log(np.finfo(float).tiny) # pylint: disable=no-member, assignment-from-no-return
k_min = 1.0 / 3
- # loop over sets of log weights
- for i, x in enumerate(log_weights_out.T):
- # improve numerical accuracy
- x -= np.max(x)
- # sort the array
- x_sort_ind = np.argsort(x)
- # divide log weights into body and right tail
- xcutoff = max(x[x_sort_ind[cutoff_ind]], cutoffmin)
-
- expxcutoff = np.exp(xcutoff)
- tailinds, = np.where(x > xcutoff) # pylint: disable=unbalanced-tuple-unpacking
- x_tail = x[tailinds]
- tail_len = len(x_tail)
- if tail_len <= 4:
- # not enough tail samples for gpdfit
- k = np.inf
- else:
- # order of tail samples
- x_tail_si = np.argsort(x_tail)
- # fit generalized Pareto distribution to the right tail samples
- x_tail = np.exp(x_tail) - expxcutoff
- k, sigma = _gpdfit(x_tail[x_tail_si])
-
- if k >= k_min:
- # no smoothing if short tail or GPD fit failed
- # compute ordered statistic for the fit
- sti = np.arange(0.5, tail_len) / tail_len
- smoothed_tail = _gpinv(sti, k, sigma)
- smoothed_tail = np.log( # pylint: disable=assignment-from-no-return
- smoothed_tail + expxcutoff
- )
- # place the smoothed tail into the output array
- x[tailinds[x_tail_si]] = smoothed_tail
- # truncate smoothed values to the largest raw weight 0
- x[x > 0] = 0
- # renormalize weights
- x -= _logsumexp(x)
- # store tail index k
- kss[i] = k
+ # create output array with proper dimensions
+ out = tuple([np.empty_like(log_weights), np.empty(shape)])
+
+ # define kwargs
+ func_kwargs = {"cutoff_ind": cutoff_ind, "cutoffmin": cutoffmin, "k_min": k_min, "out": out}
+ ufunc_kwargs = {"n_dims": 1, "n_output": 2, "ravel": False, "check_shape": False}
+ kwargs = {"input_core_dims": [["samples"]], "output_core_dims": [["sample"], []]}
+ log_weights, pareto_shape = _wrap_xarray_ufunc(
+ _psislw, log_weights, ufunc_kwargs=ufunc_kwargs, func_kwargs=func_kwargs, **kwargs
+ )
+ if isinstance(log_weights, xr.DataArray):
+ log_weights = log_weights.rename("log_weights").rename(sample="samples")
+ if isinstance(pareto_shape, xr.DataArray):
+ pareto_shape = pareto_shape.rename("pareto_shape")
+ return log_weights, pareto_shape
+
+
+def _psislw(log_weights, cutoff_ind, cutoffmin, k_min=1.0 / 3):
+ """
+ Pareto smoothed importance sampling (PSIS) for a 1D vector.
- return log_weights_out, kss
+ Parameters
+ ----------
+ log_weights : array
+ Array of length n_observations
+ cutoff_ind : int
+ cutoffmin : float
+ k_min : float
+
+ Returns
+ -------
+ lw_out : array
+ Smoothed log weights
+ kss : float
+ Pareto tail index
+ """
+ x = np.asarray(log_weights)
+
+ # improve numerical accuracy
+ x -= np.max(x)
+ # sort the array
+ x_sort_ind = np.argsort(x)
+ # divide log weights into body and right tail
+ xcutoff = max(x[x_sort_ind[cutoff_ind]], cutoffmin)
+
+ expxcutoff = np.exp(xcutoff)
+ tailinds, = np.where(x > xcutoff) # pylint: disable=unbalanced-tuple-unpacking
+ x_tail = x[tailinds]
+ tail_len = len(x_tail)
+ if tail_len <= 4:
+ # not enough tail samples for gpdfit
+ k = np.inf
+ else:
+ # order of tail samples
+ x_tail_si = np.argsort(x_tail)
+ # fit generalized Pareto distribution to the right tail samples
+ x_tail = np.exp(x_tail) - expxcutoff
+ k, sigma = _gpdfit(x_tail[x_tail_si])
+
+ if k >= k_min:
+ # no smoothing if short tail or GPD fit failed
+ # compute ordered statistic for the fit
+ sti = np.arange(0.5, tail_len) / tail_len
+ smoothed_tail = _gpinv(sti, k, sigma)
+ smoothed_tail = np.log( # pylint: disable=assignment-from-no-return
+ smoothed_tail + expxcutoff
+ )
+ # place the smoothed tail into the output array
+ x[tailinds[x_tail_si]] = smoothed_tail
+ # truncate smoothed values to the largest raw weight 0
+ x[x > 0] = 0
+ # renormalize weights
+ x -= _logsumexp(x)
+
+ return x, k
def _gpdfit(ary):
@@ -924,7 +1007,7 @@ def waic(data, pointwise=False, scale="deviance"):
Returns
-------
- DataFrame with the following columns:
+ Series with the following rows:
waic : widely available information criterion
waic_se : standard error of waic
p_waic : effective number parameters
@@ -934,9 +1017,12 @@ def waic(data, pointwise=False, scale="deviance"):
waic_i : and array of the pointwise predictive accuracy, only if pointwise True
waic_scale : scale of the waic results
+ The returned object has a custom print method that overrides pd.Series method. It is
+ specific to expected log pointwise predictive density (elpd) information criteria.
+
Examples
--------
- Calculate the LOO-CV of a model:
+ Calculate the WAIC of a model:
.. ipython::
@@ -944,6 +1030,9 @@ def waic(data, pointwise=False, scale="deviance"):
...: data = az.load_arviz_data("centered_eight")
...: az.waic(data, pointwise=True)
+ The custom print method can be seen here, printing only the relevant information and
+ with a specific organization. ``IC_loo`` stands for information criteria, which is the
+ `deviance` scale, the `log` (and `negative_log`) correspond to ``elpd`` (and ``-elpd``)
"""
inference_data = convert_to_inference_data(data)
for group in ("sample_stats",):
@@ -964,13 +1053,22 @@ def waic(data, pointwise=False, scale="deviance"):
else:
raise TypeError('Valid scale values are "deviance", "log", "negative_log"')
- n_samples = log_likelihood.chain.size * log_likelihood.draw.size
- new_shape = (n_samples, np.product(log_likelihood.shape[2:]))
- log_likelihood = log_likelihood.values.reshape(*new_shape)
-
- lppd_i = _logsumexp(log_likelihood, axis=0, b_inv=log_likelihood.shape[0])
+ log_likelihood = log_likelihood.stack(samples=("chain", "draw"))
+ shape = log_likelihood.shape
+ n_samples = shape[-1]
+ n_data_points = np.product(shape[:-1])
+
+ ufunc_kwargs = {"n_dims": 1, "ravel": False}
+ kwargs = {"input_core_dims": [["samples"]]}
+ lppd_i = _wrap_xarray_ufunc(
+ _logsumexp,
+ log_likelihood,
+ func_kwargs={"b_inv": n_samples},
+ ufunc_kwargs=ufunc_kwargs,
+ **kwargs
+ )
- vars_lpd = np.var(log_likelihood, axis=0)
+ vars_lpd = log_likelihood.var(dim="samples")
warn_mg = False
if np.any(vars_lpd > 0.4):
warnings.warn(
@@ -982,9 +1080,9 @@ def waic(data, pointwise=False, scale="deviance"):
warn_mg = True
waic_i = scale_value * (lppd_i - vars_lpd)
- waic_se = (len(waic_i) * np.var(waic_i)) ** 0.5
- waic_sum = np.sum(waic_i)
- p_waic = np.sum(vars_lpd)
+ waic_se = (n_data_points * np.var(waic_i.values)) ** 0.5
+ waic_sum = np.sum(waic_i.values)
+ p_waic = np.sum(vars_lpd.values)
if pointwise:
if np.equal(waic_sum, waic_i).all(): # pylint: disable=no-member
@@ -993,12 +1091,38 @@ def waic(data, pointwise=False, scale="deviance"):
the Observed RV in your model to make sure it returns element-wise logp.
"""
)
- return pd.Series(
- data=[waic_sum, waic_se, p_waic, warn_mg, waic_i, scale],
- index=["waic", "waic_se", "p_waic", "warning", "waic_i", "waic_scale"],
+ return ELPDData(
+ data=[
+ waic_sum,
+ waic_se,
+ p_waic,
+ n_samples,
+ n_data_points,
+ warn_mg,
+ waic_i.rename("waic_i"),
+ scale,
+ ],
+ index=[
+ "waic",
+ "waic_se",
+ "p_waic",
+ "n_samples",
+ "n_data_points",
+ "warning",
+ "waic_i",
+ "waic_scale",
+ ],
)
else:
- return pd.Series(
- data=[waic_sum, waic_se, p_waic, warn_mg, scale],
- index=["waic", "waic_se", "p_waic", "warning", "waic_scale"],
+ return ELPDData(
+ data=[waic_sum, waic_se, p_waic, n_samples, n_data_points, warn_mg, scale],
+ index=[
+ "waic",
+ "waic_se",
+ "p_waic",
+ "n_samples",
+ "n_data_points",
+ "warning",
+ "waic_scale",
+ ],
)
diff --git a/arviz/stats/stats_utils.py b/arviz/stats/stats_utils.py
--- a/arviz/stats/stats_utils.py
+++ b/arviz/stats/stats_utils.py
@@ -4,13 +4,14 @@
import warnings
import numpy as np
+import pandas as pd
from scipy.fftpack import next_fast_len
from scipy.stats.mstats import mquantiles
from xarray import apply_ufunc
_log = logging.getLogger(__name__)
-__all__ = ["autocorr", "autocov", "make_ufunc", "wrap_xarray_ufunc"]
+__all__ = ["autocorr", "autocov", "ELPDData", "make_ufunc", "wrap_xarray_ufunc"]
def autocov(ary, axis=-1):
@@ -71,7 +72,9 @@ def autocorr(ary, axis=-1):
return corr
-def make_ufunc(func, n_dims=2, n_output=1, index=Ellipsis, ravel=True): # noqa: D202
+def make_ufunc(
+ func, n_dims=2, n_output=1, index=Ellipsis, ravel=True, check_shape=True
+): # noqa: D202
"""Make ufunc from a function taking 1D array input.
Parameters
@@ -87,6 +90,9 @@ def make_ufunc(func, n_dims=2, n_output=1, index=Ellipsis, ravel=True): # noqa:
Slice ndarray with `index`. Defaults to `Ellipsis`.
ravel : bool, optional
If true, ravel the ndarray before calling `func`.
+ check_shape: bool, optional
+ If false, do not check if the shape of the output is compatible with n_dims and
+ n_output.
Returns
-------
@@ -100,7 +106,7 @@ def _ufunc(ary, *args, out=None, **kwargs):
"""General ufunc for single-output function."""
if out is None:
out = np.empty(ary.shape[:-n_dims])
- else:
+ elif check_shape:
if out.shape != ary.shape[:-n_dims]:
msg = "Shape incorrect for `out`: {}.".format(out.shape)
msg += " Correct shape is {}".format(ary.shape[:-n_dims])
@@ -115,7 +121,7 @@ def _multi_ufunc(ary, *args, out=None, **kwargs):
element_shape = ary.shape[:-n_dims]
if out is None:
out = tuple(np.empty(element_shape) for _ in range(n_output))
- else:
+ elif check_shape:
raise_error = False
correct_shape = tuple(element_shape for _ in range(n_output))
if isinstance(out, tuple):
@@ -363,3 +369,59 @@ def not_valid(ary, check_nan=True, check_shape=True, nan_kwargs=None, shape_kwar
_log.warning(error_msg)
return nan_error | chain_error | draw_error
+
+
+BASE_FMT = """Computed from {{n_samples}} by {{n_points}} log-likelihood matrix
+
+{{0:{0}}} Estimate SE
+{{scale}}_{{kind}} {{1:8.2f}} {{2:7.2f}}
+p_{{kind:{1}}} {{3:8.2f}} -"""
+POINTWISE_LOO_FMT = """------
+
+Pareto k diagnostic values:
+ {{0:>{0}}} {{1:>6}}
+(-Inf, 0.5] (good) {{2:{0}d}} {{6:6.1f}}%
+ (0.5, 0.7] (ok) {{3:{0}d}} {{7:6.1f}}%
+ (0.7, 1] (bad) {{4:{0}d}} {{8:6.1f}}%
+ (1, Inf) (very bad) {{5:{0}d}} {{9:6.1f}}%
+"""
+SCALE_DICT = {"deviance": "IC", "log": "elpd", "negative_log": "-elpd"}
+
+
+class ELPDData(pd.Series): # pylint: disable=too-many-ancestors
+ """Class to contain the data from elpd information criterion like waic or loo."""
+
+ def __str__(self):
+ """Print elpd data in a user friendly way."""
+ kind = self.index[0]
+
+ if kind not in ("waic", "loo"):
+ raise ValueError("Invalid ELPDData object")
+
+ scale_str = SCALE_DICT[self["{}_scale".format(kind)]]
+ padding = len(scale_str) + len(kind) + 1
+ base = BASE_FMT.format(padding, padding - 2)
+ base = base.format(
+ "",
+ kind=kind,
+ scale=scale_str,
+ n_samples=self.n_samples,
+ n_points=self.n_data_points,
+ *self.values
+ )
+
+ if self.warning:
+ base += "\n\nThere has been a warning during the calculation. Please check the results."
+
+ if kind == "loo" and "pareto_k" in self:
+ counts, _ = np.histogram(self.pareto_k, bins=[-np.inf, 0.5, 0.7, 1, np.inf])
+ extended = POINTWISE_LOO_FMT.format(max(4, len(str(np.max(counts)))))
+ extended = extended.format(
+ "Count", "Pct.", *[*counts, *(counts / np.sum(counts) * 100)]
+ )
+ base = "\n".join([base, extended])
+ return base
+
+ def __repr__(self):
+ """Alias to ``__str__``."""
+ return self.__str__()
| diff --git a/arviz/tests/test_plots.py b/arviz/tests/test_plots.py
--- a/arviz/tests/test_plots.py
+++ b/arviz/tests/test_plots.py
@@ -7,7 +7,7 @@
import pytest
from ..data import from_dict, load_arviz_data
-from ..stats import compare, psislw
+from ..stats import compare, psislw, loo, waic
from .helpers import eight_schools_params # pylint: disable=unused-import
from ..plots import (
plot_density,
@@ -28,6 +28,7 @@
plot_hpd,
plot_dist,
plot_rank,
+ plot_elpd,
)
np.random.seed(0)
@@ -74,7 +75,56 @@ def create_model(seed=10):
prior_predictive=prior_predictive,
sample_stats_prior=sample_stats_prior,
observed_data={"y": data["y"]},
- dims={"y": ["obs_dim"]},
+ dims={"y": ["obs_dim"], "log_likelihood": ["obs_dim"]},
+ coords={"obs_dim": range(data["J"])},
+ )
+ return model
+
+
+def create_multidimensional_model(seed=10):
+ """Create model with fake data."""
+ np.random.seed(seed)
+ nchains = 4
+ ndraws = 500
+ ndim1 = 5
+ ndim2 = 7
+ data = {
+ "y": np.random.normal(size=(ndim1, ndim2)),
+ "sigma": np.random.normal(size=(ndim1, ndim2)),
+ }
+ posterior = {
+ "mu": np.random.randn(nchains, ndraws),
+ "tau": abs(np.random.randn(nchains, ndraws)),
+ "eta": np.random.randn(nchains, ndraws, ndim1, ndim2),
+ "theta": np.random.randn(nchains, ndraws, ndim1, ndim2),
+ }
+ posterior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2)}
+ sample_stats = {
+ "energy": np.random.randn(nchains, ndraws),
+ "diverging": np.random.randn(nchains, ndraws) > 0.90,
+ "log_likelihood": np.random.randn(nchains, ndraws, ndim1, ndim2),
+ }
+ prior = {
+ "mu": np.random.randn(nchains, ndraws) / 2,
+ "tau": abs(np.random.randn(nchains, ndraws)) / 2,
+ "eta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
+ "theta": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2,
+ }
+ prior_predictive = {"y": np.random.randn(nchains, ndraws, ndim1, ndim2) / 2}
+ sample_stats_prior = {
+ "energy": np.random.randn(nchains, ndraws),
+ "diverging": (np.random.randn(nchains, ndraws) > 0.95).astype(int),
+ }
+ model = from_dict(
+ posterior=posterior,
+ posterior_predictive=posterior_predictive,
+ sample_stats=sample_stats,
+ prior=prior,
+ prior_predictive=prior_predictive,
+ sample_stats_prior=sample_stats_prior,
+ observed_data={"y": data["y"]},
+ dims={"y": ["dim1", "dim2"], "log_likelihood": ["dim1", "dim2"]},
+ coords={"dim1": range(ndim1), "dim2": range(ndim2)},
)
return model
@@ -88,6 +138,15 @@ class Models:
return Models()
[email protected](scope="module")
+def multidim_models():
+ class Models:
+ model_1 = create_multidimensional_model(seed=10)
+ model_2 = create_multidimensional_model(seed=11)
+
+ return Models()
+
+
@pytest.fixture(scope="function", autouse=True)
def clean_plots(request, save_figs):
"""Close plots after each test, optionally save if --save is specified during test invocation"""
@@ -763,3 +822,104 @@ def test_fast_kde_cumulative(limits):
data = np.random.normal(0, 1, 1000)
density_fast = _fast_kde(data, xmin=limits[0], xmax=limits[1], cumulative=True)[0]
np.testing.assert_almost_equal(round(density_fast[-1], 3), 1)
+
+
[email protected](
+ "kwargs",
+ [
+ {},
+ {"ic": "loo"},
+ {"xlabels": True, "scale": "log"},
+ {"color": "obs_dim", "xlabels": True},
+ {"color": "obs_dim", "legend": True},
+ {"ic": "loo", "color": "blue", "coords": {"obs_dim": slice(2, 5)}},
+ {"color": np.random.uniform(size=8), "threshold": 0.1},
+ ],
+)
[email protected]("add_model", [False, True])
[email protected]("use_elpddata", [False, True])
+def test_plot_elpd(models, add_model, use_elpddata, kwargs):
+ model_dict = {"Model 1": models.model_1, "Model 2": models.model_2}
+ if add_model:
+ model_dict["Model 3"] = create_model(seed=12)
+
+ if use_elpddata:
+ ic = kwargs.get("ic", "waic")
+ scale = kwargs.get("scale", "deviance")
+ if ic == "waic":
+ model_dict = {k: waic(v, scale=scale, pointwise=True) for k, v in model_dict.items()}
+ else:
+ model_dict = {k: loo(v, scale=scale, pointwise=True) for k, v in model_dict.items()}
+
+ axes = plot_elpd(model_dict, **kwargs)
+ assert np.all(axes)
+ if add_model:
+ assert axes.shape[0] == axes.shape[1]
+ assert axes.shape[0] == len(model_dict) - 1
+
+
[email protected](
+ "kwargs",
+ [
+ {},
+ {"ic": "loo"},
+ {"xlabels": True, "scale": "log"},
+ {"color": "dim1", "xlabels": True},
+ {"color": "dim2", "legend": True},
+ {"ic": "loo", "color": "blue", "coords": {"dim2": slice(2, 4)}},
+ {"color": np.random.uniform(size=35), "threshold": 0.1},
+ ],
+)
[email protected]("add_model", [False, True])
[email protected]("use_elpddata", [False, True])
+def test_plot_elpd_multidim(multidim_models, add_model, use_elpddata, kwargs):
+ model_dict = {"Model 1": multidim_models.model_1, "Model 2": multidim_models.model_2}
+ if add_model:
+ model_dict["Model 3"] = create_multidimensional_model(seed=12)
+
+ if use_elpddata:
+ ic = kwargs.get("ic", "waic")
+ scale = kwargs.get("scale", "deviance")
+ if ic == "waic":
+ model_dict = {k: waic(v, scale=scale, pointwise=True) for k, v in model_dict.items()}
+ else:
+ model_dict = {k: loo(v, scale=scale, pointwise=True) for k, v in model_dict.items()}
+
+ axes = plot_elpd(model_dict, **kwargs)
+ assert np.all(axes)
+ if add_model:
+ assert axes.shape[0] == axes.shape[1]
+ assert axes.shape[0] == len(model_dict) - 1
+
+
+def test_plot_elpd_bad_ic(models):
+ model_dict = {
+ "Model 1": waic(models.model_1, pointwise=True),
+ "Model 2": loo(models.model_2, pointwise=True),
+ }
+ with pytest.raises(ValueError):
+ plot_elpd(model_dict, ic="bad_ic")
+
+
+def test_plot_elpd_ic_error(models):
+ model_dict = {
+ "Model 1": waic(models.model_1, pointwise=True),
+ "Model 2": loo(models.model_2, pointwise=True),
+ }
+ with pytest.raises(SyntaxError):
+ plot_elpd(model_dict)
+
+
+def test_plot_elpd_scale_error(models):
+ model_dict = {
+ "Model 1": waic(models.model_1, pointwise=True),
+ "Model 2": waic(models.model_2, pointwise=True, scale="log"),
+ }
+ with pytest.raises(SyntaxError):
+ plot_elpd(model_dict)
+
+
+def test_plot_elpd_one_model(models):
+ model_dict = {"Model 1": models.model_1}
+ with pytest.raises(Exception):
+ plot_elpd(model_dict)
diff --git a/arviz/tests/test_stats.py b/arviz/tests/test_stats.py
--- a/arviz/tests/test_stats.py
+++ b/arviz/tests/test_stats.py
@@ -1,7 +1,7 @@
# pylint: disable=redefined-outer-name
from copy import deepcopy
import numpy as np
-from numpy.testing import assert_almost_equal, assert_array_almost_equal
+from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
from scipy.stats import linregress
from xarray import Dataset, DataArray
@@ -40,7 +40,7 @@ def test_r2_score():
x = np.linspace(0, 1, 100)
y = np.random.normal(x, 1)
res = linregress(x, y)
- assert_almost_equal(res.rvalue ** 2, r2_score(y, res.intercept + res.slope * x).r2, 2)
+ assert_allclose(res.rvalue ** 2, r2_score(y, res.intercept + res.slope * x).r2, 2)
def test_r2_score_multivariate():
@@ -57,8 +57,8 @@ def test_compare_same(centered_eight, method):
data_dict = {"first": centered_eight, "second": centered_eight}
weight = compare(data_dict, method=method)["weight"]
- assert_almost_equal(weight[0], weight[1])
- assert_almost_equal(np.sum(weight), 1.0)
+ assert_allclose(weight[0], weight[1])
+ assert_allclose(np.sum(weight), 1.0)
def test_compare_unknown_ic_and_method(centered_eight, non_centered_eight):
@@ -76,7 +76,7 @@ def test_compare_different(centered_eight, non_centered_eight, ic, method, scale
model_dict = {"centered": centered_eight, "non_centered": non_centered_eight}
weight = compare(model_dict, ic=ic, method=method, scale=scale)["weight"]
assert weight["non_centered"] >= weight["centered"]
- assert_almost_equal(np.sum(weight), 1.0)
+ assert_allclose(np.sum(weight), 1.0)
def test_compare_different_size(centered_eight, non_centered_eight):
@@ -216,6 +216,15 @@ def test_waic_warning(centered_eight):
assert waic(centered_eight, pointwise=True) is not None
[email protected]("scale", ["deviance", "log", "negative_log"])
+def test_waic_print(centered_eight, scale):
+ waic_data = waic(centered_eight, scale=scale).__repr__()
+ waic_pointwise = waic(centered_eight, scale=scale, pointwise=True).__repr__()
+ assert waic_data is not None
+ assert waic_pointwise is not None
+ assert waic_data == waic_pointwise
+
+
def test_loo(centered_eight):
assert loo(centered_eight) is not None
@@ -265,14 +274,22 @@ def test_loo_warning(centered_eight):
assert loo(centered_eight, pointwise=True) is not None
[email protected]("scale", ["deviance", "log", "negative_log"])
+def test_loo_print(centered_eight, scale):
+ loo_data = loo(centered_eight, scale=scale).__repr__()
+ loo_pointwise = loo(centered_eight, scale=scale, pointwise=True).__repr__()
+ assert loo_data is not None
+ assert loo_pointwise is not None
+ assert len(loo_data) < len(loo_pointwise)
+ assert loo_data == loo_pointwise[: len(loo_data)]
+
+
def test_psislw():
data = load_arviz_data("centered_eight")
pareto_k = loo(data, pointwise=True, reff=0.7)["pareto_k"]
log_likelihood = data.sample_stats.log_likelihood # pylint: disable=no-member
- n_samples = log_likelihood.chain.size * log_likelihood.draw.size
- new_shape = (n_samples,) + log_likelihood.shape[2:]
- log_likelihood = log_likelihood.values.reshape(*new_shape)
- assert_almost_equal(pareto_k, psislw(-log_likelihood, 0.7)[1])
+ log_likelihood = log_likelihood.stack(samples=("chain", "draw"))
+ assert_allclose(pareto_k, psislw(-log_likelihood, 0.7)[1])
@pytest.mark.parametrize("probs", [True, False])
diff --git a/arviz/tests/test_stats_utils.py b/arviz/tests/test_stats_utils.py
--- a/arviz/tests/test_stats_utils.py
+++ b/arviz/tests/test_stats_utils.py
@@ -4,7 +4,13 @@
import pytest
from scipy.special import logsumexp
-from ..stats.stats_utils import logsumexp as _logsumexp, make_ufunc, wrap_xarray_ufunc, not_valid
+from ..stats.stats_utils import (
+ logsumexp as _logsumexp,
+ make_ufunc,
+ wrap_xarray_ufunc,
+ not_valid,
+ ELPDData,
+)
@pytest.mark.parametrize("ary_dtype", [np.float64, np.float32, np.int32, np.int64])
@@ -198,3 +204,8 @@ def test_valid_shape():
assert not_valid(
np.ones((10, 10)), check_nan=False, shape_kwargs=dict(min_chains=100, min_draws=2)
)
+
+
+def test_elpd_data_error():
+ with pytest.raises(ValueError):
+ ELPDData(data=[0, 1, 2], index=["not IC", "se", "p"]).__repr__()
| plot pointwise WAIC/LOO
ArviZ is missing a function to compare models in terms of pointwise ELPD. See [figure 10](https://arxiv.org/abs/1709.01449).
Such a function could work for 2 models (like figure 10) or for more than 2 and return a grid of non-redundant subplots, like `plot_pair`.
| I will start working on this. Here is the main idea to make sure we are on the same page.
def plot_compare_pointwise(idata_dict, ic="waic", color=None, ...):
"""Graphical comparison of multiple models in terms of pointwise ELPD.
idata_dict : mapping (str -> InferenceData)
            Dictionary mapping the name of each model to compare to its corresponding InferenceData
ic : str, optional
Information Criterion (WAIC or LOO) used to compare models. Default WAIC.
color : str or array_like, optional
            Colors of the scatter plot. If color is a str, all dots will have the same color; if it is the size of the observations, each dot will have the specified color; otherwise, it will be interpreted as a list of the dims to be used for the color code.
"""
# behaviour similar to plot_pair with pointwise ELPD plots
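A rough usage sketch of the same idea built only from pieces that already exist (pointwise `az.waic` plus a plain matplotlib scatter of the per-observation difference); the final plot function may of course end up looking different:
```python
import arviz as az
import matplotlib.pyplot as plt

idata1 = az.load_arviz_data("centered_eight")
idata2 = az.load_arviz_data("non_centered_eight")

# pointwise WAIC: `waic_i` holds one value per observation
waic1 = az.waic(idata1, pointwise=True)
waic2 = az.waic(idata2, pointwise=True)

# positive difference -> that observation favours model 2 (deviance scale)
diff = waic1["waic_i"] - waic2["waic_i"]

fig, ax = plt.subplots()
ax.scatter(range(len(diff)), diff, marker="+")
ax.set_xlabel("observation")
ax.set_ylabel("pointwise WAIC difference")
plt.show()
```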
I was also thinking that, in addition to the color code, it may be useful to have a coords argument in order to plot only a subset. It could be interesting in the radon case, for instance. I will give some more thought to the API and start a PR soon. | 2019-05-23T15:50:24Z | [] | [] |