body_hash (string, length 64) | body (string, length 23-109k) | docstring (string, length 1-57k) | path (string, length 4-198) | name (string, length 1-115) | repository_name (string, length 7-111) | repository_stars (float64, 0-191k) | lang (1 class: python) | body_without_docstring (string, length 14-108k) | unified (string, length 45-133k)
---|---|---|---|---|---|---|---|---|---|
7a7074ad8dd9cd7f9bf1c8907db648b7f7c5bff6a8f3bec832b520d5e1202d1f | def add_tokentype_embeddings(self, num_tokentypes):
'Add token-type embedding. This function is provided so we can add\n token-type embeddings in case the pretrained model does not have it.\n This allows us to load the model normally and then add this embedding.\n '
if (self.tokentype_embeddings is not None):
raise Exception('tokentype embeddings is already initialized')
if (torch.distributed.get_rank() == 0):
print('adding embedding for {} tokentypes'.format(num_tokentypes), flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
self.init_method(self.tokentype_embeddings.weight) | Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding. | megatron/model/transformer.py | add_tokentype_embeddings | fplk/gpt-neox | 1 | python | def add_tokentype_embeddings(self, num_tokentypes):
'Add token-type embedding. This function is provided so we can add\n token-type embeddings in case the pretrained model does not have it.\n This allows us to load the model normally and then add this embedding.\n '
if (self.tokentype_embeddings is not None):
raise Exception('tokentype embeddings is already initialized')
if (torch.distributed.get_rank() == 0):
print('adding embedding for {} tokentypes'.format(num_tokentypes), flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
self.init_method(self.tokentype_embeddings.weight) | def add_tokentype_embeddings(self, num_tokentypes):
'Add token-type embedding. This function is provided so we can add\n token-type embeddings in case the pretrained model does not have it.\n This allows us to load the model normally and then add this embedding.\n '
if (self.tokentype_embeddings is not None):
raise Exception('tokentype embeddings is already initialized')
if (torch.distributed.get_rank() == 0):
print('adding embedding for {} tokentypes'.format(num_tokentypes), flush=True)
self.num_tokentypes = num_tokentypes
self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
self.init_method(self.tokentype_embeddings.weight)<|docstring|>Add token-type embedding. This function is provided so we can add
token-type embeddings in case the pretrained model does not have it.
This allows us to load the model normally and then add this embedding.<|endoftext|> |
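The row above adds a token-type embedding table to an already-loaded model. Below is a minimal sketch of the same pattern on a self-contained toy module; `hidden_size` and the in-place `init_method` initializer are hypothetical stand-ins for the real Megatron/GPT-NeoX embedding class, and the distributed-rank logging is omitted.

```python
import torch

class ToyEmbedding(torch.nn.Module):
    """Hypothetical stand-in for the embedding module shown in the row above."""

    def __init__(self, hidden_size=8):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_tokentypes = 0
        self.tokentype_embeddings = None
        # Megatron passes an init_method callable around; xavier_uniform_ is a stand-in.
        self.init_method = torch.nn.init.xavier_uniform_

    def add_tokentype_embeddings(self, num_tokentypes):
        # Same logic as the row above, minus the torch.distributed rank check.
        if self.tokentype_embeddings is not None:
            raise Exception('tokentype embeddings is already initialized')
        self.num_tokentypes = num_tokentypes
        self.tokentype_embeddings = torch.nn.Embedding(num_tokentypes, self.hidden_size)
        self.init_method(self.tokentype_embeddings.weight)

emb = ToyEmbedding()
emb.add_tokentype_embeddings(2)               # e.g. segment A / segment B token types
print(emb.tokentype_embeddings.weight.shape)  # torch.Size([2, 8])
```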
59268cbcb89fbf12f7437079b53e2be36bd96db76c09bb11b8f37cd2606c34a0 | def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
'For easy load.'
state_dict_ = {}
state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict(destination, prefix, keep_vars)
if (self.embedding_type == 'learned'):
state_dict_[self._position_embeddings_key] = self.position_embeddings.state_dict(destination, prefix, keep_vars)
if (self.num_tokentypes > 0):
state_dict_[self._tokentype_embeddings_key] = self.tokentype_embeddings.state_dict(destination, prefix, keep_vars)
return state_dict_ | For easy load. | megatron/model/transformer.py | state_dict_for_save_checkpoint | fplk/gpt-neox | 1 | python | def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
state_dict_ = {}
state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict(destination, prefix, keep_vars)
if (self.embedding_type == 'learned'):
state_dict_[self._position_embeddings_key] = self.position_embeddings.state_dict(destination, prefix, keep_vars)
if (self.num_tokentypes > 0):
state_dict_[self._tokentype_embeddings_key] = self.tokentype_embeddings.state_dict(destination, prefix, keep_vars)
return state_dict_ | def state_dict_for_save_checkpoint(self, destination=None, prefix='', keep_vars=False):
state_dict_ = {}
state_dict_[self._word_embeddings_key] = self.word_embeddings.state_dict(destination, prefix, keep_vars)
if (self.embedding_type == 'learned'):
state_dict_[self._position_embeddings_key] = self.position_embeddings.state_dict(destination, prefix, keep_vars)
if (self.num_tokentypes > 0):
state_dict_[self._tokentype_embeddings_key] = self.tokentype_embeddings.state_dict(destination, prefix, keep_vars)
return state_dict_<|docstring|>For easy load.<|endoftext|> |
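The checkpoint-saving row above nests each embedding's own state dict under a private key so it can be reloaded regardless of surrounding module names. A self-contained sketch of that keying scheme, using toy embeddings rather than the real Megatron module:

```python
import torch

word_embeddings = torch.nn.Embedding(100, 8)
position_embeddings = torch.nn.Embedding(32, 8)

# Key names mirror the attributes referenced in the row above.
_word_embeddings_key = 'word_embeddings'
_position_embeddings_key = 'position_embeddings'

state_dict_ = {
    _word_embeddings_key: word_embeddings.state_dict(),
    _position_embeddings_key: position_embeddings.state_dict(),
}
print(list(state_dict_.keys()))                        # ['word_embeddings', 'position_embeddings']
print(list(state_dict_[_word_embeddings_key].keys()))  # ['weight']
```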
9bbb4b49ce5c73ff04126f05caf0f791688b52b892f38137823475b40246d755 | def load_state_dict(self, state_dict, strict=True):
'Customized load.'
if (self._word_embeddings_key in state_dict):
state_dict_ = state_dict[self._word_embeddings_key]
else:
state_dict_ = {}
for key in state_dict.keys():
if ('word_embeddings' in key):
state_dict_[key.split('word_embeddings.')[1]] = state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
if (self.embedding_type == 'learned'):
if (self._position_embeddings_key in state_dict):
state_dict_ = state_dict[self._position_embeddings_key]
else:
state_dict_ = {}
for key in state_dict.keys():
if ('position_embeddings' in key):
state_dict_[key.split('position_embeddings.')[1]] = state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
if (self.num_tokentypes > 0):
state_dict_ = {}
if (self._tokentype_embeddings_key in state_dict):
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
for key in state_dict.keys():
if ('tokentype_embeddings' in key):
state_dict_[key.split('tokentype_embeddings.')[1]] = state_dict[key]
if (len(state_dict_.keys()) > 0):
self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict)
else:
print('***WARNING*** expected tokentype embeddings in the checkpoint but could not find it', flush=True) | Customized load. | megatron/model/transformer.py | load_state_dict | fplk/gpt-neox | 1 | python | def load_state_dict(self, state_dict, strict=True):
if (self._word_embeddings_key in state_dict):
state_dict_ = state_dict[self._word_embeddings_key]
else:
state_dict_ = {}
for key in state_dict.keys():
if ('word_embeddings' in key):
state_dict_[key.split('word_embeddings.')[1]] = state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
if (self.embedding_type == 'learned'):
if (self._position_embeddings_key in state_dict):
state_dict_ = state_dict[self._position_embeddings_key]
else:
state_dict_ = {}
for key in state_dict.keys():
if ('position_embeddings' in key):
state_dict_[key.split('position_embeddings.')[1]] = state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
if (self.num_tokentypes > 0):
state_dict_ = {}
if (self._tokentype_embeddings_key in state_dict):
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
for key in state_dict.keys():
if ('tokentype_embeddings' in key):
state_dict_[key.split('tokentype_embeddings.')[1]] = state_dict[key]
if (len(state_dict_.keys()) > 0):
self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict)
else:
print('***WARNING*** expected tokentype embeddings in the checkpoint but could not find it', flush=True) | def load_state_dict(self, state_dict, strict=True):
if (self._word_embeddings_key in state_dict):
state_dict_ = state_dict[self._word_embeddings_key]
else:
state_dict_ = {}
for key in state_dict.keys():
if ('word_embeddings' in key):
state_dict_[key.split('word_embeddings.')[1]] = state_dict[key]
self.word_embeddings.load_state_dict(state_dict_, strict=strict)
if (self.embedding_type == 'learned'):
if (self._position_embeddings_key in state_dict):
state_dict_ = state_dict[self._position_embeddings_key]
else:
state_dict_ = {}
for key in state_dict.keys():
if ('position_embeddings' in key):
state_dict_[key.split('position_embeddings.')[1]] = state_dict[key]
self.position_embeddings.load_state_dict(state_dict_, strict=strict)
if (self.num_tokentypes > 0):
state_dict_ = {}
if (self._tokentype_embeddings_key in state_dict):
state_dict_ = state_dict[self._tokentype_embeddings_key]
else:
for key in state_dict.keys():
if ('tokentype_embeddings' in key):
state_dict_[key.split('tokentype_embeddings.')[1]] = state_dict[key]
if (len(state_dict_.keys()) > 0):
self.tokentype_embeddings.load_state_dict(state_dict_, strict=strict)
else:
print('***WARNING*** expected tokentype embeddings in the checkpoint but could not find it', flush=True)<|docstring|>Customized load.<|endoftext|> |
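The load_state_dict row above falls back to scanning a flat checkpoint for any key containing 'word_embeddings' when the private nested key is absent, then strips the prefix with a split. The same key-splitting idea on a plain dict (the checkpoint contents below are made up for illustration):

```python
flat_checkpoint = {
    'language_model.embedding.word_embeddings.weight': 'tensor-W',
    'language_model.embedding.position_embeddings.weight': 'tensor-P',
}

state_dict_ = {}
for key in flat_checkpoint:
    if 'word_embeddings' in key:
        # 'language_model.embedding.word_embeddings.weight' -> 'weight'
        state_dict_[key.split('word_embeddings.')[1]] = flat_checkpoint[key]

print(state_dict_)  # {'weight': 'tensor-W'}
```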
b39c827e5b20ec0502ec56b04263620981e62cd5e9f7e4047e11115ce831ace0 | @property
def word_embeddings_weight(self):
'Easy accessory for the pipeline engine to tie embeddings across stages.'
return self.word_embeddings.weight | Easy accessory for the pipeline engine to tie embeddings across stages. | megatron/model/transformer.py | word_embeddings_weight | fplk/gpt-neox | 1 | python | @property
def word_embeddings_weight(self):
return self.word_embeddings.weight | @property
def word_embeddings_weight(self):
return self.word_embeddings.weight<|docstring|>Easy accessory for the pipeline engine to tie embeddings across stages.<|endoftext|> |
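The word_embeddings_weight property above exists so a pipeline engine can tie the input embedding to the output projection across stages. The tying itself is just sharing one Parameter between two modules; a sketch with toy sizes:

```python
import torch

embedding = torch.nn.Embedding(100, 8)
lm_head = torch.nn.Linear(8, 100, bias=False)
lm_head.weight = embedding.weight  # both modules now read and update the same tensor

assert lm_head.weight.data_ptr() == embedding.weight.data_ptr()
```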
0e097fa6612f1cd330bb32b5953187225cb5e820ef40427240208cde5a1b4898 | def create_workbook_from_dataframe(df):
'\n 1. Create workbook from specified pandas.DataFrame\n 2. Adjust columns width to fit the text inside\n 3. Make the index column and the header row bold\n 4. Fill background color for the header row\n\n Other beautification MUST be done by usage side.\n '
workbook = Workbook()
ws = workbook.active
rows = dataframe_to_rows(df.reset_index(), index=False)
col_widths = ([0] * (len(df.columns) + 1))
for (i, row) in enumerate(rows, 1):
for (j, val) in enumerate(row, 1):
if (type(val) is str):
cell = ws.cell(row=i, column=j, value=val)
col_widths[(j - 1)] = max([col_widths[(j - 1)], len(str(val))])
elif hasattr(val, 'sort'):
cell = ws.cell(row=i, column=j, value=', '.join(list(map((lambda v: str(v)), list(val)))))
col_widths[(j - 1)] = max([col_widths[(j - 1)], len(str(val))])
else:
cell = ws.cell(row=i, column=j, value=val)
col_widths[(j - 1)] = max([col_widths[(j - 1)], (len(str(val)) + 1)])
if ((i == 1) or (j == 1)):
cell.font = Font(bold=True)
if (i == 1):
cell.fill = PatternFill('solid', fgColor=colors.YELLOW)
for (i, w) in enumerate(col_widths):
letter = get_column_letter((i + 1))
ws.column_dimensions[letter].width = w
return workbook | 1. Create workbook from specified pandas.DataFrame
2. Adjust columns width to fit the text inside
3. Make the index column and the header row bold
4. Fill background color for the header row
Other beautification MUST be done by usage side. | dataviper/report/utils.py | create_workbook_from_dataframe | otiai10/dataviper | 19 | python | def create_workbook_from_dataframe(df):
'\n 1. Create workbook from specified pandas.DataFrame\n 2. Adjust columns width to fit the text inside\n 3. Make the index column and the header row bold\n 4. Fill background color for the header row\n\n Other beautification MUST be done by usage side.\n '
workbook = Workbook()
ws = workbook.active
rows = dataframe_to_rows(df.reset_index(), index=False)
col_widths = ([0] * (len(df.columns) + 1))
for (i, row) in enumerate(rows, 1):
for (j, val) in enumerate(row, 1):
if (type(val) is str):
cell = ws.cell(row=i, column=j, value=val)
col_widths[(j - 1)] = max([col_widths[(j - 1)], len(str(val))])
elif hasattr(val, 'sort'):
cell = ws.cell(row=i, column=j, value=', '.join(list(map((lambda v: str(v)), list(val)))))
col_widths[(j - 1)] = max([col_widths[(j - 1)], len(str(val))])
else:
cell = ws.cell(row=i, column=j, value=val)
col_widths[(j - 1)] = max([col_widths[(j - 1)], (len(str(val)) + 1)])
if ((i == 1) or (j == 1)):
cell.font = Font(bold=True)
if (i == 1):
cell.fill = PatternFill('solid', fgColor=colors.YELLOW)
for (i, w) in enumerate(col_widths):
letter = get_column_letter((i + 1))
ws.column_dimensions[letter].width = w
return workbook | def create_workbook_from_dataframe(df):
'\n 1. Create workbook from specified pandas.DataFrame\n 2. Adjust columns width to fit the text inside\n 3. Make the index column and the header row bold\n 4. Fill background color for the header row\n\n Other beautification MUST be done by usage side.\n '
workbook = Workbook()
ws = workbook.active
rows = dataframe_to_rows(df.reset_index(), index=False)
col_widths = ([0] * (len(df.columns) + 1))
for (i, row) in enumerate(rows, 1):
for (j, val) in enumerate(row, 1):
if (type(val) is str):
cell = ws.cell(row=i, column=j, value=val)
col_widths[(j - 1)] = max([col_widths[(j - 1)], len(str(val))])
elif hasattr(val, 'sort'):
cell = ws.cell(row=i, column=j, value=', '.join(list(map((lambda v: str(v)), list(val)))))
col_widths[(j - 1)] = max([col_widths[(j - 1)], len(str(val))])
else:
cell = ws.cell(row=i, column=j, value=val)
col_widths[(j - 1)] = max([col_widths[(j - 1)], (len(str(val)) + 1)])
if ((i == 1) or (j == 1)):
cell.font = Font(bold=True)
if (i == 1):
cell.fill = PatternFill('solid', fgColor=colors.YELLOW)
for (i, w) in enumerate(col_widths):
letter = get_column_letter((i + 1))
ws.column_dimensions[letter].width = w
return workbook<|docstring|>1. Create workbook from specified pandas.DataFrame
2. Adjust columns width to fit the text inside
3. Make the index column and the header row bold
4. Fill background color for the header row
Other beautification MUST be done by usage side.<|endoftext|> |
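A usage sketch for create_workbook_from_dataframe() from the row above. It assumes that function is already in scope together with the openpyxl imports it relies on (Workbook, dataframe_to_rows, Font, PatternFill, colors, get_column_letter); the data and file name are illustrative only.

```python
import pandas as pd

df = pd.DataFrame(
    {"population_m": [8.8, 2.1], "area_km2": [1572, 105]},
    index=["London", "Paris"],
)
wb = create_workbook_from_dataframe(df)
wb.save("profile.xlsx")  # header row bold + yellow fill, columns auto-widened
```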
dd8f5b873f8869c2f2a02a03bb4c3e39d881660f5502025a53dec0315b365722 | @property
def TIMERWRAP(self):
'IGNORED: Only available in Epiphany-IV.'
return self._get_nth_bit_of_register('CONFIG', 26) | IGNORED: Only available in Epiphany-IV. | revelation/machine.py | TIMERWRAP | futurecore/revelation | 4 | python | @property
def TIMERWRAP(self):
return self._get_nth_bit_of_register('CONFIG', 26) | @property
def TIMERWRAP(self):
return self._get_nth_bit_of_register('CONFIG', 26)<|docstring|>IGNORED: Only available in Epiphany-IV.<|endoftext|> |
95eb6a75218aca2e33f7844d1f7033af5cfa048fb59ea674b0f3ddac7a6f700d | @TIMERWRAP.setter
def TIMERWRAP(self, value):
'IGNORED: Only available in Epiphany-IV.'
self._set_nth_bit_of_register('CONFIG', 26, value) | IGNORED: Only available in Epiphany-IV. | revelation/machine.py | TIMERWRAP | futurecore/revelation | 4 | python | @TIMERWRAP.setter
def TIMERWRAP(self, value):
self._set_nth_bit_of_register('CONFIG', 26, value) | @TIMERWRAP.setter
def TIMERWRAP(self, value):
self._set_nth_bit_of_register('CONFIG', 26, value)<|docstring|>IGNORED: Only available in Epiphany-IV.<|endoftext|> |
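Both TIMERWRAP accessors above delegate to generic nth-bit helpers on the CONFIG register (bit 26). A self-contained sketch of that bit arithmetic, independent of how the revelation simulator actually stores its registers:

```python
CONFIG_TIMERWRAP_BIT = 26  # bit position used by the properties above

def get_nth_bit(value, n):
    """Return bit n (0 or 1) of an integer register value."""
    return (value >> n) & 1

def set_nth_bit(value, n, bit):
    """Return value with bit n forced to the given 0/1 bit."""
    return value | (1 << n) if bit else value & ~(1 << n)

config = 0
config = set_nth_bit(config, CONFIG_TIMERWRAP_BIT, 1)
assert get_nth_bit(config, CONFIG_TIMERWRAP_BIT) == 1
config = set_nth_bit(config, CONFIG_TIMERWRAP_BIT, 0)
assert config == 0
```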
f19ec6c7c1fc5e69de38b8d0462fbd623da5bd94e50dd5a66c46d6096a222fc7 | def Run(args):
'Run the casectrl function as SPSS syntax'
args = args[list(args.keys())[0]]
oobj = Syntax([Template('DEMANDERDS', subc='', var='demanderds', ktype='varname'), Template('SUPPLIERDS', subc='', var='supplierds', ktype='varname'), Template('DS3', subc='', var='ds3', ktype='varname'), Template('BY', subc='', var='by', ktype='varname', islist=True), Template('FUZZ', subc='', var='fuzz', ktype='float', islist=True), Template('EXACTPRIORITY', subc='', var='exactpriority', ktype='bool'), Template('CUSTOMFUZZ', subc='', var='customfuzz', ktype='literal'), Template('GROUP', subc='', var='group', ktype='existingvarlist', islist=False), Template('SUPPLIERID', subc='', var='supplierid', ktype='varname'), Template('NEWDEMANDERIDVARS', subc='', var='matchslots', islist=True), Template('COPYTODEMANDER', subc='', ktype='varname', var='copytodemander', islist=True), Template('MATCHGROUPVAR', subc='', var='hashvar', ktype='varname'), Template('DRAWPOOLSIZE', subc='', var='drawpool', ktype='varname'), Template('DEMANDERID', subc='', var='demanderid', ktype='varname'), Template('SAMPLEWITHREPLACEMENT', subc='OPTIONS', var='samplewithreplacement', ktype='bool'), Template('MINIMIZEMEMORY', subc='OPTIONS', var='minimizememory', ktype='bool'), Template('SEED', subc='OPTIONS', var='seed', ktype='int', vallist=(((- (2 ** 31)) + 1), ((2 ** 31) - 1))), Template('SHUFFLE', subc='OPTIONS', var='shuffle', ktype='bool'), Template('LOGFILE', subc='OUTFILE', var='logfile', ktype='literal'), Template('ACCESSMODE', subc='OUTFILE', var='logaccessmode', ktype='str', vallist=('append', 'overwrite'))])
global _
try:
_('---')
except:
def _(msg):
return msg
if ('HELP' in args):
helper()
else:
processcmd(oobj, args, casecontrol, vardict=spssaux.VariableDict()) | Run the casectrl function as SPSS syntax | src/FUZZY.py | Run | IBMPredictiveAnalytics/FUZZY | 1 | python | def Run(args):
args = args[list(args.keys())[0]]
oobj = Syntax([Template('DEMANDERDS', subc='', var='demanderds', ktype='varname'), Template('SUPPLIERDS', subc='', var='supplierds', ktype='varname'), Template('DS3', subc='', var='ds3', ktype='varname'), Template('BY', subc='', var='by', ktype='varname', islist=True), Template('FUZZ', subc='', var='fuzz', ktype='float', islist=True), Template('EXACTPRIORITY', subc='', var='exactpriority', ktype='bool'), Template('CUSTOMFUZZ', subc='', var='customfuzz', ktype='literal'), Template('GROUP', subc='', var='group', ktype='existingvarlist', islist=False), Template('SUPPLIERID', subc='', var='supplierid', ktype='varname'), Template('NEWDEMANDERIDVARS', subc='', var='matchslots', islist=True), Template('COPYTODEMANDER', subc='', ktype='varname', var='copytodemander', islist=True), Template('MATCHGROUPVAR', subc='', var='hashvar', ktype='varname'), Template('DRAWPOOLSIZE', subc='', var='drawpool', ktype='varname'), Template('DEMANDERID', subc='', var='demanderid', ktype='varname'), Template('SAMPLEWITHREPLACEMENT', subc='OPTIONS', var='samplewithreplacement', ktype='bool'), Template('MINIMIZEMEMORY', subc='OPTIONS', var='minimizememory', ktype='bool'), Template('SEED', subc='OPTIONS', var='seed', ktype='int', vallist=(((- (2 ** 31)) + 1), ((2 ** 31) - 1))), Template('SHUFFLE', subc='OPTIONS', var='shuffle', ktype='bool'), Template('LOGFILE', subc='OUTFILE', var='logfile', ktype='literal'), Template('ACCESSMODE', subc='OUTFILE', var='logaccessmode', ktype='str', vallist=('append', 'overwrite'))])
global _
try:
_('---')
except:
def _(msg):
return msg
if ('HELP' in args):
helper()
else:
processcmd(oobj, args, casecontrol, vardict=spssaux.VariableDict()) | def Run(args):
args = args[list(args.keys())[0]]
oobj = Syntax([Template('DEMANDERDS', subc='', var='demanderds', ktype='varname'), Template('SUPPLIERDS', subc='', var='supplierds', ktype='varname'), Template('DS3', subc='', var='ds3', ktype='varname'), Template('BY', subc='', var='by', ktype='varname', islist=True), Template('FUZZ', subc='', var='fuzz', ktype='float', islist=True), Template('EXACTPRIORITY', subc='', var='exactpriority', ktype='bool'), Template('CUSTOMFUZZ', subc='', var='customfuzz', ktype='literal'), Template('GROUP', subc='', var='group', ktype='existingvarlist', islist=False), Template('SUPPLIERID', subc='', var='supplierid', ktype='varname'), Template('NEWDEMANDERIDVARS', subc='', var='matchslots', islist=True), Template('COPYTODEMANDER', subc='', ktype='varname', var='copytodemander', islist=True), Template('MATCHGROUPVAR', subc='', var='hashvar', ktype='varname'), Template('DRAWPOOLSIZE', subc='', var='drawpool', ktype='varname'), Template('DEMANDERID', subc='', var='demanderid', ktype='varname'), Template('SAMPLEWITHREPLACEMENT', subc='OPTIONS', var='samplewithreplacement', ktype='bool'), Template('MINIMIZEMEMORY', subc='OPTIONS', var='minimizememory', ktype='bool'), Template('SEED', subc='OPTIONS', var='seed', ktype='int', vallist=(((- (2 ** 31)) + 1), ((2 ** 31) - 1))), Template('SHUFFLE', subc='OPTIONS', var='shuffle', ktype='bool'), Template('LOGFILE', subc='OUTFILE', var='logfile', ktype='literal'), Template('ACCESSMODE', subc='OUTFILE', var='logaccessmode', ktype='str', vallist=('append', 'overwrite'))])
global _
try:
_('---')
except:
def _(msg):
return msg
if ('HELP' in args):
helper()
else:
processcmd(oobj, args, casecontrol, vardict=spssaux.VariableDict())<|docstring|>Run the casectrl function as SPSS syntax<|endoftext|> |
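The Run function above relies on the SPSS extension module's Syntax/Template/processcmd machinery to map parsed SPSS keywords onto a call to casecontrol. A deliberately simplified, self-contained illustration of that mapping idea follows; it is not the real extension API, just the concept in miniature, with made-up keyword values.

```python
# spec maps SPSS keyword -> (python parameter name, converter), echoing the
# Template(..., var=..., ktype=...) declarations above in miniature.
spec = {
    'BY':         ('by', lambda v: v),             # list of variable names
    'SUPPLIERID': ('supplierid', lambda v: v[0]),  # single variable name
    'FUZZ':       ('fuzz', lambda v: [float(x) for x in v]),
    'SEED':       ('seed', lambda v: int(v[0])),
}

def process(parsed, func):
    kwargs = {spec[k][0]: spec[k][1](v) for k, v in parsed.items() if k in spec}
    return func(**kwargs)

# Stand-in for casecontrol, just to show the plumbing.
def fake_casecontrol(by=None, supplierid=None, fuzz=None, seed=None):
    return by, supplierid, fuzz, seed

parsed = {'BY': ['age', 'bmi'], 'SUPPLIERID': ['id'], 'FUZZ': ['2', '0'], 'SEED': ['1234']}
print(process(parsed, fake_casecontrol))  # (['age', 'bmi'], 'id', [2.0, 0.0], 1234)
```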
7f5daba8857719f2f158c76d02c684f30333529bcf0ed6a915a07580d86887fd | def casecontrol(by, supplierid, matchslots, demanderds=None, supplierds=None, group=None, copytodemander=[], ds3=None, demanderid=None, samplewithreplacement=False, hashvar='matchgroup', seed=None, shuffle=False, minimizememory=True, fuzz=None, exactpriority=True, drawpool=None, customfuzz=None, logfile=None, logaccessmode='overwrite'):
'Find match for demanderds cases in supplierds and add identifiers to demanderds. Return unmatched count. \n \n demanderds is the dataset name of cases needing a match (demanders)\n supplierds is the dataset name of cases supplying matches (suppliers)\n ds3 is optional and will contain the supplierds cases used for matches.\n demanderid is optional. If specified, and ds3 is used, it will be appended to the supplier cases. It must have a name\n different from any variable in the supplier dataset.\n \n by is a variable or sequence of variable names used to determine a match. The variables must exist in both demanderds and supplierds.\n supplierid is the variable name of the ID variable in the supplier dataset.\n matchslots is the variable name or sequence of variable names for the ids of the matches\n \n copytodemander is an optional list of variables in supplierds to be added to demanderds. If this option is used, only a single\n matching case can be requested. Variable types must agree for variables that already exist in demanderds.\n samplewithreplacement, if true, samples with replacement; otherwise sampling is without replacement.\n hashvar is an optional variable name to contain the hash of the match variables and added to demanderds and ds3.\n If seed is not None, its value is used to initialize the generator for repeatability.\n If shuffle is True, the demander cases are matched in a random order; otherwise they are matched in case order.\n Since shuffling requires O(N) memory and will be slower, presorting the demander dataset by a random number is an alternative.\n If minimizememory is true, only one eligible case is assigned to eachdemander, and the available matches table is suppressed.\n If fuzz is not None, it must be a sequence of half-ranges, one per by variable. Use 0 for any nonnumeric variables.\n By default, with fuzzy matching, exact matches take priority when available except with minimizememory. \n Set exactpriority False to treat all equally.\n Minimize memory cannot be used with exactpriority.\n drawpool names a variable to be created in the demander ds whose value is the size of the pool for\n each case\n'
global logger
if (not (seed is None)):
random.seed(seed)
myenc = locale.getlocale()[1]
by = spssaux._buildvarlist(by)
matchslots = spssaux._buildvarlist(matchslots)
nmatches = len(matchslots)
if group:
activedsname = spss.ActiveDataset()
if (demanderds is None):
demanderds = activedsname
if (supplierds is None):
supplierds = activedsname
elif ((demanderds is None) or (supplierds is None)):
raise ValueError(_('The required demander or supplier dataset name was not specified'))
if ((demanderds == supplierds) and (not group)):
raise ValueError(_('A group variable must be specified if the demander and supplier datasets are the same'))
if (group and (demanderds != supplierds)):
raise ValueError(_('A group variable cannot be used unless the demander and supplier datasets are the same'))
if (group and copytodemander):
raise ValueError(_('COPYTODEMANDER cannot be used with GROUP'))
copytodemander = spssaux._buildvarlist(copytodemander)
if ((nmatches > 1) and (len(copytodemander) > 0)):
raise ValueError(_('Error: variables can only be copied to the demander dataset if only a single match is requested'))
if ((len(set([v.lower() for v in matchslots])) != nmatches) or (nmatches == 0)):
matchslots = ', '.join(matchslots)
if (not isinstance(matchslots, str)):
matchslots = str(matchslots, myenc)
raise ValueError((_('Match id variable names are not unique or none was specified\n') + matchslots))
if ((fuzz is not None) and (len(fuzz) != len(by))):
raise ValueError((_('List of fuzz values does not match list of BY variables. Fuzz: %s') % fuzz))
if (fuzz and exactpriority):
if minimizememory:
print('Fuzzy matching with exactpriority cannot be combined with minimizememory. Setting minimizememory to NO.')
minimizememory = False
if (minimizememory and samplewithreplacement):
print(_('Sampling with replacement cannot be used with minimize memory. Using sampling without replacement'))
samplewithreplacement = False
nomatchcount = 0
with DataStep():
demanderdsx = spss.Dataset(demanderds)
if (demanderds != supplierds):
supplierds = spss.Dataset(supplierds)
else:
supplierds = demanderdsx
demanderds = demanderdsx
if drawpool:
demanderds.varlist.append(drawpool)
drawpoolindex = demanderds.varlist[drawpool].index
else:
drawpoolindex = None
demanderds.varlist.append(hashvar)
hashvarindex = demanderds.varlist[hashvar].index
try:
supplieridindex = supplierds.varlist[supplierid].index
idtype = supplierds.varlist[supplierid].type
except:
if (not isinstance(supplierid, str)):
supplierid = str(supplierid, myenc)
raise ValueError((_('Supplier dataset id variable not found: %s') % supplierid))
for v in matchslots:
demanderds.varlist.append(v, idtype)
if ds3:
dsextra = spss.Dataset(name=None)
dsextraname = dsextra.name
lends3 = createds3(supplierds, dsextra, hashvar, demanderds, demanderid, supplierid, myenc, group, drawpool)
else:
dsextra = None
lends3 = 0
if (demanderid is not None):
demanderidindex = demanderds.varlist[demanderid].index
else:
demanderidindex = None
if group:
groupindex = demanderds.varlist[group].index
else:
groupindex = None
demandercopyindexes = []
suppliercopyindexes = []
copyvartypes = []
typeconflicts = []
if copytodemander:
demandervars = set([v.name for v in demanderds.varlist])
svtype = 0
for sv in copytodemander:
try:
svtype = supplierds.varlist[sv].type
except:
if (not isinstance(sv, str)):
sv = str(sv, myenc)
raise ValueError((_('Supplier dataset variable not found: %s') % sv))
if (not (sv in demandervars)):
demanderds.varlist.append(sv, svtype)
elif (demanderds.varlist[sv].type != svtype):
typeconflicts.append(sv)
demandercopyindexes.append(demanderds.varlist[sv].index)
suppliercopyindexes.append(supplierds.varlist[sv].index)
copyvartypes.append(svtype)
if typeconflicts:
typeconflicts = ','.join(typeconflicts)
if (not isinstance(typeconflicts, str)):
typeconflicts = str(typeconflicts, myenc)
raise ValueError((_('Error: supplier/demander type conflicts exist for variables: ') + typeconflicts))
matcher = Matcher(by, supplierid, demanderds, supplierds, nmatches, samplewithreplacement, minimizememory, fuzz, exactpriority, groupindex, customfuzz)
demanderdscases = demanderds.cases
supplierdscases = supplierds.cases
demanderdssize = len(demanderdscases)
supplierdssize = len(supplierdscases)
logger = Logger(logfile=logfile, accessmode=logaccessmode)
logger.info('Adding demanders')
addcount = 0
for i in range(demanderdssize):
if ((i % 5000) == 4999):
logger.info(('Cumulative demanders added = %s' % addcount))
addcount += matcher.adddemander(demanderdscases[i])
logger.info(('Done adding demanders. Number added = %s' % addcount))
logger.info(('Adding suppliers. suppliersize = %s (for single dataset usage, this is the total casecount)' % supplierdssize))
addcount = 0
matchmaker = Matchmaker(demanderdscases, matcher, hashvarindex, supplierdscases, dsextra, demandercopyindexes, suppliercopyindexes, demanderidindex, drawpoolindex, supplieridindex, group)
matcher.domatch = matchmaker.do
for i in range(supplierdssize):
if ((i % 1000) == 999):
logger.info(('Cumulative potential suppliers processed = %s. Supplier adds = %s' % (i, addcount)))
addcount += matcher.addsupplier(supplierdscases[i], i)
logger.info(('Done adding suppliers. Number added: %s. A supplier may be added to more than one demander.' % addcount))
logger.info(('Making matches. Demandersize = %s' % demanderdssize))
if (not shuffle):
for i in range(demanderdssize):
if ((i % 1000) == 999):
logger.info(('Cumulative matches = %s, nomatch Count = %s' % (i, nomatchcount)))
nomatchcount += matchmaker.do(i)
else:
caselist = list(range(demanderdssize))
random.shuffle(caselist)
for i in caselist:
if ((i % 1000) == 999):
logger.info(('Cumulative matches = %s, nomatch count = %s' % (i, nomatchcount)))
nomatchcount += matchmaker.do(i)
logger.info('Done matching. Displaying results')
if ds3:
spss.Submit(('DATASET ACTIVATE %(dsextraname)s.\n DATASET NAME %(ds3)s' % locals()))
StartProcedure(_('Case-control matching'), 'SPSSINC CASECTRL')
tbl = spss.BasePivotTable(_('Case Control Matching Statistics'), 'CASECTRLSTATS')
tbl.SetDefaultFormatSpec(spss.FormatSpec.Count)
rowlabels = [_('Exact Matches'), _('Fuzzy Matches'), _('Unmatched Including Missing Keys'), _('Unmatched with Valid Keys'), _('Sampling'), _('Log file'), _('Maximize Matching Performance')]
cells = ((((matcher.counts + [nomatchcount]) + [((samplewithreplacement and _('with replacement')) or _('without replacement'))]) + [(((logfile is None) and _('none')) or logfile)]) + [((minimizememory and _('yes')) or _('no'))])
tbl.SimplePivotTable(rowdim=_('Match Type'), rowlabels=rowlabels, coldim='', collabels=[_('Count')], cells=cells)
if fuzz:
by.insert(0, _('Exact (All Variables)'))
fuzz.insert(0, None)
for i in range(len(fuzz)):
if (matcher.tries[i] > 0.0):
matcher.rejections[i] = ((float(matcher.rejections[i]) / matcher.tries[i]) * 100.0)
tblvalues = [(fuzz[i], matcher.tries[i], matcher.rejections[i]) for i in range(len(fuzz))]
collabels = [_('Value'), _('Fuzzy Match Tries'), _('Incremental Rejection Percentage')]
caption = _('Tries is the number of match comparisons before drawing. Rejection percentage shows the match rejection rate. Rejections are attributed to the first variable in the BY list that causes rejection.')
elif customfuzz:
tblvalues = (len(by) * [None])
collabels = [_('Value')]
caption = _(('Case distance computed from custom function: %s' % customfuzz))
else:
tblvalues = (len(by) * [0])
collabels = [_('Value')]
caption = ''
fuzztbl = spss.BasePivotTable(_('Case Control Match Tolerances'), 'CASECTRLFUZZ', caption=caption)
fuzztbl.SimplePivotTable(rowdim=_('Match Variables'), rowlabels=by, coldim='', collabels=collabels, cells=tblvalues)
if (not minimizememory):
matcher.freqs.showtable()
spss.EndProcedure()
logger.done()
return nomatchcount | Find match for demanderds cases in supplierds and add identifiers to demanderds. Return unmatched count.
demanderds is the dataset name of cases needing a match (demanders)
supplierds is the dataset name of cases supplying matches (suppliers)
ds3 is optional and will contain the supplierds cases used for matches.
demanderid is optional. If specified, and ds3 is used, it will be appended to the supplier cases. It must have a name
different from any variable in the supplier dataset.
by is a variable or sequence of variable names used to determine a match. The variables must exist in both demanderds and supplierds.
supplierid is the variable name of the ID variable in the supplier dataset.
matchslots is the variable name or sequence of variable names for the ids of the matches
copytodemander is an optional list of variables in supplierds to be added to demanderds. If this option is used, only a single
matching case can be requested. Variable types must agree for variables that already exist in demanderds.
samplewithreplacement, if true, samples with replacement; otherwise sampling is without replacement.
hashvar is an optional variable name to contain the hash of the match variables and added to demanderds and ds3.
If seed is not None, its value is used to initialize the generator for repeatability.
If shuffle is True, the demander cases are matched in a random order; otherwise they are matched in case order.
Since shuffling requires O(N) memory and will be slower, presorting the demander dataset by a random number is an alternative.
If minimizememory is true, only one eligible case is assigned to each demander, and the available matches table is suppressed.
If fuzz is not None, it must be a sequence of half-ranges, one per by variable. Use 0 for any nonnumeric variables.
By default, with fuzzy matching, exact matches take priority when available except with minimizememory.
Set exactpriority False to treat all equally.
Minimize memory cannot be used with exactpriority.
drawpool names a variable to be created in the demander ds whose value is the size of the pool for
each case | src/FUZZY.py | casecontrol | IBMPredictiveAnalytics/FUZZY | 1 | python | def casecontrol(by, supplierid, matchslots, demanderds=None, supplierds=None, group=None, copytodemander=[], ds3=None, demanderid=None, samplewithreplacement=False, hashvar='matchgroup', seed=None, shuffle=False, minimizememory=True, fuzz=None, exactpriority=True, drawpool=None, customfuzz=None, logfile=None, logaccessmode='overwrite'):
'Find match for demanderds cases in supplierds and add identifiers to demanderds. Return unmatched count. \n \n demanderds is the dataset name of cases needing a match (demanders)\n supplierds is the dataset name of cases supplying matches (suppliers)\n ds3 is optional and will contain the supplierds cases used for matches.\n demanderid is optional. If specified, and ds3 is used, it will be appended to the supplier cases. It must have a name\n different from any variable in the supplier dataset.\n \n by is a variable or sequence of variable names used to determine a match. The variables must exist in both demanderds and supplierds.\n supplierid is the variable name of the ID variable in the supplier dataset.\n matchslots is the variable name or sequence of variable names for the ids of the matches\n \n copytodemander is an optional list of variables in supplierds to be added to demanderds. If this option is used, only a single\n matching case can be requested. Variable types must agree for variables that already exist in demanderds.\n samplewithreplacement, if true, samples with replacement; otherwise sampling is without replacement.\n hashvar is an optional variable name to contain the hash of the match variables and added to demanderds and ds3.\n If seed is not None, its value is used to initialize the generator for repeatability.\n If shuffle is True, the demander cases are matched in a random order; otherwise they are matched in case order.\n Since shuffling requires O(N) memory and will be slower, presorting the demander dataset by a random number is an alternative.\n If minimizememory is true, only one eligible case is assigned to eachdemander, and the available matches table is suppressed.\n If fuzz is not None, it must be a sequence of half-ranges, one per by variable. Use 0 for any nonnumeric variables.\n By default, with fuzzy matching, exact matches take priority when available except with minimizememory. \n Set exactpriority False to treat all equally.\n Minimize memory cannot be used with exactpriority.\n drawpool names a variable to be created in the demander ds whose value is the size of the pool for\n each case\n'
global logger
if (not (seed is None)):
random.seed(seed)
myenc = locale.getlocale()[1]
by = spssaux._buildvarlist(by)
matchslots = spssaux._buildvarlist(matchslots)
nmatches = len(matchslots)
if group:
activedsname = spss.ActiveDataset()
if (demanderds is None):
demanderds = activedsname
if (supplierds is None):
supplierds = activedsname
elif ((demanderds is None) or (supplierds is None)):
raise ValueError(_('The required demander or supplier dataset name was not specified'))
if ((demanderds == supplierds) and (not group)):
raise ValueError(_('A group variable must be specified if the demander and supplier datasets are the same'))
if (group and (demanderds != supplierds)):
raise ValueError(_('A group variable cannot be used unless the demander and supplier datasets are the same'))
if (group and copytodemander):
raise ValueError(_('COPYTODEMANDER cannot be used with GROUP'))
copytodemander = spssaux._buildvarlist(copytodemander)
if ((nmatches > 1) and (len(copytodemander) > 0)):
raise ValueError(_('Error: variables can only be copied to the demander dataset if only a single match is requested'))
if ((len(set([v.lower() for v in matchslots])) != nmatches) or (nmatches == 0)):
matchslots = ', '.join(matchslots)
if (not isinstance(matchslots, str)):
matchslots = str(matchslots, myenc)
raise ValueError((_('Match id variable names are not unique or none was specified\n') + matchslots))
if ((fuzz is not None) and (len(fuzz) != len(by))):
raise ValueError((_('List of fuzz values does not match list of BY variables. Fuzz: %s') % fuzz))
if (fuzz and exactpriority):
if minimizememory:
print('Fuzzy matching with exactpriority cannot be combined with minimizememory. Setting minimizememory to NO.')
minimizememory = False
if (minimizememory and samplewithreplacement):
print(_('Sampling with replacement cannot be used with minimize memory. Using sampling without replacement'))
samplewithreplacement = False
nomatchcount = 0
with DataStep():
demanderdsx = spss.Dataset(demanderds)
if (demanderds != supplierds):
supplierds = spss.Dataset(supplierds)
else:
supplierds = demanderdsx
demanderds = demanderdsx
if drawpool:
demanderds.varlist.append(drawpool)
drawpoolindex = demanderds.varlist[drawpool].index
else:
drawpoolindex = None
demanderds.varlist.append(hashvar)
hashvarindex = demanderds.varlist[hashvar].index
try:
supplieridindex = supplierds.varlist[supplierid].index
idtype = supplierds.varlist[supplierid].type
except:
if (not isinstance(supplierid, str)):
supplierid = str(supplierid, myenc)
raise ValueError((_('Supplier dataset id variable not found: %s') % supplierid))
for v in matchslots:
demanderds.varlist.append(v, idtype)
if ds3:
dsextra = spss.Dataset(name=None)
dsextraname = dsextra.name
lends3 = createds3(supplierds, dsextra, hashvar, demanderds, demanderid, supplierid, myenc, group, drawpool)
else:
dsextra = None
lends3 = 0
if (demanderid is not None):
demanderidindex = demanderds.varlist[demanderid].index
else:
demanderidindex = None
if group:
groupindex = demanderds.varlist[group].index
else:
groupindex = None
demandercopyindexes = []
suppliercopyindexes = []
copyvartypes = []
typeconflicts = []
if copytodemander:
demandervars = set([v.name for v in demanderds.varlist])
svtype = 0
for sv in copytodemander:
try:
svtype = supplierds.varlist[sv].type
except:
if (not isinstance(sv, str)):
sv = str(sv, myenc)
raise ValueError((_('Supplier dataset variable not found: %s') % sv))
if (not (sv in demandervars)):
demanderds.varlist.append(sv, svtype)
elif (demanderds.varlist[sv].type != svtype):
typeconflicts.append(sv)
demandercopyindexes.append(demanderds.varlist[sv].index)
suppliercopyindexes.append(supplierds.varlist[sv].index)
copyvartypes.append(svtype)
if typeconflicts:
typeconflicts = ','.join(typeconflicts)
if (not isinstance(typeconflicts, str)):
typeconflicts = str(typeconflicts, myenc)
raise ValueError((_('Error: supplier/demander type conflicts exist for variables: ') + typeconflicts))
matcher = Matcher(by, supplierid, demanderds, supplierds, nmatches, samplewithreplacement, minimizememory, fuzz, exactpriority, groupindex, customfuzz)
demanderdscases = demanderds.cases
supplierdscases = supplierds.cases
demanderdssize = len(demanderdscases)
supplierdssize = len(supplierdscases)
logger = Logger(logfile=logfile, accessmode=logaccessmode)
logger.info('Adding demanders')
addcount = 0
for i in range(demanderdssize):
if ((i % 5000) == 4999):
logger.info(('Cumulative demanders added = %s' % addcount))
addcount += matcher.adddemander(demanderdscases[i])
logger.info(('Done adding demanders. Number added = %s' % addcount))
logger.info(('Adding suppliers. suppliersize = %s (for single dataset usage, this is the total casecount)' % supplierdssize))
addcount = 0
matchmaker = Matchmaker(demanderdscases, matcher, hashvarindex, supplierdscases, dsextra, demandercopyindexes, suppliercopyindexes, demanderidindex, drawpoolindex, supplieridindex, group)
matcher.domatch = matchmaker.do
for i in range(supplierdssize):
if ((i % 1000) == 999):
logger.info(('Cumulative potential suppliers processed = %s. Supplier adds = %s' % (i, addcount)))
addcount += matcher.addsupplier(supplierdscases[i], i)
logger.info(('Done adding suppliers. Number added: %s. A supplier may be added to more than one demander.' % addcount))
logger.info(('Making matches. Demandersize = %s' % demanderdssize))
if (not shuffle):
for i in range(demanderdssize):
if ((i % 1000) == 999):
logger.info(('Cumulative matches = %s, nomatch Count = %s' % (i, nomatchcount)))
nomatchcount += matchmaker.do(i)
else:
caselist = list(range(demanderdssize))
random.shuffle(caselist)
for i in caselist:
if ((i % 1000) == 999):
logger.info(('Cumulative matches = %s, nomatch count = %s' % (i, nomatchcount)))
nomatchcount += matchmaker.do(i)
logger.info('Done matching. Displaying results')
if ds3:
spss.Submit(('DATASET ACTIVATE %(dsextraname)s.\n DATASET NAME %(ds3)s' % locals()))
StartProcedure(_('Case-control matching'), 'SPSSINC CASECTRL')
tbl = spss.BasePivotTable(_('Case Control Matching Statistics'), 'CASECTRLSTATS')
tbl.SetDefaultFormatSpec(spss.FormatSpec.Count)
rowlabels = [_('Exact Matches'), _('Fuzzy Matches'), _('Unmatched Including Missing Keys'), _('Unmatched with Valid Keys'), _('Sampling'), _('Log file'), _('Maximize Matching Performance')]
cells = ((((matcher.counts + [nomatchcount]) + [((samplewithreplacement and _('with replacement')) or _('without replacement'))]) + [(((logfile is None) and _('none')) or logfile)]) + [((minimizememory and _('yes')) or _('no'))])
tbl.SimplePivotTable(rowdim=_('Match Type'), rowlabels=rowlabels, coldim='', collabels=[_('Count')], cells=cells)
if fuzz:
by.insert(0, _('Exact (All Variables)'))
fuzz.insert(0, None)
for i in range(len(fuzz)):
if (matcher.tries[i] > 0.0):
matcher.rejections[i] = ((float(matcher.rejections[i]) / matcher.tries[i]) * 100.0)
tblvalues = [(fuzz[i], matcher.tries[i], matcher.rejections[i]) for i in range(len(fuzz))]
collabels = [_('Value'), _('Fuzzy Match Tries'), _('Incremental Rejection Percentage')]
caption = _('Tries is the number of match comparisons before drawing. Rejection percentage shows the match rejection rate. Rejections are attributed to the first variable in the BY list that causes rejection.')
elif customfuzz:
tblvalues = (len(by) * [None])
collabels = [_('Value')]
caption = _(('Case distance computed from custom function: %s' % customfuzz))
else:
tblvalues = (len(by) * [0])
collabels = [_('Value')]
caption = ''
fuzztbl = spss.BasePivotTable(_('Case Control Match Tolerances'), 'CASECTRLFUZZ', caption=caption)
fuzztbl.SimplePivotTable(rowdim=_('Match Variables'), rowlabels=by, coldim='', collabels=collabels, cells=tblvalues)
if (not minimizememory):
matcher.freqs.showtable()
spss.EndProcedure()
logger.done()
return nomatchcount | def casecontrol(by, supplierid, matchslots, demanderds=None, supplierds=None, group=None, copytodemander=[], ds3=None, demanderid=None, samplewithreplacement=False, hashvar='matchgroup', seed=None, shuffle=False, minimizememory=True, fuzz=None, exactpriority=True, drawpool=None, customfuzz=None, logfile=None, logaccessmode='overwrite'):
'Find match for demanderds cases in supplierds and add identifiers to demanderds. Return unmatched count. \n \n demanderds is the dataset name of cases needing a match (demanders)\n supplierds is the dataset name of cases supplying matches (suppliers)\n ds3 is optional and will contain the supplierds cases used for matches.\n demanderid is optional. If specified, and ds3 is used, it will be appended to the supplier cases. It must have a name\n different from any variable in the supplier dataset.\n \n by is a variable or sequence of variable names used to determine a match. The variables must exist in both demanderds and supplierds.\n supplierid is the variable name of the ID variable in the supplier dataset.\n matchslots is the variable name or sequence of variable names for the ids of the matches\n \n copytodemander is an optional list of variables in supplierds to be added to demanderds. If this option is used, only a single\n matching case can be requested. Variable types must agree for variables that already exist in demanderds.\n samplewithreplacement, if true, samples with replacement; otherwise sampling is without replacement.\n hashvar is an optional variable name to contain the hash of the match variables and added to demanderds and ds3.\n If seed is not None, its value is used to initialize the generator for repeatability.\n If shuffle is True, the demander cases are matched in a random order; otherwise they are matched in case order.\n Since shuffling requires O(N) memory and will be slower, presorting the demander dataset by a random number is an alternative.\n If minimizememory is true, only one eligible case is assigned to eachdemander, and the available matches table is suppressed.\n If fuzz is not None, it must be a sequence of half-ranges, one per by variable. Use 0 for any nonnumeric variables.\n By default, with fuzzy matching, exact matches take priority when available except with minimizememory. \n Set exactpriority False to treat all equally.\n Minimize memory cannot be used with exactpriority.\n drawpool names a variable to be created in the demander ds whose value is the size of the pool for\n each case\n'
global logger
if (not (seed is None)):
random.seed(seed)
myenc = locale.getlocale()[1]
by = spssaux._buildvarlist(by)
matchslots = spssaux._buildvarlist(matchslots)
nmatches = len(matchslots)
if group:
activedsname = spss.ActiveDataset()
if (demanderds is None):
demanderds = activedsname
if (supplierds is None):
supplierds = activedsname
elif ((demanderds is None) or (supplierds is None)):
raise ValueError(_('The required demander or supplier dataset name was not specified'))
if ((demanderds == supplierds) and (not group)):
raise ValueError(_('A group variable must be specified if the demander and supplier datasets are the same'))
if (group and (demanderds != supplierds)):
raise ValueError(_('A group variable cannot be used unless the demander and supplier datasets are the same'))
if (group and copytodemander):
raise ValueError(_('COPYTODEMANDER cannot be used with GROUP'))
copytodemander = spssaux._buildvarlist(copytodemander)
if ((nmatches > 1) and (len(copytodemander) > 0)):
raise ValueError(_('Error: variables can only be copied to the demander dataset if only a single match is requested'))
if ((len(set([v.lower() for v in matchslots])) != nmatches) or (nmatches == 0)):
matchslots = ', '.join(matchslots)
if (not isinstance(matchslots, str)):
matchslots = str(matchslots, myenc)
raise ValueError((_('Match id variable names are not unique or none was specified\n') + matchslots))
if ((fuzz is not None) and (len(fuzz) != len(by))):
raise ValueError((_('List of fuzz values does not match list of BY variables. Fuzz: %s') % fuzz))
if (fuzz and exactpriority):
if minimizememory:
print('Fuzzy matching with exactpriority cannot be combined with minimizememory. Setting minimizememory to NO.')
minimizememory = False
if (minimizememory and samplewithreplacement):
print(_('Sampling with replacement cannot be used with minimize memory. Using sampling without replacement'))
samplewithreplacement = False
nomatchcount = 0
with DataStep():
demanderdsx = spss.Dataset(demanderds)
if (demanderds != supplierds):
supplierds = spss.Dataset(supplierds)
else:
supplierds = demanderdsx
demanderds = demanderdsx
if drawpool:
demanderds.varlist.append(drawpool)
drawpoolindex = demanderds.varlist[drawpool].index
else:
drawpoolindex = None
demanderds.varlist.append(hashvar)
hashvarindex = demanderds.varlist[hashvar].index
try:
supplieridindex = supplierds.varlist[supplierid].index
idtype = supplierds.varlist[supplierid].type
except:
if (not isinstance(supplierid, str)):
supplierid = str(supplierid, myenc)
raise ValueError((_('Supplier dataset id variable not found: %s') % supplierid))
for v in matchslots:
demanderds.varlist.append(v, idtype)
if ds3:
dsextra = spss.Dataset(name=None)
dsextraname = dsextra.name
lends3 = createds3(supplierds, dsextra, hashvar, demanderds, demanderid, supplierid, myenc, group, drawpool)
else:
dsextra = None
lends3 = 0
if (demanderid is not None):
demanderidindex = demanderds.varlist[demanderid].index
else:
demanderidindex = None
if group:
groupindex = demanderds.varlist[group].index
else:
groupindex = None
demandercopyindexes = []
suppliercopyindexes = []
copyvartypes = []
typeconflicts = []
if copytodemander:
demandervars = set([v.name for v in demanderds.varlist])
svtype = 0
for sv in copytodemander:
try:
svtype = supplierds.varlist[sv].type
except:
if (not isinstance(sv, str)):
sv = str(sv, myenc)
raise ValueError((_('Supplier dataset variable not found: %s') % sv))
if (not (sv in demandervars)):
demanderds.varlist.append(sv, svtype)
elif (demanderds.varlist[sv].type != svtype):
typeconflicts.append(sv)
demandercopyindexes.append(demanderds.varlist[sv].index)
suppliercopyindexes.append(supplierds.varlist[sv].index)
copyvartypes.append(svtype)
if typeconflicts:
typeconflicts = ','.join(typeconflicts)
if (not isinstance(typeconflicts, str)):
typeconflicts = str(typeconflicts, myenc)
raise ValueError((_('Error: supplier/demander type conflicts exist for variables: ') + typeconflicts))
matcher = Matcher(by, supplierid, demanderds, supplierds, nmatches, samplewithreplacement, minimizememory, fuzz, exactpriority, groupindex, customfuzz)
demanderdscases = demanderds.cases
supplierdscases = supplierds.cases
demanderdssize = len(demanderdscases)
supplierdssize = len(supplierdscases)
logger = Logger(logfile=logfile, accessmode=logaccessmode)
logger.info('Adding demanders')
addcount = 0
for i in range(demanderdssize):
if ((i % 5000) == 4999):
logger.info(('Cumulative demanders added = %s' % addcount))
addcount += matcher.adddemander(demanderdscases[i])
logger.info(('Done adding demanders. Number added = %s' % addcount))
logger.info(('Adding suppliers. suppliersize = %s (for single dataset usage, this is the total casecount)' % supplierdssize))
addcount = 0
matchmaker = Matchmaker(demanderdscases, matcher, hashvarindex, supplierdscases, dsextra, demandercopyindexes, suppliercopyindexes, demanderidindex, drawpoolindex, supplieridindex, group)
matcher.domatch = matchmaker.do
for i in range(supplierdssize):
if ((i % 1000) == 999):
logger.info(('Cumulative potential suppliers processed = %s. Supplier adds = %s' % (i, addcount)))
addcount += matcher.addsupplier(supplierdscases[i], i)
logger.info(('Done adding suppliers. Number added: %s. A supplier may be added to more than one demander.' % addcount))
logger.info(('Making matches. Demandersize = %s' % demanderdssize))
if (not shuffle):
for i in range(demanderdssize):
if ((i % 1000) == 999):
logger.info(('Cumulative matches = %s, nomatch Count = %s' % (i, nomatchcount)))
nomatchcount += matchmaker.do(i)
else:
caselist = list(range(demanderdssize))
random.shuffle(caselist)
for i in caselist:
if ((i % 1000) == 999):
logger.info(('Cumulative matches = %s, nomatch count = %s' % (i, nomatchcount)))
nomatchcount += matchmaker.do(i)
logger.info('Done matching. Displaying results')
if ds3:
spss.Submit(('DATASET ACTIVATE %(dsextraname)s.\n DATASET NAME %(ds3)s' % locals()))
StartProcedure(_('Case-control matching'), 'SPSSINC CASECTRL')
tbl = spss.BasePivotTable(_('Case Control Matching Statistics'), 'CASECTRLSTATS')
tbl.SetDefaultFormatSpec(spss.FormatSpec.Count)
rowlabels = [_('Exact Matches'), _('Fuzzy Matches'), _('Unmatched Including Missing Keys'), _('Unmatched with Valid Keys'), _('Sampling'), _('Log file'), _('Maximize Matching Performance')]
cells = ((((matcher.counts + [nomatchcount]) + [((samplewithreplacement and _('with replacement')) or _('without replacement'))]) + [(((logfile is None) and _('none')) or logfile)]) + [((minimizememory and _('yes')) or _('no'))])
tbl.SimplePivotTable(rowdim=_('Match Type'), rowlabels=rowlabels, coldim='', collabels=[_('Count')], cells=cells)
if fuzz:
by.insert(0, _('Exact (All Variables)'))
fuzz.insert(0, None)
for i in range(len(fuzz)):
if (matcher.tries[i] > 0.0):
matcher.rejections[i] = ((float(matcher.rejections[i]) / matcher.tries[i]) * 100.0)
tblvalues = [(fuzz[i], matcher.tries[i], matcher.rejections[i]) for i in range(len(fuzz))]
collabels = [_('Value'), _('Fuzzy Match Tries'), _('Incremental Rejection Percentage')]
caption = _('Tries is the number of match comparisons before drawing. Rejection percentage shows the match rejection rate. Rejections are attributed to the first variable in the BY list that causes rejection.')
elif customfuzz:
tblvalues = (len(by) * [None])
collabels = [_('Value')]
caption = _(('Case distance computed from custom function: %s' % customfuzz))
else:
tblvalues = (len(by) * [0])
collabels = [_('Value')]
caption = ''
fuzztbl = spss.BasePivotTable(_('Case Control Match Tolerances'), 'CASECTRLFUZZ', caption=caption)
fuzztbl.SimplePivotTable(rowdim=_('Match Variables'), rowlabels=by, coldim='', collabels=collabels, cells=tblvalues)
if (not minimizememory):
matcher.freqs.showtable()
spss.EndProcedure()
logger.done()
return nomatchcount<|docstring|>Find match for demanderds cases in supplierds and add identifiers to demanderds. Return unmatched count.
demanderds is the dataset name of cases needing a match (demanders)
supplierds is the dataset name of cases supplying matches (suppliers)
ds3 is optional and will contain the supplierds cases used for matches.
demanderid is optional. If specified, and ds3 is used, it will be appended to the supplier cases. It must have a name
different from any variable in the supplier dataset.
by is a variable or sequence of variable names used to determine a match. The variables must exist in both demanderds and supplierds.
supplierid is the variable name of the ID variable in the supplier dataset.
matchslots is the variable name or sequence of variable names for the ids of the matches
copytodemander is an optional list of variables in supplierds to be added to demanderds. If this option is used, only a single
matching case can be requested. Variable types must agree for variables that already exist in demanderds.
samplewithreplacement, if true, samples with replacement; otherwise sampling is without replacement.
hashvar is an optional variable name to contain the hash of the match variables and added to demanderds and ds3.
If seed is not None, its value is used to initialize the generator for repeatability.
If shuffle is True, the demander cases are matched in a random order; otherwise they are matched in case order.
Since shuffling requires O(N) memory and will be slower, presorting the demander dataset by a random number is an alternative.
If minimizememory is true, only one eligible case is assigned to each demander, and the available matches table is suppressed.
If fuzz is not None, it must be a sequence of half-ranges, one per by variable. Use 0 for any nonnumeric variables.
By default, with fuzzy matching, exact matches take priority when available except with minimizememory.
Set exactpriority False to treat all equally.
Minimize memory cannot be used with exactpriority.
drawpool names a variable to be created in the demander ds whose value is the size of the pool for
each case<|endoftext|> |
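A hedged sketch of calling casecontrol directly from the SPSS Python environment, mirroring the signature in the row above. The dataset names, BY variables, and parameter values are hypothetical, and the call only works inside an SPSS session with both datasets open.

```python
# Hypothetical: 'cases' and 'controls' must already be open SPSS datasets.
unmatched = casecontrol(
    by=['age', 'bmi'],            # match keys present in both datasets
    supplierid='id',              # ID variable in the supplier (control) dataset
    matchslots=['match1'],        # one new ID variable per requested match
    demanderds='cases',
    supplierds='controls',
    fuzz=[2, 0.5],                # half-ranges, one per BY variable
    seed=12345,                   # fixed seed for a repeatable draw
    samplewithreplacement=False,
)
print('demanders left unmatched:', unmatched)
```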
645446602808a1c266328c3ac028d40e8c408d4a3c68665b9328e2823f280aca | def createds3(dsin, dsout, hashvar, demanderds, demanderid, supplierid, myenc, group, drawpool):
'Create a new dataset by copying the variables in dsin to dsout. No cases are created.\n Return number of variables in dsout.\n \n dsin is the intput dataset; dsout is the output dataset.\n hashvar is the name of the hash variable.\n if demanderid is not None, its definition from demanderds is appended to dsout.\n If using group, the demanderid name is suffixed with "_", since it would always duplicate\n the supplierid name.'
for v in dsin.varlist:
if (v.name != drawpool):
dsout.varlist.append(v.name, v.type)
unicodemode = isinstance(dsout.varlist[0].name, str)
if (unicodemode and (not isinstance(hashvar, str))):
hashvar = str(hashvar, myenc)
if ((demanderid is not None) and unicodemode and (not isinstance(demanderid, str))):
demanderid = str(demanderid, myenc)
if (hashvar not in [v.name for v in dsout.varlist]):
dsout.varlist.append(hashvar, 0)
if ((demanderid is not None) and (demanderid not in [v.name for v in dsout.varlist])):
try:
dsout.varlist.append(demanderid, demanderds.varlist[demanderid].type)
except:
if (not isinstance(demanderid, str)):
demanderid = str(demanderid, myenc)
raise ValueError((_('Demander id variable not found, or it duplicates a name in the supplier dataset: %s') % demanderid))
return len(dsout.varlist) | Create a new dataset by copying the variables in dsin to dsout. No cases are created.
Return number of variables in dsout.
dsin is the input dataset; dsout is the output dataset.
hashvar is the name of the hash variable.
if demanderid is not None, its definition from demanderds is appended to dsout.
If using group, the demanderid name is suffixed with "_", since it would always duplicate
the supplierid name. | src/FUZZY.py | createds3 | IBMPredictiveAnalytics/FUZZY | 1 | python | def createds3(dsin, dsout, hashvar, demanderds, demanderid, supplierid, myenc, group, drawpool):
'Create a new dataset by copying the variables in dsin to dsout. No cases are created.\n Return number of variables in dsout.\n \n dsin is the intput dataset; dsout is the output dataset.\n hashvar is the name of the hash variable.\n if demanderid is not None, its definition from demanderds is appended to dsout.\n If using group, the demanderid name is suffixed with "_", since it would always duplicate\n the supplierid name.'
for v in dsin.varlist:
if (v.name != drawpool):
dsout.varlist.append(v.name, v.type)
unicodemode = isinstance(dsout.varlist[0].name, str)
if (unicodemode and (not isinstance(hashvar, str))):
hashvar = str(hashvar, myenc)
if ((demanderid is not None) and unicodemode and (not isinstance(demanderid, str))):
demanderid = str(demanderid, myenc)
if (hashvar not in [v.name for v in dsout.varlist]):
dsout.varlist.append(hashvar, 0)
if ((demanderid is not None) and (demanderid not in [v.name for v in dsout.varlist])):
try:
dsout.varlist.append(demanderid, demanderds.varlist[demanderid].type)
except:
if (not isinstance(demanderid, str)):
demanderid = str(demanderid, myenc)
raise ValueError((_('Demander id variable not found, or it duplicates a name in the supplier dataset: %s') % demanderid))
return len(dsout.varlist) | def createds3(dsin, dsout, hashvar, demanderds, demanderid, supplierid, myenc, group, drawpool):
'Create a new dataset by copying the variables in dsin to dsout. No cases are created.\n Return number of variables in dsout.\n \n dsin is the intput dataset; dsout is the output dataset.\n hashvar is the name of the hash variable.\n if demanderid is not None, its definition from demanderds is appended to dsout.\n If using group, the demanderid name is suffixed with "_", since it would always duplicate\n the supplierid name.'
for v in dsin.varlist:
if (v.name != drawpool):
dsout.varlist.append(v.name, v.type)
unicodemode = isinstance(dsout.varlist[0].name, str)
if (unicodemode and (not isinstance(hashvar, str))):
hashvar = str(hashvar, myenc)
if ((demanderid is not None) and unicodemode and (not isinstance(demanderid, str))):
demanderid = str(demanderid, myenc)
if (hashvar not in [v.name for v in dsout.varlist]):
dsout.varlist.append(hashvar, 0)
if ((demanderid is not None) and (demanderid not in [v.name for v in dsout.varlist])):
try:
dsout.varlist.append(demanderid, demanderds.varlist[demanderid].type)
except:
if (not isinstance(demanderid, str)):
demanderid = str(demanderid, myenc)
raise ValueError((_('Demander id variable not found, or it duplicates a name in the supplier dataset: %s') % demanderid))
return len(dsout.varlist)<|docstring|>Create a new dataset by copying the variables in dsin to dsout. No cases are created.
Return number of variables in dsout.
dsin is the input dataset; dsout is the output dataset.
hashvar is the name of the hash variable.
if demanderid is not None, its definition from demanderds is appended to dsout.
If using group, the demanderid name is suffixed with "_", since it would always duplicate
the supplierid name.<|endoftext|> |
c8246cf9e2cf7d6454db7dc67cd63700da1b65daae7111ed59ddf7e7a39435eb | def diff(x, y):
'Return absolute difference between x and y, assumed to be of the same basic type\n \n if numeric and neither is missing (None), return ordinary absolute value\n if not numeric, return 0 if identical and not blank.\n Otherwise return BIG.'
BIG = 1e+100
try:
return abs((x - y))
except:
if isinstance(x, str):
x = x.rstrip()
y = y.rstrip()
if ((x == y) and (x != '')):
return 0
return BIG | Return absolute difference between x and y, assumed to be of the same basic type
if numeric and neither is missing (None), return ordinary absolute value
if not numeric, return 0 if identical and not blank.
Otherwise return BIG. | src/FUZZY.py | diff | IBMPredictiveAnalytics/FUZZY | 1 | python | def diff(x, y):
'Return absolute difference between x and y, assumed to be of the same basic type\n \n if numeric and neither is missing (None), return ordinary absolute value\n if not numeric, return 0 if identical and not blank.\n Otherwise return BIG.'
BIG = 1e+100
try:
return abs((x - y))
except:
if isinstance(x, str):
x = x.rstrip()
y = y.rstrip()
if ((x == y) and (x != )):
return 0
return BIG | def diff(x, y):
'Return absolute difference between x and y, assumed to be of the same basic type\n \n if numeric and neither is missing (None), return ordinary absolute value\n if not numeric, return 0 if identical and not blank.\n Otherwise return BIG.'
BIG = 1e+100
try:
return abs((x - y))
except:
if isinstance(x, str):
x = x.rstrip()
y = y.rstrip()
if ((x == y) and (x != )):
return 0
return BIG<|docstring|>Return absolute difference between x and y, assumed to be of the same basic type
if numeric and neither is missing (None), return ordinary absolute value
if not numeric, return 0 if identical and not blank.
Otherwise return BIG.<|endoftext|> |
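A few worked calls, assuming the diff function above is in scope (these examples are not in the source; the expected values follow directly from the code):

assert diff(3, 5) == 2                # numeric: plain absolute difference
assert diff("abc ", "abc") == 0       # equal non-blank strings (trailing blanks stripped)
assert diff("abc", "xyz") == 1e+100   # mismatched strings fall back to the BIG sentinel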
21a0da29e50fd964e100d73ba5c30c42c6c07e1cb023c7b96a3c0d5bdbb5bf78 | def attributesFromDict(d):
'build self attributes from a dictionary d.'
self = d.pop('self')
for (name, value) in d.items():
setattr(self, name, value) | build self attributes from a dictionary d. | src/FUZZY.py | attributesFromDict | IBMPredictiveAnalytics/FUZZY | 1 | python | def attributesFromDict(d):
self = d.pop('self')
for (name, value) in d.items():
setattr(self, name, value) | def attributesFromDict(d):
self = d.pop('self')
for (name, value) in d.items():
setattr(self, name, value)<|docstring|>build self attributes from a dictionary d.<|endoftext|> |
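A hypothetical class (not in the source) showing the intended use from inside an __init__, where locals() holds self plus the constructor arguments:

class Point:
    def __init__(self, x, y, label="origin"):
        attributesFromDict(locals())   # sets self.x, self.y, self.label in one call

p = Point(1.0, 2.0)
print(p.x, p.y, p.label)   # -> 1.0 2.0 origin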
cfce7fb824370a81757fbb9443228f34562548deedd67b3432ef3449bd79fedc | def StartProcedure(procname, omsid):
'Start a procedure\n \n procname is the name that will appear in the Viewer outline. It may be translated\n omsid is the OMS procedure identifier and should not be translated.\n \n Statistics versions prior to 19 support only a single term used for both purposes.\n For those versions, the omsid will be use for the procedure name.\n \n While the spss.StartProcedure function accepts the one argument, this function\n requires both.'
try:
spss.StartProcedure(procname, omsid)
except TypeError:
spss.StartProcedure(omsid) | Start a procedure
procname is the name that will appear in the Viewer outline. It may be translated
omsid is the OMS procedure identifier and should not be translated.
Statistics versions prior to 19 support only a single term used for both purposes.
For those versions, the omsid will be used for the procedure name.
While the spss.StartProcedure function accepts the one argument, this function
requires both. | src/FUZZY.py | StartProcedure | IBMPredictiveAnalytics/FUZZY | 1 | python | def StartProcedure(procname, omsid):
'Start a procedure\n \n procname is the name that will appear in the Viewer outline. It may be translated\n omsid is the OMS procedure identifier and should not be translated.\n \n Statistics versions prior to 19 support only a single term used for both purposes.\n For those versions, the omsid will be use for the procedure name.\n \n While the spss.StartProcedure function accepts the one argument, this function\n requires both.'
try:
spss.StartProcedure(procname, omsid)
except TypeError:
spss.StartProcedure(omsid) | def StartProcedure(procname, omsid):
'Start a procedure\n \n procname is the name that will appear in the Viewer outline. It may be translated\n omsid is the OMS procedure identifier and should not be translated.\n \n Statistics versions prior to 19 support only a single term used for both purposes.\n For those versions, the omsid will be use for the procedure name.\n \n While the spss.StartProcedure function accepts the one argument, this function\n requires both.'
try:
spss.StartProcedure(procname, omsid)
except TypeError:
spss.StartProcedure(omsid)<|docstring|>Start a procedure
procname is the name that will appear in the Viewer outline. It may be translated
omsid is the OMS procedure identifier and should not be translated.
Statistics versions prior to 19 support only a single term used for both purposes.
For those versions, the omsid will be used for the procedure name.
While the spss.StartProcedure function accepts the one argument, this function
requires both.<|endoftext|> |
f692197808a41bb09ca0bfa49145b2b51b5b8b1db17d99778f1a2aa3d4138069 | def helper():
'open html help in default browser window\n \n The location is computed from the current module name'
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = ((('file://' + path) + os.path.sep) + 'markdown.html')
browser = webbrowser.get()
if (not browser.open_new(helpspec)):
print(('Help file not found:' + helpspec)) | open html help in default browser window
The location is computed from the current module name | src/FUZZY.py | helper | IBMPredictiveAnalytics/FUZZY | 1 | python | def helper():
'open html help in default browser window\n \n The location is computed from the current module name'
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = ((('file://' + path) + os.path.sep) + 'markdown.html')
browser = webbrowser.get()
if (not browser.open_new(helpspec)):
print(('Help file not found:' + helpspec)) | def helper():
'open html help in default browser window\n \n The location is computed from the current module name'
import webbrowser, os.path
path = os.path.splitext(__file__)[0]
helpspec = ((('file://' + path) + os.path.sep) + 'markdown.html')
browser = webbrowser.get()
if (not browser.open_new(helpspec)):
print(('Help file not found:' + helpspec))<|docstring|>open html help in default browser window
The location is computed from the current module name<|endoftext|> |
e46de300c27015b6c9d2a40563ee5260bf62c4cf71b66ebcb97e62626334a2d9 | def __enter__(self):
'initialization for with statement'
try:
spss.StartDataStep()
except:
spss.Submit('EXECUTE')
spss.StartDataStep()
return self | initialization for with statement | src/FUZZY.py | __enter__ | IBMPredictiveAnalytics/FUZZY | 1 | python | def __enter__(self):
try:
spss.StartDataStep()
except:
spss.Submit('EXECUTE')
spss.StartDataStep()
return self | def __enter__(self):
try:
spss.StartDataStep()
except:
spss.Submit('EXECUTE')
spss.StartDataStep()
return self<|docstring|>initialization for with statement<|endoftext|> |
1c1139895d08ee34d976001bce35d6243e67ac8aa44288b64a31249d24089dbf | def __init__(self, demanderdscases, matcher, hashvarindex, supplierdscases, ds3cases, demandercopyindexes, suppliercopyindexes, demanderidindex, drawpoolindex, supplieridindex, group):
'demanderdscases is the demander case to match.\n matcher is the Matcher object to use.\n hashvarindex is the variable index for the hash value variable. The matches are written to following contiguous variables.\n demandercopyindexes and suppliercopyindexes are case indexes for copying values from supplierds to demanderds\n Only one match is allowed if copying.\n If there is no match, values of existing variables are not changed.\n \n If ds3cases is not None, supplier dataset cases used are written to ds3cases\n if demanderidindex is not None and ds3 is being created, its value is copied to ds3.'
attributesFromDict(locals()) | demanderdscases is the demander case to match.
matcher is the Matcher object to use.
hashvarindex is the variable index for the hash value variable. The matches are written to following contiguous variables.
demandercopyindexes and suppliercopyindexes are case indexes for copying values from supplierds to demanderds
Only one match is allowed if copying.
If there is no match, values of existing variables are not changed.
If ds3cases is not None, supplier dataset cases used are written to ds3cases
if demanderidindex is not None and ds3 is being created, its value is copied to ds3. | src/FUZZY.py | __init__ | IBMPredictiveAnalytics/FUZZY | 1 | python | def __init__(self, demanderdscases, matcher, hashvarindex, supplierdscases, ds3cases, demandercopyindexes, suppliercopyindexes, demanderidindex, drawpoolindex, supplieridindex, group):
'demanderdscases is the demander case to match.\n matcher is the Matcher object to use.\n hashvarindex is the variable index for the hash value variable. The matches are written to following contiguous variables.\n demandercopyindexes and suppliercopyindexes are case indexes for copying values from supplierds to demanderds\n Only one match is allowed if copying.\n If there is no match, values of existing variables are not changed.\n \n If ds3cases is not None, supplier dataset cases used are written to ds3cases\n if demanderidindex is not None and ds3 is being created, its value is copied to ds3.'
attributesFromDict(locals()) | def __init__(self, demanderdscases, matcher, hashvarindex, supplierdscases, ds3cases, demandercopyindexes, suppliercopyindexes, demanderidindex, drawpoolindex, supplieridindex, group):
'demanderdscases is the demander case to match.\n matcher is the Matcher object to use.\n hashvarindex is the variable index for the hash value variable. The matches are written to following contiguous variables.\n demandercopyindexes and suppliercopyindexes are case indexes for copying values from supplierds to demanderds\n Only one match is allowed if copying.\n If there is no match, values of existing variables are not changed.\n \n If ds3cases is not None, supplier dataset cases used are written to ds3cases\n if demanderidindex is not None and ds3 is being created, its value is copied to ds3.'
attributesFromDict(locals())<|docstring|>demanderdscases is the demander case to match.
matcher is the Matcher object to use.
hashvarindex is the variable index for the hash value variable. The matches are written to following contiguous variables.
demandercopyindexes and suppliercopyindexes are case indexes for copying values from supplierds to demanderds
Only one match is allowed if copying.
If there is no match, values of existing variables are not changed.
If ds3cases is not None, supplier dataset cases used are written to ds3cases
if demanderidindex is not None and ds3 is being created, its value is copied to ds3.<|endoftext|> |
a0197faa8a821a57da8780af1ca8417d1058a47715970d5bd2189a04fc47d5c2 | def do(self, casenumber):
'draw match(es) for case casenumber and propagate values as required'
if ((self.matcher.groupindex != None) and (self.demanderdscases[casenumber][self.matcher.groupindex] != 1)):
return 0
(hash, matches, drawpoolsize) = self.matcher.draw(self.demanderdscases[casenumber], self.supplierdscases)
self.demanderdscases[(casenumber, self.hashvarindex)] = hash
if self.drawpoolindex:
self.demanderdscases[(casenumber, self.drawpoolindex)] = drawpoolsize
for m in range(len(matches)):
casenum = matches[m][0]
self.demanderdscases[(casenumber, ((self.hashvarindex + 1) + m))] = matches[m][1]
if (casenum is not None):
for (dv, sv) in zip(self.demandercopyindexes, self.suppliercopyindexes):
self.demanderdscases[(casenumber, dv)] = self.supplierdscases[(casenum, sv)]
if self.ds3cases:
if (casenum is not None):
self.ds3cases.cases.append(self.supplierdscases[casenum])
if self.group:
self.ds3cases.cases[((- 1), (- 2))] = hash
self.ds3cases.cases[((- 1), (- 1))] = self.demanderdscases[(casenumber, self.supplieridindex)]
elif (self.demanderidindex is not None):
self.ds3cases.cases[((- 1), (- 2))] = hash
self.ds3cases.cases[((- 1), (- 1))] = self.demanderdscases[(casenumber, self.demanderidindex)]
else:
self.ds3cases.cases[((- 1), (- 1))] = hash
if (hash is None):
return 0
else:
return matches.count((None, None)) | draw match(es) for case casenumber and propagate values as required | src/FUZZY.py | do | IBMPredictiveAnalytics/FUZZY | 1 | python | def do(self, casenumber):
if ((self.matcher.groupindex != None) and (self.demanderdscases[casenumber][self.matcher.groupindex] != 1)):
return 0
(hash, matches, drawpoolsize) = self.matcher.draw(self.demanderdscases[casenumber], self.supplierdscases)
self.demanderdscases[(casenumber, self.hashvarindex)] = hash
if self.drawpoolindex:
self.demanderdscases[(casenumber, self.drawpoolindex)] = drawpoolsize
for m in range(len(matches)):
casenum = matches[m][0]
self.demanderdscases[(casenumber, ((self.hashvarindex + 1) + m))] = matches[m][1]
if (casenum is not None):
for (dv, sv) in zip(self.demandercopyindexes, self.suppliercopyindexes):
self.demanderdscases[(casenumber, dv)] = self.supplierdscases[(casenum, sv)]
if self.ds3cases:
if (casenum is not None):
self.ds3cases.cases.append(self.supplierdscases[casenum])
if self.group:
self.ds3cases.cases[((- 1), (- 2))] = hash
self.ds3cases.cases[((- 1), (- 1))] = self.demanderdscases[(casenumber, self.supplieridindex)]
elif (self.demanderidindex is not None):
self.ds3cases.cases[((- 1), (- 2))] = hash
self.ds3cases.cases[((- 1), (- 1))] = self.demanderdscases[(casenumber, self.demanderidindex)]
else:
self.ds3cases.cases[((- 1), (- 1))] = hash
if (hash is None):
return 0
else:
return matches.count((None, None)) | def do(self, casenumber):
if ((self.matcher.groupindex != None) and (self.demanderdscases[casenumber][self.matcher.groupindex] != 1)):
return 0
(hash, matches, drawpoolsize) = self.matcher.draw(self.demanderdscases[casenumber], self.supplierdscases)
self.demanderdscases[(casenumber, self.hashvarindex)] = hash
if self.drawpoolindex:
self.demanderdscases[(casenumber, self.drawpoolindex)] = drawpoolsize
for m in range(len(matches)):
casenum = matches[m][0]
self.demanderdscases[(casenumber, ((self.hashvarindex + 1) + m))] = matches[m][1]
if (casenum is not None):
for (dv, sv) in zip(self.demandercopyindexes, self.suppliercopyindexes):
self.demanderdscases[(casenumber, dv)] = self.supplierdscases[(casenum, sv)]
if self.ds3cases:
if (casenum is not None):
self.ds3cases.cases.append(self.supplierdscases[casenum])
if self.group:
self.ds3cases.cases[((- 1), (- 2))] = hash
self.ds3cases.cases[((- 1), (- 1))] = self.demanderdscases[(casenumber, self.supplieridindex)]
elif (self.demanderidindex is not None):
self.ds3cases.cases[((- 1), (- 2))] = hash
self.ds3cases.cases[((- 1), (- 1))] = self.demanderdscases[(casenumber, self.demanderidindex)]
else:
self.ds3cases.cases[((- 1), (- 1))] = hash
if (hash is None):
return 0
else:
return matches.count((None, None))<|docstring|>draw match(es) for case casenumber and propagate values as required<|endoftext|> |
5db057297f89f6aaae827f8a0e52b23d2d18f0c2ff8a33e99d9ae9e7b8187433 | def __init__(self, by, supplierid, demanderds, supplierds, nmatches, samplewithreplacement, minimizememory, fuzz, exactpriority, groupindex, customfuzz):
'by is a variable or list of variables to match on.\n supplierid is the id variable name in the supplier dataset.\n demanderds and supplierds are the demander and supplier datasets.\n nmatches is the number of matches requested for each demander.\n samplewithreplacement indicates sampling with or without replacement.\n If minimizememory is True, an extra data pass is required but memory usage for the supplier set is reduced.\n fuzz is a sequence of fuzz factors, one for each by variable. If the variable is not numeric, fuzz must be None.\n If exactpriority, exact matches get preference over fuzzy matches when fuzzy matching allowed.\n \n A DataStep is expected to be active for this class.'
'The demander object is a dictionary whose keys are the hash of the by variable(s).\n The values are lists of matching suppliers with each list item being a duple (casenumber, idvalue)'
self.demanders = {}
self.demanderbys = {}
self.demandercount = {}
self.suppliercount = {}
self.groupindex = groupindex
self.demandervars = self.buildvars(demanderds, by)
self.demanderscopy = set()
self.suppliervars = self.buildvars(supplierds, by)
self.samplewithreplacement = samplewithreplacement
self.demanderds = demanderds
self.supplierds = supplierds
self.supplierid = self.buildvars(supplierds, [supplierid])[0]
self.nmatches = nmatches
self.minimizememory = minimizememory
self.fuzz = fuzz
if customfuzz:
customparts = customfuzz.split('.')
__import__(customparts[0])
self.customfuzz = getattr(sys.modules[customparts[0]], customparts[1])
else:
self.customfuzz = None
if fuzz:
self.tries = dict(((i, 0) for i in range((len(fuzz) + 1))))
self.rejections = dict(((i, 0) for i in range((len(fuzz) + 1))))
elif customfuzz:
self.tries = {0: 0}
self.rejections = {0: 0}
self.freqs = Freqs()
self.exactpriority = exactpriority
self.bys = {}
self.exactcount = {}
self.counts = [0, 0, 0]
self.usedsuppliers = set() | by is a variable or list of variables to match on.
supplierid is the id variable name in the supplier dataset.
demanderds and supplierds are the demander and supplier datasets.
nmatches is the number of matches requested for each demander.
samplewithreplacement indicates sampling with or without replacement.
If minimizememory is True, an extra data pass is required but memory usage for the supplier set is reduced.
fuzz is a sequence of fuzz factors, one for each by variable. If the variable is not numeric, fuzz must be None.
If exactpriority, exact matches get preference over fuzzy matches when fuzzy matching is allowed.
A DataStep is expected to be active for this class. | src/FUZZY.py | __init__ | IBMPredictiveAnalytics/FUZZY | 1 | python | def __init__(self, by, supplierid, demanderds, supplierds, nmatches, samplewithreplacement, minimizememory, fuzz, exactpriority, groupindex, customfuzz):
'by is a variable or list of variables to match on.\n supplierid is the id variable name in the supplier dataset.\n demanderds and supplierds are the demander and supplier datasets.\n nmatches is the number of matches requested for each demander.\n samplewithreplacement indicates sampling with or without replacement.\n If minimizememory is True, an extra data pass is required but memory usage for the supplier set is reduced.\n fuzz is a sequence of fuzz factors, one for each by variable. If the variable is not numeric, fuzz must be None.\n If exactpriority, exact matches get preference over fuzzy matches when fuzzy matching allowed.\n \n A DataStep is expected to be active for this class.'
'The demander object is a dictionary whose keys are the hash of the by variable(s).\n The values are lists of matching suppliers with each list item being a duple (casenumber, idvalue)'
self.demanders = {}
self.demanderbys = {}
self.demandercount = {}
self.suppliercount = {}
self.groupindex = groupindex
self.demandervars = self.buildvars(demanderds, by)
self.demanderscopy = set()
self.suppliervars = self.buildvars(supplierds, by)
self.samplewithreplacement = samplewithreplacement
self.demanderds = demanderds
self.supplierds = supplierds
self.supplierid = self.buildvars(supplierds, [supplierid])[0]
self.nmatches = nmatches
self.minimizememory = minimizememory
self.fuzz = fuzz
if customfuzz:
customparts = customfuzz.split('.')
__import__(customparts[0])
self.customfuzz = getattr(sys.modules[customparts[0]], customparts[1])
else:
self.customfuzz = None
if fuzz:
self.tries = dict(((i, 0) for i in range((len(fuzz) + 1))))
self.rejections = dict(((i, 0) for i in range((len(fuzz) + 1))))
elif customfuzz:
self.tries = {0: 0}
self.rejections = {0: 0}
self.freqs = Freqs()
self.exactpriority = exactpriority
self.bys = {}
self.exactcount = {}
self.counts = [0, 0, 0]
self.usedsuppliers = set() | def __init__(self, by, supplierid, demanderds, supplierds, nmatches, samplewithreplacement, minimizememory, fuzz, exactpriority, groupindex, customfuzz):
'by is a variable or list of variables to match on.\n supplierid is the id variable name in the supplier dataset.\n demanderds and supplierds are the demander and supplier datasets.\n nmatches is the number of matches requested for each demander.\n samplewithreplacement indicates sampling with or without replacement.\n If minimizememory is True, an extra data pass is required but memory usage for the supplier set is reduced.\n fuzz is a sequence of fuzz factors, one for each by variable. If the variable is not numeric, fuzz must be None.\n If exactpriority, exact matches get preference over fuzzy matches when fuzzy matching allowed.\n \n A DataStep is expected to be active for this class.'
'The demander object is a dictionary whose keys are the hash of the by variable(s).\n The values are lists of matching suppliers with each list item being a duple (casenumber, idvalue)'
self.demanders = {}
self.demanderbys = {}
self.demandercount = {}
self.suppliercount = {}
self.groupindex = groupindex
self.demandervars = self.buildvars(demanderds, by)
self.demanderscopy = set()
self.suppliervars = self.buildvars(supplierds, by)
self.samplewithreplacement = samplewithreplacement
self.demanderds = demanderds
self.supplierds = supplierds
self.supplierid = self.buildvars(supplierds, [supplierid])[0]
self.nmatches = nmatches
self.minimizememory = minimizememory
self.fuzz = fuzz
if customfuzz:
customparts = customfuzz.split('.')
__import__(customparts[0])
self.customfuzz = getattr(sys.modules[customparts[0]], customparts[1])
else:
self.customfuzz = None
if fuzz:
self.tries = dict(((i, 0) for i in range((len(fuzz) + 1))))
self.rejections = dict(((i, 0) for i in range((len(fuzz) + 1))))
elif customfuzz:
self.tries = {0: 0}
self.rejections = {0: 0}
self.freqs = Freqs()
self.exactpriority = exactpriority
self.bys = {}
self.exactcount = {}
self.counts = [0, 0, 0]
self.usedsuppliers = set()<|docstring|>by is a variable or list of variables to match on.
supplierid is the id variable name in the supplier dataset.
demanderds and supplierds are the demander and supplier datasets.
nmatches is the number of matches requested for each demander.
samplewithreplacement indicates sampling with or without replacement.
If minimizememory is True, an extra data pass is required but memory usage for the supplier set is reduced.
fuzz is a sequence of fuzz factors, one for each by variable. If the variable is not numeric, fuzz must be None.
If exactpriority, exact matches get preference over fuzzy matches when fuzzy matching is allowed.
A DataStep is expected to be active for this class.<|endoftext|> |
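The customfuzz argument is a "module.function" string; the module is imported and the attribute looked up, and the resulting callable is later invoked with the demander and supplier BY values (see rehash further down). A hypothetical comparator, saved as mymatchers.py and passed as customfuzz="mymatchers.agesex", could look like this:

# mymatchers.py  (hypothetical module, not part of the FUZZY source)
def agesex(demander_values, supplier_values):
    # Both arguments hold the BY-variable values, in BY order.
    # Return 1 to accept the pair as a fuzzy match, 0 to reject it.
    age_d, sex_d = demander_values
    age_s, sex_s = supplier_values
    return 1 if abs(age_d - age_s) <= 2 and sex_d == sex_s else 0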
67ec6fd8f38d13e18d1e30b7e31256b5dc0238f268c9d066f12867d6344ba605 | def adddemander(self, case):
'Add a demander. Return 0 or 1 for whether added or not'
if ((self.groupindex != None) and (case[self.groupindex] != 1)):
return 0
(h, keyvalues) = self.hash(self.demandervars, case)
if ((h is not None) and (not (h in self.demanders))):
self.demanders[h] = []
if (self.fuzz or self.customfuzz):
self.bys[h] = keyvalues
if (self.minimizememory and (h is not None)):
self.demandercount[h] = (self.demandercount.get(h, 0) + self.nmatches)
self.demanderscopy.add(h)
return 1 | Add a demander. Return 0 or 1 for whether added or not | src/FUZZY.py | adddemander | IBMPredictiveAnalytics/FUZZY | 1 | python | def adddemander(self, case):
if ((self.groupindex != None) and (case[self.groupindex] != 1)):
return 0
(h, keyvalues) = self.hash(self.demandervars, case)
if ((h is not None) and (not (h in self.demanders))):
self.demanders[h] = []
if (self.fuzz or self.customfuzz):
self.bys[h] = keyvalues
if (self.minimizememory and (h is not None)):
self.demandercount[h] = (self.demandercount.get(h, 0) + self.nmatches)
self.demanderscopy.add(h)
return 1 | def adddemander(self, case):
if ((self.groupindex != None) and (case[self.groupindex] != 1)):
return 0
(h, keyvalues) = self.hash(self.demandervars, case)
if ((h is not None) and (not (h in self.demanders))):
self.demanders[h] = []
if (self.fuzz or self.customfuzz):
self.bys[h] = keyvalues
if (self.minimizememory and (h is not None)):
self.demandercount[h] = (self.demandercount.get(h, 0) + self.nmatches)
self.demanderscopy.add(h)
return 1<|docstring|>Add a demander. Return 0 or 1 for whether added or not<|endoftext|> |
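As the comment string in __init__ notes, self.demanders maps the hash of the BY values to a list of (casenumber, idvalue) duples. A toy illustration of the bookkeeping that adddemander and addsupplier maintain (names and values are made up):

demanders = {}                  # hash of BY values -> list of (casenum, id) suppliers
by_values = (30, "m")           # BY-variable values of one demander case
h = hash(by_values)
demanders.setdefault(h, [])     # adddemander registers the key with an empty list
demanders[h].append((17, 101))  # addsupplier later appends supplier case 17 with id 101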
2d4697138eeacee339af26deb96ba9a336923c68ce0882120ff453893a5a82c8 | def addsupplier(self, case, casenum):
'Add a supplier. If no demander for this case, do nothing.\n \n case is the current supplier case, casenum is its case number saved for later use.\n'
if ((self.groupindex != None) and (case[self.groupindex] != 0)):
return 0
takecount = 0
hlist = []
if (not (self.fuzz or self.customfuzz)):
(h, values) = self.hash(self.suppliervars, case)
if (h in self.demanders):
if (not self.minimizememory):
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
elif (len(self.demanders[h]) < (self.demandercount[h] * self.nmatches)):
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
else:
if self.minimizememory:
demanders = self.demanderscopy
else:
demanders = self.demanders
for h in demanders:
matchlevel = self.rehash(h, case)
if (matchlevel == 0):
continue
if (not self.minimizememory):
if (matchlevel == 2):
self.demanders[h].insert(0, (casenum, case[self.supplierid]))
self.exactcount[h] = (self.exactcount.get(h, 0) + 1)
else:
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
else:
shortfall = ((self.demandercount[h] * self.nmatches) - len(self.demanders[h]))
if (shortfall == 1):
demanders.remove(h)
if (shortfall > 0):
hlist.append(h)
break
if (len(hlist) > 0):
winner = random.choice(hlist)
self.demanders[winner].append((casenum, case[self.supplierid]))
takecount = 1
return takecount | Add a supplier. If no demander for this case, do nothing.
case is the current supplier case, casenum is its case number saved for later use. | src/FUZZY.py | addsupplier | IBMPredictiveAnalytics/FUZZY | 1 | python | def addsupplier(self, case, casenum):
'Add a supplier. If no demander for this case, do nothing.\n \n case is the current supplier case, casenum is its case number saved for later use.\n'
if ((self.groupindex != None) and (case[self.groupindex] != 0)):
return 0
takecount = 0
hlist = []
if (not (self.fuzz or self.customfuzz)):
(h, values) = self.hash(self.suppliervars, case)
if (h in self.demanders):
if (not self.minimizememory):
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
elif (len(self.demanders[h]) < (self.demandercount[h] * self.nmatches)):
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
else:
if self.minimizememory:
demanders = self.demanderscopy
else:
demanders = self.demanders
for h in demanders:
matchlevel = self.rehash(h, case)
if (matchlevel == 0):
continue
if (not self.minimizememory):
if (matchlevel == 2):
self.demanders[h].insert(0, (casenum, case[self.supplierid]))
self.exactcount[h] = (self.exactcount.get(h, 0) + 1)
else:
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
else:
shortfall = ((self.demandercount[h] * self.nmatches) - len(self.demanders[h]))
if (shortfall == 1):
demanders.remove(h)
if (shortfall > 0):
hlist.append(h)
break
if (len(hlist) > 0):
winner = random.choice(hlist)
self.demanders[winner].append((casenum, case[self.supplierid]))
takecount = 1
return takecount | def addsupplier(self, case, casenum):
'Add a supplier. If no demander for this case, do nothing.\n \n case is the current supplier case, casenum is its case number saved for later use.\n'
if ((self.groupindex != None) and (case[self.groupindex] != 0)):
return 0
takecount = 0
hlist = []
if (not (self.fuzz or self.customfuzz)):
(h, values) = self.hash(self.suppliervars, case)
if (h in self.demanders):
if (not self.minimizememory):
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
elif (len(self.demanders[h]) < (self.demandercount[h] * self.nmatches)):
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
else:
if self.minimizememory:
demanders = self.demanderscopy
else:
demanders = self.demanders
for h in demanders:
matchlevel = self.rehash(h, case)
if (matchlevel == 0):
continue
if (not self.minimizememory):
if (matchlevel == 2):
self.demanders[h].insert(0, (casenum, case[self.supplierid]))
self.exactcount[h] = (self.exactcount.get(h, 0) + 1)
else:
self.demanders[h].append((casenum, case[self.supplierid]))
takecount += 1
else:
shortfall = ((self.demandercount[h] * self.nmatches) - len(self.demanders[h]))
if (shortfall == 1):
demanders.remove(h)
if (shortfall > 0):
hlist.append(h)
break
if (len(hlist) > 0):
winner = random.choice(hlist)
self.demanders[winner].append((casenum, case[self.supplierid]))
takecount = 1
return takecount<|docstring|>Add a supplier. If no demander for this case, do nothing.
case is the current supplier case, casenum is its case number saved for later use.<|endoftext|> |
48c25307399e6d0dc2ced5071eb15a3299ea1932a7b6a36a19a9bb57b33cd659 | def rehash(self, h, case):
'Test supplier case against demander case allowing for fuzzy matching.\n \n h is the current demander case hash\n case is the current supplier case\n return is \n - 0 if no match\n - 1 if fuzzy match\n - 2 if exact match\n '
(hh, values) = self.hash(self.suppliervars, case)
self.tries[0] += 1
if (hh == h):
return 2
else:
self.rejections[0] += 1
dcase = self.bys[h]
if self.customfuzz:
result = self.customfuzz(dcase, [case[i] for i in self.suppliervars])
else:
result = 1
for (i, fuzz) in enumerate(self.fuzz):
self.tries[(i + 1)] += 1
if (not (diff(dcase[i], case[self.suppliervars[i]]) <= fuzz)):
self.rejections[(i + 1)] += 1
result = 0
break
return result | Test supplier case against demander case allowing for fuzzy matching.
h is the current demander case hash
case is the current supplier case
return is
- 0 if no match
- 1 if fuzzy match
- 2 if exact match | src/FUZZY.py | rehash | IBMPredictiveAnalytics/FUZZY | 1 | python | def rehash(self, h, case):
'Test supplier case against demander case allowing for fuzzy matching.\n \n h is the current demander case hash\n case is the current supplier case\n return is \n - 0 if no match\n - 1 if fuzzy match\n - 2 if exact match\n '
(hh, values) = self.hash(self.suppliervars, case)
self.tries[0] += 1
if (hh == h):
return 2
else:
self.rejections[0] += 1
dcase = self.bys[h]
if self.customfuzz:
result = self.customfuzz(dcase, [case[i] for i in self.suppliervars])
else:
result = 1
for (i, fuzz) in enumerate(self.fuzz):
self.tries[(i + 1)] += 1
if (not (diff(dcase[i], case[self.suppliervars[i]]) <= fuzz)):
self.rejections[(i + 1)] += 1
result = 0
break
return result | def rehash(self, h, case):
'Test supplier case against demander case allowing for fuzzy matching.\n \n h is the current demander case hash\n case is the current supplier case\n return is \n - 0 if no match\n - 1 if fuzzy match\n - 2 if exact match\n '
(hh, values) = self.hash(self.suppliervars, case)
self.tries[0] += 1
if (hh == h):
return 2
else:
self.rejections[0] += 1
dcase = self.bys[h]
if self.customfuzz:
result = self.customfuzz(dcase, [case[i] for i in self.suppliervars])
else:
result = 1
for (i, fuzz) in enumerate(self.fuzz):
self.tries[(i + 1)] += 1
if (not (diff(dcase[i], case[self.suppliervars[i]]) <= fuzz)):
self.rejections[(i + 1)] += 1
result = 0
break
return result<|docstring|>Test supplier case against demander case allowing for fuzzy matching.
h is the current demander case hash
case is the current supplier case
return is
- 0 if no match
- 1 if fuzzy match
- 2 if exact match<|endoftext|> |
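The fuzzy branch above accepts a pair when every BY variable differs by no more than its half-range, using the diff helper shown earlier. A standalone sketch of that test (a hypothetical helper, not in the source):

def within_fuzz(demander_values, supplier_values, fuzz):
    # True when each BY variable is within its half-range (use 0 for string variables).
    return all(diff(d, s) <= f
               for d, s, f in zip(demander_values, supplier_values, fuzz))

# e.g. ages within 2 years and identical sex:
# within_fuzz((30, "m"), (31, "m"), fuzz=(2, 0))  -> True
# within_fuzz((30, "m"), (34, "m"), fuzz=(2, 0))  -> False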
df5c772dbb813246c5582f91fb4d9766fb3ea8803b5083fde856deefa92ab3bb | def filteredlist(self, h):
'Return the list of potential suppliers\n \n h is the demander hash\n If samplewithreplacement is False, any suppliers already used are removed and the exactcount\n field is adjusted'
thelist = self.demanders.get(h, ())
if self.samplewithreplacement:
return thelist
exactcount = self.exactcount.get(h, 0)
lenthelist = len(thelist)
for j in range(lenthelist, 0, (- 1)):
i = (j - 1)
(casenum, hh) = thelist[i]
if (casenum in self.usedsuppliers):
thelist.pop(i)
if (i < exactcount):
self.exactcount[h] -= 1
return thelist | Return the list of potential suppliers
h is the demander hash
If samplewithreplacement is False, any suppliers already used are removed and the exactcount
field is adjusted | src/FUZZY.py | filteredlist | IBMPredictiveAnalytics/FUZZY | 1 | python | def filteredlist(self, h):
'Return the list of potential suppliers\n \n h is the demander hash\n If samplewithreplacement is False, any suppliers already used are removed and the exactcount\n field is adjusted'
thelist = self.demanders.get(h, ())
if self.samplewithreplacement:
return thelist
exactcount = self.exactcount.get(h, 0)
lenthelist = len(thelist)
for j in range(lenthelist, 0, (- 1)):
i = (j - 1)
(casenum, hh) = thelist[i]
if (casenum in self.usedsuppliers):
thelist.pop(i)
if (i < exactcount):
self.exactcount[h] -= 1
return thelist | def filteredlist(self, h):
'Return the list of potential suppliers\n \n h is the demander hash\n If samplewithreplacement is False, any suppliers already used are removed and the exactcount\n field is adjusted'
thelist = self.demanders.get(h, ())
if self.samplewithreplacement:
return thelist
exactcount = self.exactcount.get(h, 0)
lenthelist = len(thelist)
for j in range(lenthelist, 0, (- 1)):
i = (j - 1)
(casenum, hh) = thelist[i]
if (casenum in self.usedsuppliers):
thelist.pop(i)
if (i < exactcount):
self.exactcount[h] -= 1
return thelist<|docstring|>Return the list of potential suppliers
h is the demander hash
If samplewithreplacement is False, any suppliers already used are removed and the exactcount
field is adjusted<|endoftext|> |
d31cd87e68f248917a8964c60c7ccdae40035dc7b78cfe0fe831671485540ad6 | def draw(self, case, supplierdscases):
'Try to draw matches for demander case case.\n \n Return a list of nmatches match ids preceded by the hash value. If no match is possible, None is returned for each.\n If the case is missing any match variable, no matches will be drawn.\n If using fuzzy matching and exact matches get priority, an exact match is first attempted and if not available, a fallback\n to a fuzzy match is attempted.\n '
if ((self.groupindex != None) and (case[self.groupindex] != 1)):
return (None, [(None, None)], None)
(h, values) = self.hash(self.demandervars, case)
thelist = self.filteredlist(h)
draws = []
listsize = len(thelist)
initiallistsize = listsize
self.freqs.accumulate(initiallistsize)
for i in range(self.nmatches):
if (listsize == 0):
draws.append((None, None))
self.counts[2] += 1
else:
if (self.fuzz and self.exactpriority):
exactcount = self.exactcount.get(h, 0)
if (exactcount > 0):
choiceindex = (random.randint(1, exactcount) - 1)
if self.samplewithreplacement:
draws.append(thelist[choiceindex])
else:
draws.append(thelist.pop(choiceindex))
self.usedsuppliers.add(draws[(- 1)][0])
self.exactcount[h] -= 1
listsize -= 1
self.counts[0] += 1
continue
choiceindex = (random.randint(1, listsize) - 1)
if self.samplewithreplacement:
draws.append(thelist[choiceindex])
else:
draws.append(thelist.pop(choiceindex))
self.usedsuppliers.add(draws[(- 1)][0])
listsize -= 1
(shash, svalues) = self.hash(self.suppliervars, supplierdscases[draws[(- 1)][0]])
if (shash == h):
self.counts[0] += 1
else:
self.counts[1] += 1
return (h, draws, initiallistsize) | Try to draw matches for demander case case.
Return a list of nmatches match ids preceded by the hash value. If no match is possible, None is returned for each.
If the case is missing any match variable, no matches will be drawn.
If using fuzzy matching and exact matches get priority, an exact match is first attempted and if not available, a fallback
to a fuzzy match is attempted. | src/FUZZY.py | draw | IBMPredictiveAnalytics/FUZZY | 1 | python | def draw(self, case, supplierdscases):
'Try to draw matches for demander case case.\n \n Return a list of nmatches match ids preceded by the hash value. If no match is possible, None is returned for each.\n If the case is missing any match variable, no matches will be drawn.\n If using fuzzy matching and exact matches get priority, an exact match is first attempted and if not available, a fallback\n to a fuzzy match is attempted.\n '
if ((self.groupindex != None) and (case[self.groupindex] != 1)):
return (None, [(None, None)], None)
(h, values) = self.hash(self.demandervars, case)
thelist = self.filteredlist(h)
draws = []
listsize = len(thelist)
initiallistsize = listsize
self.freqs.accumulate(initiallistsize)
for i in range(self.nmatches):
if (listsize == 0):
draws.append((None, None))
self.counts[2] += 1
else:
if (self.fuzz and self.exactpriority):
exactcount = self.exactcount.get(h, 0)
if (exactcount > 0):
choiceindex = (random.randint(1, exactcount) - 1)
if self.samplewithreplacement:
draws.append(thelist[choiceindex])
else:
draws.append(thelist.pop(choiceindex))
self.usedsuppliers.add(draws[(- 1)][0])
self.exactcount[h] -= 1
listsize -= 1
self.counts[0] += 1
continue
choiceindex = (random.randint(1, listsize) - 1)
if self.samplewithreplacement:
draws.append(thelist[choiceindex])
else:
draws.append(thelist.pop(choiceindex))
self.usedsuppliers.add(draws[(- 1)][0])
listsize -= 1
(shash, svalues) = self.hash(self.suppliervars, supplierdscases[draws[(- 1)][0]])
if (shash == h):
self.counts[0] += 1
else:
self.counts[1] += 1
return (h, draws, initiallistsize) | def draw(self, case, supplierdscases):
'Try to draw matches for demander case case.\n \n Return a list of nmatches match ids preceded by the hash value. If no match is possible, None is returned for each.\n If the case is missing any match variable, no matches will be drawn.\n If using fuzzy matching and exact matches get priority, an exact match is first attempted and if not available, a fallback\n to a fuzzy match is attempted.\n '
if ((self.groupindex != None) and (case[self.groupindex] != 1)):
return (None, [(None, None)], None)
(h, values) = self.hash(self.demandervars, case)
thelist = self.filteredlist(h)
draws = []
listsize = len(thelist)
initiallistsize = listsize
self.freqs.accumulate(initiallistsize)
for i in range(self.nmatches):
if (listsize == 0):
draws.append((None, None))
self.counts[2] += 1
else:
if (self.fuzz and self.exactpriority):
exactcount = self.exactcount.get(h, 0)
if (exactcount > 0):
choiceindex = (random.randint(1, exactcount) - 1)
if self.samplewithreplacement:
draws.append(thelist[choiceindex])
else:
draws.append(thelist.pop(choiceindex))
self.usedsuppliers.add(draws[(- 1)][0])
self.exactcount[h] -= 1
listsize -= 1
self.counts[0] += 1
continue
choiceindex = (random.randint(1, listsize) - 1)
if self.samplewithreplacement:
draws.append(thelist[choiceindex])
else:
draws.append(thelist.pop(choiceindex))
self.usedsuppliers.add(draws[(- 1)][0])
listsize -= 1
(shash, svalues) = self.hash(self.suppliervars, supplierdscases[draws[(- 1)][0]])
if (shash == h):
self.counts[0] += 1
else:
self.counts[1] += 1
return (h, draws, initiallistsize)<|docstring|>Try to draw matches for demander case case.
Return a list of nmatches match ids preceded by the hash value. If no match is possible, None is returned for each.
If the case is missing any match variable, no matches will be drawn.
If using fuzzy matching and exact matches get priority, an exact match is first attempted and if not available, a fallback
to a fuzzy match is attempted.<|endoftext|> |
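The selection loop above draws a random index for each of the nmatches slots and either keeps the candidate (with replacement) or pops it from the list (without replacement), padding with None once the pool is exhausted. A condensed, hypothetical version of just that loop:

import random

def sample_matches(candidates, nmatches, withreplacement):
    draws = []
    for _ in range(nmatches):
        if not candidates:
            draws.append(None)                 # pool exhausted: unmatched slot
        elif withreplacement:
            draws.append(random.choice(candidates))
        else:
            draws.append(candidates.pop(random.randrange(len(candidates))))
    return draws

# sample_matches([(17, 101), (23, 102)], nmatches=3, withreplacement=False)
# -> e.g. [(23, 102), (17, 101), None]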
05703a26a7ac8d38dd7cf48e0a8da355f6a00b790ffd87100771ff9cf375f344 | def hash(self, indexes, case):
'Return a hash of the case according to the indexes in the indexes tuple and the key values.\n \n If any value in the index is None or, for strings, blank, the result is None, None\n indexes is the list of indexes into the case vector'
keys = tuple([case[v] for v in indexes])
for v in keys:
if isinstance(v, str):
if (v.rstrip() == ''):
return (None, None)
elif (v is None):
return (None, None)
return (hash(keys), keys) | Return a hash of the case according to the indexes in the indexes tuple and the key values.
If any value in the index is None or, for strings, blank, the result is None, None
indexes is the list of indexes into the case vector | src/FUZZY.py | hash | IBMPredictiveAnalytics/FUZZY | 1 | python | def hash(self, indexes, case):
'Return a hash of the case according to the indexes in the indexes tuple and the key values.\n \n If any value in the index is None or, for strings, blank, the result is None, None\n indexes is the list of indexes into the case vector'
keys = tuple([case[v] for v in indexes])
for v in keys:
if isinstance(v, str):
if (v.rstrip() == ):
return (None, None)
elif (v is None):
return (None, None)
return (hash(keys), keys) | def hash(self, indexes, case):
'Return a hash of the case according to the indexes in the indexes tuple and the key values.\n \n If any value in the index is None or, for strings, blank, the result is None, None\n indexes is the list of indexes into the case vector'
keys = tuple([case[v] for v in indexes])
for v in keys:
if isinstance(v, str):
if (v.rstrip() == ):
return (None, None)
elif (v is None):
return (None, None)
return (hash(keys), keys)<|docstring|>Return a hash of the case according to the indexes in the indexes tuple and the key values.
If any value in the index is None or, for strings, blank, the result is None, None
indexes is the list of indexes into the case vector<|endoftext|> |
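A worked example of the rule documented above: the key values are picked out of the case by index and hashed as a tuple, and any missing value disqualifies the case (illustrative values only):

case = (30, "m", 101)            # hypothetical case vector
indexes = (0, 1)                 # positions of the BY variables
keys = tuple(case[i] for i in indexes)
print(hash(keys), keys)          # -> some integer, (30, 'm')

# With a blank string or None in any key position, e.g. case = (30, "  ", 101),
# the method above returns (None, None) and the case is skipped.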
96cf94ab33ddb12bff013b5ce2a4a1570f9c1e6f8da385af2253e28b618d8fd4 | def buildvars(self, ds, by):
'return a tuple of variable indexes for by.\n \n ds is the dataset.\n by is a sequence of variables for matching'
try:
return tuple([ds.varlist[v].index for v in by])
except:
raise ValueError((_('Undefined variable in BY list: %s') % v)) | return a tuple of variable indexes for by.
ds is the dataset.
by is a sequence of variables for matching | src/FUZZY.py | buildvars | IBMPredictiveAnalytics/FUZZY | 1 | python | def buildvars(self, ds, by):
'return a tuple of variable indexes for by.\n \n ds is the dataset.\n by is a sequence of variables for matching'
try:
return tuple([ds.varlist[v].index for v in by])
except:
raise ValueError((_('Undefined variable in BY list: %s') % v)) | def buildvars(self, ds, by):
'return a tuple of variable indexes for by.\n \n ds is the dataset.\n by is a sequence of variables for matching'
try:
return tuple([ds.varlist[v].index for v in by])
except:
raise ValueError((_('Undefined variable in BY list: %s') % v))<|docstring|>return a tuple of variable indexes for by.
ds is the dataset.
by is a sequence of variables for matching<|endoftext|> |
618542bdb451665104b2baca717509c1d65cf97b8b2aaa7b2ba0c30cababe8a9 | def __init__(self, logfile, accessmode):
'Enable logging\n \n logfile is the path and name for the log file or None\n accessmode is "overwrite" or "append" '
self.logfile = logfile
if (logfile is not None):
filemode = (((accessmode == 'overwrite') and 'w') or 'a')
logging.basicConfig(filename=logfile, level=logging.INFO, filemode=filemode, format='%(asctime)s: %(message)s', datefmt='%H:%M:%S')
logging.info(('Run started: %s' % time.asctime()))
self.starttime = time.time() | Enable logging
logfile is the path and name for the log file or None
accessmode is "overwrite" or "append" | src/FUZZY.py | __init__ | IBMPredictiveAnalytics/FUZZY | 1 | python | def __init__(self, logfile, accessmode):
'Enable logging\n \n logfile is the path and name for the log file or None\n accessmode is "overwrite" or "append" '
self.logfile = logfile
if (logfile is not None):
filemode = (((accessmode == 'overwrite') and 'w') or 'a')
logging.basicConfig(filename=logfile, level=logging.INFO, filemode=filemode, format='%(asctime)s: %(message)s', datefmt='%H:%M:%S')
logging.info(('Run started: %s' % time.asctime()))
self.starttime = time.time() | def __init__(self, logfile, accessmode):
'Enable logging\n \n logfile is the path and name for the log file or None\n accessmode is "overwrite" or "append" '
self.logfile = logfile
if (logfile is not None):
filemode = (((accessmode == 'overwrite') and 'w') or 'a')
logging.basicConfig(filename=logfile, level=logging.INFO, filemode=filemode, format='%(asctime)s: %(message)s', datefmt='%H:%M:%S')
logging.info(('Run started: %s' % time.asctime()))
self.starttime = time.time()<|docstring|>Enable logging
logfile is the path and name for the log file or None
accessmode is "overwrite" or "append"<|endoftext|> |
cabd5b7566773f94bb99ee8efebc9ef09faddb5ef8f516f6dde41dacc846b403 | def info(self, message):
'Add message to the log if logging'
if self.logfile:
logging.info(message) | Add message to the log if logging | src/FUZZY.py | info | IBMPredictiveAnalytics/FUZZY | 1 | python | def info(self, message):
if self.logfile:
logging.info(message) | def info(self, message):
if self.logfile:
logging.info(message)<|docstring|>Add message to the log if logging<|endoftext|> |
06687d5b393256a7440690732cba09f1ce401b6fd9479156b9efd3e815486c55 | def setup_package():
'\n Runs package setup\n '
setup(**INFO) | Runs package setup | setup.py | setup_package | vishalbelsare/uravu | 19 | python | def setup_package():
'\n \n '
setup(**INFO) | def setup_package():
'\n \n '
setup(**INFO)<|docstring|>Runs package setup<|endoftext|> |
1e6623e8e5472d2976c876185406f141809b22725b817ff2aec479a7a51a2d76 | def formula_str_to_dict(sumform: Union[(str, bytes)]) -> Dict[(str, str)]:
'\n converts an atom name like C12 to the element symbol C\n Use this code to find the atoms while going through the character astream of a sumformula\n e.g. C12H6O3Mn7\n Find two-char atoms, them one-char, and see if numbers are in between.\n '
elements = [x.upper() for x in atoms]
atlist = {}
nums = []
try:
sumform = sumform.upper().replace(' ', '').replace('\n', '').replace('\r', '')
except AttributeError:
print('Error in formula_str_to_dict')
return atlist
def isnumber(el):
for x in el:
if (x.isnumeric() or (x == '.')):
nums.append(x)
else:
break
while sumform:
if (sumform[0:2] in elements):
isnumber(sumform[2:])
atlist[sumform[0:2].capitalize()] = ''.join(nums)
sumform = sumform[(2 + len(nums)):]
nums.clear()
elif (sumform[0] in elements):
isnumber(sumform[1:])
atlist[sumform[0]] = ''.join(nums)
sumform = sumform[(1 + len(nums)):]
nums.clear()
else:
raise KeyError
return atlist | converts an atom name like C12 to the element symbol C
Use this code to find the atoms while going through the character stream of a sumformula
e.g. C12H6O3Mn7
Find two-char atoms, then one-char, and see if numbers are in between. | tools/sumformula.py | formula_str_to_dict | dkratzert/FinalCif | 13 | python | def formula_str_to_dict(sumform: Union[(str, bytes)]) -> Dict[(str, str)]:
'\n converts an atom name like C12 to the element symbol C\n Use this code to find the atoms while going through the character astream of a sumformula\n e.g. C12H6O3Mn7\n Find two-char atoms, them one-char, and see if numbers are in between.\n '
elements = [x.upper() for x in atoms]
atlist = {}
nums = []
try:
sumform = sumform.upper().replace(' ', ).replace('\n', ).replace('\r', )
except AttributeError:
print('Error in formula_str_to_dict')
return atlist
def isnumber(el):
for x in el:
if (x.isnumeric() or (x == '.')):
nums.append(x)
else:
break
while sumform:
if (sumform[0:2] in elements):
isnumber(sumform[2:])
atlist[sumform[0:2].capitalize()] = .join(nums)
sumform = sumform[(2 + len(nums)):]
nums.clear()
elif (sumform[0] in elements):
isnumber(sumform[1:])
atlist[sumform[0]] = .join(nums)
sumform = sumform[(1 + len(nums)):]
nums.clear()
else:
raise KeyError
return atlist | def formula_str_to_dict(sumform: Union[(str, bytes)]) -> Dict[(str, str)]:
'\n converts an atom name like C12 to the element symbol C\n Use this code to find the atoms while going through the character astream of a sumformula\n e.g. C12H6O3Mn7\n Find two-char atoms, them one-char, and see if numbers are in between.\n '
elements = [x.upper() for x in atoms]
atlist = {}
nums = []
try:
sumform = sumform.upper().replace(' ', ).replace('\n', ).replace('\r', )
except AttributeError:
print('Error in formula_str_to_dict')
return atlist
def isnumber(el):
for x in el:
if (x.isnumeric() or (x == '.')):
nums.append(x)
else:
break
while sumform:
if (sumform[0:2] in elements):
isnumber(sumform[2:])
atlist[sumform[0:2].capitalize()] = .join(nums)
sumform = sumform[(2 + len(nums)):]
nums.clear()
elif (sumform[0] in elements):
isnumber(sumform[1:])
atlist[sumform[0]] = .join(nums)
sumform = sumform[(1 + len(nums)):]
nums.clear()
else:
raise KeyError
return atlist<|docstring|>converts an atom name like C12 to the element symbol C
Use this code to find the atoms while going through the character stream of a sumformula
e.g. C12H6O3Mn7
Find two-char atoms, then one-char, and see if numbers are in between.<|endoftext|>
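Example calls matching the docstring (the output values follow from the parsing loop above):

print(formula_str_to_dict("C12H6O3Mn7"))
# -> {'C': '12', 'H': '6', 'O': '3', 'Mn': '7'}
print(formula_str_to_dict("CH4"))
# -> {'C': '', 'H': '4'}   (an element without a count keeps an empty string)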
c3b4cacfd6f1dcf0ecaeed101dcfb0f67d631af2f1c39418c597b62348dacd00 | def sum_formula_to_html(sumform: Dict[(str, str)], break_after: int=99) -> str:
'\n Makes html formatted sum formula from dictionary.\n '
if (not sumform):
return ''
l = ['<html><body>']
num = 0
for el in sumform:
if ((sumform[el] == 0) or (sumform[el] == None)):
continue
try:
times = round(float(sumform[el]), 1)
except (TypeError, ValueError):
times = 1
if ((num > 3) and ((num % break_after) == 0)):
l.append('<br>')
if (times == 1):
l.append('{}'.format(el))
else:
l.append('{}<sub>{:g}</sub>'.format(el, times))
num += 1
l.append('</body></html>')
formula = ''.join(l)
return formula | Makes html formatted sum formula from dictionary. | tools/sumformula.py | sum_formula_to_html | dkratzert/FinalCif | 13 | python | def sum_formula_to_html(sumform: Dict[(str, str)], break_after: int=99) -> str:
'\n \n '
if (not sumform):
return
l = ['<html><body>']
num = 0
for el in sumform:
if ((sumform[el] == 0) or (sumform[el] == None)):
continue
try:
times = round(float(sumform[el]), 1)
except (TypeError, ValueError):
times = 1
if ((num > 3) and ((num % break_after) == 0)):
l.append('<br>')
if (times == 1):
l.append('{}'.format(el))
else:
l.append('{}<sub>{:g}</sub>'.format(el, times))
num += 1
l.append('</body></html>')
formula = .join(l)
return formula | def sum_formula_to_html(sumform: Dict[(str, str)], break_after: int=99) -> str:
'\n \n '
if (not sumform):
return
l = ['<html><body>']
num = 0
for el in sumform:
if ((sumform[el] == 0) or (sumform[el] == None)):
continue
try:
times = round(float(sumform[el]), 1)
except (TypeError, ValueError):
times = 1
if ((num > 3) and ((num % break_after) == 0)):
l.append('<br>')
if (times == 1):
l.append('{}'.format(el))
else:
l.append('{}<sub>{:g}</sub>'.format(el, times))
num += 1
l.append('</body></html>')
formula = .join(l)
return formula<|docstring|>Makes html formatted sum formula from dictionary.<|endoftext|> |
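Example call, feeding in the dictionary produced by formula_str_to_dict above (the exact string follows from the formatting loop):

print(sum_formula_to_html({'C': '12', 'H': '6', 'O': '3', 'Mn': '7'}))
# -> <html><body>C<sub>12</sub>H<sub>6</sub>O<sub>3</sub>Mn<sub>7</sub></body></html>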
fcad6fa5cd38035a4ccad94f5b68e4af3dcc828553ae0131859df4dd62cc8a19 | def reset_NGLsettings():
'\n Reset NGL settings to their default values as specified in the phil definition string\n '
NGLparams = NGLmaster_phil.fetch(source=libtbx.phil.parse(ngl_philstr)).extract() | Reset NGL settings to their default values as specified in the phil definition string | crys3d/hklview/jsview_3d.py | reset_NGLsettings | indu-in/cctbx_project1 | 2 | python | def reset_NGLsettings():
'\n \n '
NGLparams = NGLmaster_phil.fetch(source=libtbx.phil.parse(ngl_philstr)).extract() | def reset_NGLsettings():
'\n \n '
NGLparams = NGLmaster_phil.fetch(source=libtbx.phil.parse(ngl_philstr)).extract()<|docstring|>Reset NGL settings to their default values as specified in the phil definition string<|endoftext|> |
b87f14a301b171c15d607a7f40aceac53ab23ec49260d798cd13d7e9dec780f3 | def NGLsettings():
'\n Get a global phil parameters object containing some NGL settings\n '
return NGLparams | Get a global phil parameters object containing some NGL settings | crys3d/hklview/jsview_3d.py | NGLsettings | indu-in/cctbx_project1 | 2 | python | def NGLsettings():
'\n \n '
return NGLparams | def NGLsettings():
'\n \n '
return NGLparams<|docstring|>Get a global phil parameters object containing some NGL settings<|endoftext|> |
08c5198a9c4085abf3c8b55b26611b18fc1d8d24a5f7b97d106997c52aee6a52 | def AddVector(self, s1, s2, s3, t1, t2, t3, isreciprocal=True, label='', r=0, g=0, b=0, name=''):
'\n Place vector from {s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label\n If name=="" creation is deferred until AddVector is eventually called with name != ""\n These vectors are then joined in the same NGL representation\n '
uc = self.miller_array.unit_cell()
vec1 = ((s1 * self.scene.renderscale), (s2 * self.scene.renderscale), (s3 * self.scene.renderscale))
vec2 = ((t1 * self.scene.renderscale), (t2 * self.scene.renderscale), (t3 * self.scene.renderscale))
if isreciprocal:
vec1 = list((vec1 * matrix.sqr(uc.fractionalization_matrix()).transpose()))
vec2 = list((vec2 * matrix.sqr(uc.fractionalization_matrix()).transpose()))
svec1 = [vec1[0], vec1[1], vec1[2]]
svec2 = [vec2[0], vec2[1], vec2[2]]
else:
vec1 = list((vec1 * matrix.sqr(uc.orthogonalization_matrix())))
vec2 = list((vec2 * matrix.sqr(uc.orthogonalization_matrix())))
vscale = 1.0
svec1 = [(vscale * vec1[0]), (vscale * vec1[1]), (vscale * vec1[2])]
svec2 = [(vscale * vec2[0]), (vscale * vec2[1]), (vscale * vec2[2])]
self.mprint(('cartesian vector is: %s to %s' % (str(roundoff(svec1)), str(roundoff(svec2)))), verbose=2)
svec = [(svec2[0] - svec1[0]), (svec2[1] - svec1[1]), (svec2[2] - svec1[2])]
xyvec = svec[:]
xyvec[2] = 0.0
xyvecnorm = math.sqrt(((xyvec[0] * xyvec[0]) + (xyvec[1] * xyvec[1])))
if (xyvecnorm > 0.0):
angle_x_xyvec = ((math.acos((xyvec[0] / xyvecnorm)) * 180.0) / math.pi)
angle_y_xyvec = ((math.acos((xyvec[1] / xyvecnorm)) * 180.0) / math.pi)
else:
angle_x_xyvec = 90.0
angle_y_xyvec = 90.0
yzvec = svec[:]
yzvec[0] = 0.0
yzvecnorm = math.sqrt(((yzvec[1] * yzvec[1]) + (yzvec[2] * yzvec[2])))
if (yzvecnorm > 0.0):
angle_y_yzvec = ((math.acos((yzvec[1] / yzvecnorm)) * 180.0) / math.pi)
angle_z_yzvec = ((math.acos((yzvec[2] / yzvecnorm)) * 180.0) / math.pi)
else:
angle_y_yzvec = 90.0
angle_z_yzvec = 90.0
svecnorm = math.sqrt((((svec[0] * svec[0]) + (svec[1] * svec[1])) + (svec[2] * svec[2])))
angle_x_svec = ((math.acos((svec[0] / svecnorm)) * 180.0) / math.pi)
angle_y_svec = ((math.acos((svec[1] / svecnorm)) * 180.0) / math.pi)
angle_z_svec = ((math.acos((svec[2] / svecnorm)) * 180.0) / math.pi)
if (angle_y_svec > 90.0):
angle_x_xyvec = (- angle_x_xyvec)
self.mprint(('angles in xy plane to x,y axis are: %s, %s' % (angle_x_xyvec, angle_y_xyvec)), verbose=2)
self.mprint(('angles in yz plane to y,z axis are: %s, %s' % (angle_y_yzvec, angle_z_yzvec)), verbose=2)
self.mprint(('angles to x,y,z axis are: %s, %s, %s' % (angle_x_svec, angle_y_svec, angle_z_svec)), verbose=2)
self.mprint(('deferred rendering vector from (%s, %s, %s) to (%s, %s, %s)' % (s1, s2, s3, t1, t2, t3)), verbose=2)
self.AddToBrowserMsgQueue('AddVector', ('%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s' % tuple(((svec1 + svec2) + [r, g, b, label, name]))))
return (angle_x_xyvec, angle_z_svec) | Place vector from {s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label
If name=="" creation is deferred until AddVector is eventually called with name != ""
These vectors are then joined in the same NGL representation | crys3d/hklview/jsview_3d.py | AddVector | indu-in/cctbx_project1 | 2 | python | def AddVector(self, s1, s2, s3, t1, t2, t3, isreciprocal=True, label=, r=0, g=0, b=0, name=):
'\n Place vector from {s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label\n If name== creation is deferred until AddVector is eventually called with name != \n These vectors are then joined in the same NGL representation\n '
uc = self.miller_array.unit_cell()
vec1 = ((s1 * self.scene.renderscale), (s2 * self.scene.renderscale), (s3 * self.scene.renderscale))
vec2 = ((t1 * self.scene.renderscale), (t2 * self.scene.renderscale), (t3 * self.scene.renderscale))
if isreciprocal:
vec1 = list((vec1 * matrix.sqr(uc.fractionalization_matrix()).transpose()))
vec2 = list((vec2 * matrix.sqr(uc.fractionalization_matrix()).transpose()))
svec1 = [vec1[0], vec1[1], vec1[2]]
svec2 = [vec2[0], vec2[1], vec2[2]]
else:
vec1 = list((vec1 * matrix.sqr(uc.orthogonalization_matrix())))
vec2 = list((vec2 * matrix.sqr(uc.orthogonalization_matrix())))
vscale = 1.0
svec1 = [(vscale * vec1[0]), (vscale * vec1[1]), (vscale * vec1[2])]
svec2 = [(vscale * vec2[0]), (vscale * vec2[1]), (vscale * vec2[2])]
self.mprint(('cartesian vector is: %s to %s' % (str(roundoff(svec1)), str(roundoff(svec2)))), verbose=2)
svec = [(svec2[0] - svec1[0]), (svec2[1] - svec1[1]), (svec2[2] - svec1[2])]
xyvec = svec[:]
xyvec[2] = 0.0
xyvecnorm = math.sqrt(((xyvec[0] * xyvec[0]) + (xyvec[1] * xyvec[1])))
if (xyvecnorm > 0.0):
angle_x_xyvec = ((math.acos((xyvec[0] / xyvecnorm)) * 180.0) / math.pi)
angle_y_xyvec = ((math.acos((xyvec[1] / xyvecnorm)) * 180.0) / math.pi)
else:
angle_x_xyvec = 90.0
angle_y_xyvec = 90.0
yzvec = svec[:]
yzvec[0] = 0.0
yzvecnorm = math.sqrt(((yzvec[1] * yzvec[1]) + (yzvec[2] * yzvec[2])))
if (yzvecnorm > 0.0):
angle_y_yzvec = ((math.acos((yzvec[1] / yzvecnorm)) * 180.0) / math.pi)
angle_z_yzvec = ((math.acos((yzvec[2] / yzvecnorm)) * 180.0) / math.pi)
else:
angle_y_yzvec = 90.0
angle_z_yzvec = 90.0
svecnorm = math.sqrt((((svec[0] * svec[0]) + (svec[1] * svec[1])) + (svec[2] * svec[2])))
angle_x_svec = ((math.acos((svec[0] / svecnorm)) * 180.0) / math.pi)
angle_y_svec = ((math.acos((svec[1] / svecnorm)) * 180.0) / math.pi)
angle_z_svec = ((math.acos((svec[2] / svecnorm)) * 180.0) / math.pi)
if (angle_y_svec > 90.0):
angle_x_xyvec = (- angle_x_xyvec)
self.mprint(('angles in xy plane to x,y axis are: %s, %s' % (angle_x_xyvec, angle_y_xyvec)), verbose=2)
self.mprint(('angles in yz plane to y,z axis are: %s, %s' % (angle_y_yzvec, angle_z_yzvec)), verbose=2)
self.mprint(('angles to x,y,z axis are: %s, %s, %s' % (angle_x_svec, angle_y_svec, angle_z_svec)), verbose=2)
self.mprint(('deferred rendering vector from (%s, %s, %s) to (%s, %s, %s)' % (s1, s2, s3, t1, t2, t3)), verbose=2)
self.AddToBrowserMsgQueue('AddVector', ('%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s' % tuple(((svec1 + svec2) + [r, g, b, label, name]))))
return (angle_x_xyvec, angle_z_svec) | def AddVector(self, s1, s2, s3, t1, t2, t3, isreciprocal=True, label=, r=0, g=0, b=0, name=):
'\n Place vector from {s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label\n If name== creation is deferred until AddVector is eventually called with name != \n These vectors are then joined in the same NGL representation\n '
uc = self.miller_array.unit_cell()
vec1 = ((s1 * self.scene.renderscale), (s2 * self.scene.renderscale), (s3 * self.scene.renderscale))
vec2 = ((t1 * self.scene.renderscale), (t2 * self.scene.renderscale), (t3 * self.scene.renderscale))
if isreciprocal:
vec1 = list((vec1 * matrix.sqr(uc.fractionalization_matrix()).transpose()))
vec2 = list((vec2 * matrix.sqr(uc.fractionalization_matrix()).transpose()))
svec1 = [vec1[0], vec1[1], vec1[2]]
svec2 = [vec2[0], vec2[1], vec2[2]]
else:
vec1 = list((vec1 * matrix.sqr(uc.orthogonalization_matrix())))
vec2 = list((vec2 * matrix.sqr(uc.orthogonalization_matrix())))
vscale = 1.0
svec1 = [(vscale * vec1[0]), (vscale * vec1[1]), (vscale * vec1[2])]
svec2 = [(vscale * vec2[0]), (vscale * vec2[1]), (vscale * vec2[2])]
self.mprint(('cartesian vector is: %s to %s' % (str(roundoff(svec1)), str(roundoff(svec2)))), verbose=2)
svec = [(svec2[0] - svec1[0]), (svec2[1] - svec1[1]), (svec2[2] - svec1[2])]
xyvec = svec[:]
xyvec[2] = 0.0
xyvecnorm = math.sqrt(((xyvec[0] * xyvec[0]) + (xyvec[1] * xyvec[1])))
if (xyvecnorm > 0.0):
angle_x_xyvec = ((math.acos((xyvec[0] / xyvecnorm)) * 180.0) / math.pi)
angle_y_xyvec = ((math.acos((xyvec[1] / xyvecnorm)) * 180.0) / math.pi)
else:
angle_x_xyvec = 90.0
angle_y_xyvec = 90.0
yzvec = svec[:]
yzvec[0] = 0.0
yzvecnorm = math.sqrt(((yzvec[1] * yzvec[1]) + (yzvec[2] * yzvec[2])))
if (yzvecnorm > 0.0):
angle_y_yzvec = ((math.acos((yzvec[1] / yzvecnorm)) * 180.0) / math.pi)
angle_z_yzvec = ((math.acos((yzvec[2] / yzvecnorm)) * 180.0) / math.pi)
else:
angle_y_yzvec = 90.0
angle_z_yzvec = 90.0
svecnorm = math.sqrt((((svec[0] * svec[0]) + (svec[1] * svec[1])) + (svec[2] * svec[2])))
angle_x_svec = ((math.acos((svec[0] / svecnorm)) * 180.0) / math.pi)
angle_y_svec = ((math.acos((svec[1] / svecnorm)) * 180.0) / math.pi)
angle_z_svec = ((math.acos((svec[2] / svecnorm)) * 180.0) / math.pi)
if (angle_y_svec > 90.0):
angle_x_xyvec = (- angle_x_xyvec)
self.mprint(('angles in xy plane to x,y axis are: %s, %s' % (angle_x_xyvec, angle_y_xyvec)), verbose=2)
self.mprint(('angles in yz plane to y,z axis are: %s, %s' % (angle_y_yzvec, angle_z_yzvec)), verbose=2)
self.mprint(('angles to x,y,z axis are: %s, %s, %s' % (angle_x_svec, angle_y_svec, angle_z_svec)), verbose=2)
self.mprint(('deferred rendering vector from (%s, %s, %s) to (%s, %s, %s)' % (s1, s2, s3, t1, t2, t3)), verbose=2)
self.AddToBrowserMsgQueue('AddVector', ('%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s' % tuple(((svec1 + svec2) + [r, g, b, label, name]))))
return (angle_x_xyvec, angle_z_svec)<|docstring|>Place vector from {s1, s2, s3] to [t1, t2, t3] with colour r,g,b and label
If name=="" creation is deferred until AddVector is eventually called with name != ""
These vectors are then joined in the same NGL representation<|endoftext|> |
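The axis-angle bookkeeping inside AddVector is just direction cosines; a standalone sketch of that step, in plain Python with no NGL or cctbx dependencies assumed:

import math

def direction_angles(vec):
    # angle in degrees between vec and each of the x, y, z axes, as AddVector computes them
    norm = math.sqrt(sum(c * c for c in vec))
    return tuple(math.degrees(math.acos(c / norm)) for c in vec)

print(direction_angles([1.0, 1.0, 0.0]))  # (45.0, 45.0, 90.0) up to rounding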
9eae436ac6a0e16e5868816f20144ac114521d76289d64d116ed8dff8c690024 | def getParser(self):
'\n setup my argument parser\n \n sets self.parser as a side effect\n \n Returns:\n ArgumentParser: the argument parser\n '
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-l', '--login', dest='login', action='store_true', help='login to source wiki for access permission')
parser.add_argument('-s', '--source', dest='source', help='source wiki id', required=True)
parser.add_argument('-p', '--pages', dest='pages', nargs='+', help='Names of the pages the action should be applied to')
parser.add_argument('--wikiTextPath', dest='backupPath', help='Path to store/update the wiki entries', required=False)
parser.add_argument('--listFile', dest='file_list', help='List of pages from which the data should be extracted', required=False)
parser.add_argument('-t', '--template', dest='template', help='Select a template (entity) to user for rendering/filtering')
parser.add_argument('-stdin', dest='stdin', action='store_true', help='Use the input from STD IN using pipes')
parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Enable debug mode')
self.parser = parser
return parser | setup my argument parser
sets self.parser as a side effect
Returns:
ArgumentParser: the argument parser | wikifile/cmdline.py | getParser | tholzheim/wikirender | 0 | python | def getParser(self):
'\n setup my argument parser\n \n sets self.parser as a side effect\n \n Returns:\n ArgumentParser: the argument parser\n '
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-l', '--login', dest='login', action='store_true', help='login to source wiki for access permission')
parser.add_argument('-s', '--source', dest='source', help='source wiki id', required=True)
parser.add_argument('-p', '--pages', dest='pages', nargs='+', help='Names of the pages the action should be applied to')
parser.add_argument('--wikiTextPath', dest='backupPath', help='Path to store/update the wiki entries', required=False)
parser.add_argument('--listFile', dest='file_list', help='List of pages from which the data should be extracted', required=False)
parser.add_argument('-t', '--template', dest='template', help='Select a template (entity) to user for rendering/filtering')
parser.add_argument('-stdin', dest='stdin', action='store_true', help='Use the input from STD IN using pipes')
parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Enable debug mode')
self.parser = parser
return parser | def getParser(self):
'\n setup my argument parser\n \n sets self.parser as a side effect\n \n Returns:\n ArgumentParser: the argument parser\n '
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-l', '--login', dest='login', action='store_true', help='login to source wiki for access permission')
parser.add_argument('-s', '--source', dest='source', help='source wiki id', required=True)
parser.add_argument('-p', '--pages', dest='pages', nargs='+', help='Names of the pages the action should be applied to')
parser.add_argument('--wikiTextPath', dest='backupPath', help='Path to store/update the wiki entries', required=False)
parser.add_argument('--listFile', dest='file_list', help='List of pages from which the data should be extracted', required=False)
parser.add_argument('-t', '--template', dest='template', help='Select a template (entity) to user for rendering/filtering')
parser.add_argument('-stdin', dest='stdin', action='store_true', help='Use the input from STD IN using pipes')
parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='Enable debug mode')
self.parser = parser
return parser<|docstring|>setup my argument parser
sets self.parser as a side effect
Returns:
ArgumentParser: the argument parser<|endoftext|> |
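A hedged usage sketch for the parser above; CmdLineAble is the owning class referenced elsewhere in this file, its construction details are assumed, and the argument values are placeholders:

cmd = CmdLineAble()                      # assumes the class can be instantiated without arguments
parser = cmd.getParser()
args = parser.parse_args(['-s', 'mywiki', '--wikiTextPath', '/tmp/backup', '--debug'])
print(args.source, args.backupPath, args.debug)   # mywiki /tmp/backup True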
a2256bf427f8952e5269f4b40bbec10d8aa5dbd99659597e54a989e0b8860155 | def initLogging(self, args):
'\n initialize the logging\n '
if args.debug:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
else:
logging.basicConfig(stream=sys.stdout, level=logging.INFO) | initialize the logging | wikifile/cmdline.py | initLogging | tholzheim/wikirender | 0 | python | def initLogging(self, args):
'\n \n '
if args.debug:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
else:
logging.basicConfig(stream=sys.stdout, level=logging.INFO) | def initLogging(self, args):
'\n \n '
if args.debug:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
else:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)<|docstring|>initialize the logging<|endoftext|> |
44feb95bc9d34370b43f0ee9724a4ca59be0a8b5e059901a740065c1788fde36 | def getPageTitlesForArgs(self, args):
'\n see also wikirestore in wikipush of py-3rdparty-mediawiki\n \n Args:\n args(): parsed arguments\n \n Returns:\n List of pageTitles as specified\n '
page_titles = args.pages
stdIn = args.stdin
file_list = args.file_list
file_parameters = [args.stdin, args.pages, args.file_list]
if ((len(file_parameters) - (file_parameters.count(None) + file_parameters.count(False))) > 1):
logging.error('Multiple file selection options were used. Please use only one or none to select all files in the backup folder.')
raise Exception('Invalid parameters')
if stdIn:
page_titles = sys.stdin.readlines()
pageTitlesfix = []
for page in page_titles:
pageTitlesfix.append(page)
page_titles = pageTitlesfix
elif (file_list is not None):
f = open(file_list, 'r')
allx = f.readlines()
page_titles = []
for page in allx:
page_titles.append(page)
elif (page_titles is None):
page_titles = CmdLineAble.getPageTitlesForWikiTextPath(args.backupPath)
total = len(page_titles)
logging.debug(f'extracting templates from {total} wikifiles.')
return page_titles | see also wikirestore in wikipush of py-3rdparty-mediawiki
Args:
args(): parsed arguments
Returns:
List of pageTitles as specified | wikifile/cmdline.py | getPageTitlesForArgs | tholzheim/wikirender | 0 | python | def getPageTitlesForArgs(self, args):
'\n see also wikirestore in wikipush of py-3rdparty-mediawiki\n \n Args:\n args(): parsed arguments\n \n Returns:\n List of pageTitles as specified\n '
page_titles = args.pages
stdIn = args.stdin
file_list = args.file_list
file_parameters = [args.stdin, args.pages, args.file_list]
if ((len(file_parameters) - (file_parameters.count(None) + file_parameters.count(False))) > 1):
logging.error('Multiple file selection options were used. Please use only one or none to select all files in the backup folder.')
raise Exception('Invalid parameters')
if stdIn:
page_titles = sys.stdin.readlines()
pageTitlesfix = []
for page in page_titles:
pageTitlesfix.append(page)
page_titles = pageTitlesfix
elif (file_list is not None):
f = open(file_list, 'r')
allx = f.readlines()
page_titles = []
for page in allx:
page_titles.append(page)
elif (page_titles is None):
page_titles = CmdLineAble.getPageTitlesForWikiTextPath(args.backupPath)
total = len(page_titles)
logging.debug(f'extracting templates from {total} wikifiles.')
return page_titles | def getPageTitlesForArgs(self, args):
'\n see also wikirestore in wikipush of py-3rdparty-mediawiki\n \n Args:\n args(): parsed arguments\n \n Returns:\n List of pageTitles as specified\n '
page_titles = args.pages
stdIn = args.stdin
file_list = args.file_list
file_parameters = [args.stdin, args.pages, args.file_list]
if ((len(file_parameters) - (file_parameters.count(None) + file_parameters.count(False))) > 1):
logging.error('Multiple file selection options were used. Please use only one or none to select all files in the backup folder.')
raise Exception('Invalid parameters')
if stdIn:
page_titles = sys.stdin.readlines()
pageTitlesfix = []
for page in page_titles:
pageTitlesfix.append(page)
page_titles = pageTitlesfix
elif (file_list is not None):
f = open(file_list, 'r')
allx = f.readlines()
page_titles = []
for page in allx:
page_titles.append(page)
elif (page_titles is None):
page_titles = CmdLineAble.getPageTitlesForWikiTextPath(args.backupPath)
total = len(page_titles)
logging.debug(f'extracting templates from {total} wikifiles.')
return page_titles<|docstring|>see also wikirestore in wikipush of py-3rdparty-mediawiki
Args:
args(): parsed arguments
Returns:
List of pageTitles as specified<|endoftext|> |
575db3f0253b2e3287c313d533ba34b0087c70729e2ccef88b1d52c18e1bf91e | @staticmethod
def getPageTitlesForWikiTextPath(backup_path: str) -> list:
'\n get the page titles for the given backupPath\n \n Args: \n backup_path(str): the path to the WikiText Files (e.g. created by wikibackup)\n \n Returns:\n list: a list of PageTitles\n '
page_titles = []
if backup_path:
for (path, _subdirs, files) in os.walk(backup_path):
for name in files:
filename = os.path.join(path, name)[(len(backup_path) + 1):]
if filename.endswith('.wiki'):
page_titles.append(filename[:(- len('.wiki'))])
return page_titles | get the page titles for the given backupPath
Args:
backup_path(str): the path to the WikiText Files (e.g. created by wikibackup)
Returns:
list: a list of PageTitles | wikifile/cmdline.py | getPageTitlesForWikiTextPath | tholzheim/wikirender | 0 | python | @staticmethod
def getPageTitlesForWikiTextPath(backup_path: str) -> list:
'\n get the page titles for the given backupPath\n \n Args: \n backup_path(str): the path to the WikiText Files (e.g. created by wikibackup)\n \n Returns:\n list: a list of PageTitles\n '
page_titles = []
if backup_path:
for (path, _subdirs, files) in os.walk(backup_path):
for name in files:
filename = os.path.join(path, name)[(len(backup_path) + 1):]
if filename.endswith('.wiki'):
page_titles.append(filename[:(- len('.wiki'))])
return page_titles | @staticmethod
def getPageTitlesForWikiTextPath(backup_path: str) -> list:
'\n get the page titles for the given backupPath\n \n Args: \n backup_path(str): the path to the WikiText Files (e.g. created by wikibackup)\n \n Returns:\n list: a list of PageTitles\n '
page_titles = []
if backup_path:
for (path, _subdirs, files) in os.walk(backup_path):
for name in files:
filename = os.path.join(path, name)[(len(backup_path) + 1):]
if filename.endswith('.wiki'):
page_titles.append(filename[:(- len('.wiki'))])
return page_titles<|docstring|>get the page titles for the given backupPath
Args:
backup_path(str): the path to the WikiText Files (e.g. created by wikibackup)
Returns:
list: a list of PageTitles<|endoftext|> |
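A small sketch of the directory walk performed by the static helper above, assuming a wikibackup-style folder containing .wiki files; the path is hypothetical:

import os

backup_path = '/tmp/wikibackup'                                  # hypothetical backup folder
os.makedirs(backup_path, exist_ok=True)
open(os.path.join(backup_path, 'Main_Page.wiki'), 'w').close()   # create one empty page file
print(CmdLineAble.getPageTitlesForWikiTextPath(backup_path))     # ['Main_Page']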
2555f609189c9c07eabb45c4a275bb9fb8e88543638a086d579f19849504d18e | @classmethod
def _parse_list(cls, data, sub_item=False):
'Parse a list of JSON objects into a result set of model instances.'
results = ResultSet()
data = (data or [])
for obj in data:
if obj:
results.append(cls._parse(obj, sub_item=sub_item))
return results | Parse a list of JSON objects into a result set of model instances. | musixmatch/models.py | _parse_list | yakupadakli/python-musixmatch | 3 | python | @classmethod
def _parse_list(cls, data, sub_item=False):
results = ResultSet()
data = (data or [])
for obj in data:
if obj:
results.append(cls._parse(obj, sub_item=sub_item))
return results | @classmethod
def _parse_list(cls, data, sub_item=False):
results = ResultSet()
data = (data or [])
for obj in data:
if obj:
results.append(cls._parse(obj, sub_item=sub_item))
return results<|docstring|>Parse a list of JSON objects into a result set of model instances.<|endoftext|> |
847970a9ef0781a994754c5c28d08c5cd0c32917af55dabe071b52490bdab1b1 | def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"\n See https://gist.github.com/syrte/592a062c562cd2a98a83 \n\n Make a scatter plot of circles. \n Similar to plt.scatter, but the size of circles are in data scale.\n Parameters\n ----------\n x, y : scalar or array_like, shape (n, )\n Input data\n s : scalar or array_like, shape (n, ) \n Radius of circles.\n c : color or sequence of color, optional, default : 'b'\n `c` can be a single color format string, or a sequence of color\n specifications of length `N`, or a sequence of `N` numbers to be\n mapped to colors using the `cmap` and `norm` specified via kwargs.\n Note that `c` should not be a single numeric RGB or RGBA sequence \n because that is indistinguishable from an array of values\n to be colormapped. (If you insist, use `color` instead.) \n `c` can be a 2-D array in which the rows are RGB or RGBA, however. \n vmin, vmax : scalar, optional, default: None\n `vmin` and `vmax` are used in conjunction with `norm` to normalize\n luminance data. If either are `None`, the min and max of the\n color array is used.\n kwargs : `~matplotlib.collections.Collection` properties\n Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), \n norm, cmap, transform, etc.\n Returns\n -------\n paths : `~matplotlib.collections.PathCollection`\n Examples\n --------\n a = np.arange(11)\n circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')\n plt.colorbar()\n License\n --------\n This code is under [The BSD 3-Clause License]\n (http://opensource.org/licenses/BSD-3-Clause)\n "
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if ('fc' in kwargs):
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if ('ec' in kwargs):
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if ('ls' in kwargs):
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if ('lw' in kwargs):
kwargs.setdefault('linewidth', kwargs.pop('lw'))
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_) for (x_, y_, s_) in zipped]
collection = PatchCollection(patches, **kwargs)
if (c is not None):
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if (c is not None):
plt.sci(collection)
return collection | See https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to plt.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause) | bin/svg.py | circles | rheiland/pc4training | 6 | python | def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"\n See https://gist.github.com/syrte/592a062c562cd2a98a83 \n\n Make a scatter plot of circles. \n Similar to plt.scatter, but the size of circles are in data scale.\n Parameters\n ----------\n x, y : scalar or array_like, shape (n, )\n Input data\n s : scalar or array_like, shape (n, ) \n Radius of circles.\n c : color or sequence of color, optional, default : 'b'\n `c` can be a single color format string, or a sequence of color\n specifications of length `N`, or a sequence of `N` numbers to be\n mapped to colors using the `cmap` and `norm` specified via kwargs.\n Note that `c` should not be a single numeric RGB or RGBA sequence \n because that is indistinguishable from an array of values\n to be colormapped. (If you insist, use `color` instead.) \n `c` can be a 2-D array in which the rows are RGB or RGBA, however. \n vmin, vmax : scalar, optional, default: None\n `vmin` and `vmax` are used in conjunction with `norm` to normalize\n luminance data. If either are `None`, the min and max of the\n color array is used.\n kwargs : `~matplotlib.collections.Collection` properties\n Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), \n norm, cmap, transform, etc.\n Returns\n -------\n paths : `~matplotlib.collections.PathCollection`\n Examples\n --------\n a = np.arange(11)\n circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')\n plt.colorbar()\n License\n --------\n This code is under [The BSD 3-Clause License]\n (http://opensource.org/licenses/BSD-3-Clause)\n "
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if ('fc' in kwargs):
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if ('ec' in kwargs):
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if ('ls' in kwargs):
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if ('lw' in kwargs):
kwargs.setdefault('linewidth', kwargs.pop('lw'))
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_) for (x_, y_, s_) in zipped]
collection = PatchCollection(patches, **kwargs)
if (c is not None):
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if (c is not None):
plt.sci(collection)
return collection | def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
"\n See https://gist.github.com/syrte/592a062c562cd2a98a83 \n\n Make a scatter plot of circles. \n Similar to plt.scatter, but the size of circles are in data scale.\n Parameters\n ----------\n x, y : scalar or array_like, shape (n, )\n Input data\n s : scalar or array_like, shape (n, ) \n Radius of circles.\n c : color or sequence of color, optional, default : 'b'\n `c` can be a single color format string, or a sequence of color\n specifications of length `N`, or a sequence of `N` numbers to be\n mapped to colors using the `cmap` and `norm` specified via kwargs.\n Note that `c` should not be a single numeric RGB or RGBA sequence \n because that is indistinguishable from an array of values\n to be colormapped. (If you insist, use `color` instead.) \n `c` can be a 2-D array in which the rows are RGB or RGBA, however. \n vmin, vmax : scalar, optional, default: None\n `vmin` and `vmax` are used in conjunction with `norm` to normalize\n luminance data. If either are `None`, the min and max of the\n color array is used.\n kwargs : `~matplotlib.collections.Collection` properties\n Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), \n norm, cmap, transform, etc.\n Returns\n -------\n paths : `~matplotlib.collections.PathCollection`\n Examples\n --------\n a = np.arange(11)\n circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')\n plt.colorbar()\n License\n --------\n This code is under [The BSD 3-Clause License]\n (http://opensource.org/licenses/BSD-3-Clause)\n "
if np.isscalar(c):
kwargs.setdefault('color', c)
c = None
if ('fc' in kwargs):
kwargs.setdefault('facecolor', kwargs.pop('fc'))
if ('ec' in kwargs):
kwargs.setdefault('edgecolor', kwargs.pop('ec'))
if ('ls' in kwargs):
kwargs.setdefault('linestyle', kwargs.pop('ls'))
if ('lw' in kwargs):
kwargs.setdefault('linewidth', kwargs.pop('lw'))
zipped = np.broadcast(x, y, s)
patches = [Circle((x_, y_), s_) for (x_, y_, s_) in zipped]
collection = PatchCollection(patches, **kwargs)
if (c is not None):
c = np.broadcast_to(c, zipped.shape).ravel()
collection.set_array(c)
collection.set_clim(vmin, vmax)
ax = plt.gca()
ax.add_collection(collection)
ax.autoscale_view()
plt.draw_if_interactive()
if (c is not None):
plt.sci(collection)
return collection<|docstring|>See https://gist.github.com/syrte/592a062c562cd2a98a83
Make a scatter plot of circles.
Similar to plt.scatter, but the size of circles are in data scale.
Parameters
----------
x, y : scalar or array_like, shape (n, )
Input data
s : scalar or array_like, shape (n, )
Radius of circles.
c : color or sequence of color, optional, default : 'b'
`c` can be a single color format string, or a sequence of color
specifications of length `N`, or a sequence of `N` numbers to be
mapped to colors using the `cmap` and `norm` specified via kwargs.
Note that `c` should not be a single numeric RGB or RGBA sequence
because that is indistinguishable from an array of values
to be colormapped. (If you insist, use `color` instead.)
`c` can be a 2-D array in which the rows are RGB or RGBA, however.
vmin, vmax : scalar, optional, default: None
`vmin` and `vmax` are used in conjunction with `norm` to normalize
luminance data. If either are `None`, the min and max of the
color array is used.
kwargs : `~matplotlib.collections.Collection` properties
Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls),
norm, cmap, transform, etc.
Returns
-------
paths : `~matplotlib.collections.PathCollection`
Examples
--------
a = np.arange(11)
circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
plt.colorbar()
License
--------
This code is under [The BSD 3-Clause License]
(http://opensource.org/licenses/BSD-3-Clause)<|endoftext|> |
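The Examples block from the docstring, expanded into a self-contained matplotlib sketch; it assumes the circles helper above is reachable as a plain function (in the recorded file it is defined as a method, so self would be bound by its owner):

import numpy as np
import matplotlib.pyplot as plt

a = np.arange(11)
circles(a, a, s=a * 0.2, c=a, alpha=0.5, ec='none')   # radii are given in data units
plt.colorbar()
plt.show()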
92a4f0b6175ae722d0ea34b16cb9a5728a21ea91ba59135afbeba7d401d2dd6c | def compare_json(json1, json2):
'Compares two JSON values for equality'
return JsonType.eq(json1, json2) | Compares two JSON values for equality | nmostesting/TestHelper.py | compare_json | AMWA-TV/nmos-testing | 25 | python | def compare_json(json1, json2):
return JsonType.eq(json1, json2) | def compare_json(json1, json2):
return JsonType.eq(json1, json2)<|docstring|>Compares two JSON values for equality<|endoftext|> |
d69a9ee31ce6766fc36ad730e2077b3663980169ec14bfeadfdcd1124acce389 | def get_default_ip():
"Get this machine's preferred IPv4 address"
if (CONFIG.BIND_INTERFACE is None):
default_gw = netifaces.gateways()['default']
if (netifaces.AF_INET in default_gw):
preferred_interface = default_gw[netifaces.AF_INET][1]
else:
interfaces = netifaces.interfaces()
preferred_interface = next((i for i in interfaces if (i != 'lo')), interfaces[0])
else:
preferred_interface = CONFIG.BIND_INTERFACE
return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr'] | Get this machine's preferred IPv4 address | nmostesting/TestHelper.py | get_default_ip | AMWA-TV/nmos-testing | 25 | python | def get_default_ip():
if (CONFIG.BIND_INTERFACE is None):
default_gw = netifaces.gateways()['default']
if (netifaces.AF_INET in default_gw):
preferred_interface = default_gw[netifaces.AF_INET][1]
else:
interfaces = netifaces.interfaces()
preferred_interface = next((i for i in interfaces if (i != 'lo')), interfaces[0])
else:
preferred_interface = CONFIG.BIND_INTERFACE
return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr'] | def get_default_ip():
if (CONFIG.BIND_INTERFACE is None):
default_gw = netifaces.gateways()['default']
if (netifaces.AF_INET in default_gw):
preferred_interface = default_gw[netifaces.AF_INET][1]
else:
interfaces = netifaces.interfaces()
preferred_interface = next((i for i in interfaces if (i != 'lo')), interfaces[0])
else:
preferred_interface = CONFIG.BIND_INTERFACE
return netifaces.ifaddresses(preferred_interface)[netifaces.AF_INET][0]['addr']<|docstring|>Get this machine's preferred IPv4 address<|endoftext|> |
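The netifaces lookup at the core of get_default_ip, as a standalone sketch; it needs the netifaces package and the printed address depends on the host:

import netifaces

default_gw = netifaces.gateways()['default']
if netifaces.AF_INET in default_gw:
    iface = default_gw[netifaces.AF_INET][1]          # interface carrying the default route
else:
    iface = netifaces.interfaces()[0]                 # fall back to the first interface
print(netifaces.ifaddresses(iface)[netifaces.AF_INET][0]['addr'])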
cfe4c762c237c54d02d8bb218e7dba31679aecb40215c4f5ba2945e758748ea7 | def do_request(method, url, **kwargs):
'Perform a basic HTTP request with appropriate error handling'
try:
s = requests.Session()
if (('headers' in kwargs) and (kwargs['headers'] is None)):
del kwargs['headers']
if (CONFIG.ENABLE_AUTH and CONFIG.AUTH_TOKEN and ('headers' not in kwargs)):
req = requests.Request(method, url, headers={'Authorization': ('Bearer ' + CONFIG.AUTH_TOKEN)}, **kwargs)
else:
req = requests.Request(method, url, **kwargs)
prepped = s.prepare_request(req)
settings = s.merge_environment_settings(prepped.url, {}, None, CONFIG.CERT_TRUST_ROOT_CA, None)
response = s.send(prepped, timeout=CONFIG.HTTP_TIMEOUT, **settings)
if prepped.url.startswith('https://'):
if (not response.url.startswith('https://')):
return (False, 'Redirect changed protocol')
if (response.history is not None):
for res in response.history:
if (not res.url.startswith('https://')):
return (False, 'Redirect changed protocol')
return (True, response)
except requests.exceptions.Timeout:
return (False, 'Connection timeout')
except requests.exceptions.TooManyRedirects:
return (False, 'Too many redirects')
except requests.exceptions.ConnectionError as e:
return (False, str(e))
except requests.exceptions.RequestException as e:
return (False, str(e)) | Perform a basic HTTP request with appropriate error handling | nmostesting/TestHelper.py | do_request | AMWA-TV/nmos-testing | 25 | python | def do_request(method, url, **kwargs):
try:
s = requests.Session()
if (('headers' in kwargs) and (kwargs['headers'] is None)):
del kwargs['headers']
if (CONFIG.ENABLE_AUTH and CONFIG.AUTH_TOKEN and ('headers' not in kwargs)):
req = requests.Request(method, url, headers={'Authorization': ('Bearer ' + CONFIG.AUTH_TOKEN)}, **kwargs)
else:
req = requests.Request(method, url, **kwargs)
prepped = s.prepare_request(req)
settings = s.merge_environment_settings(prepped.url, {}, None, CONFIG.CERT_TRUST_ROOT_CA, None)
response = s.send(prepped, timeout=CONFIG.HTTP_TIMEOUT, **settings)
if prepped.url.startswith('https://'):
if (not response.url.startswith('https://')):
return (False, 'Redirect changed protocol')
if (response.history is not None):
for res in response.history:
if (not res.url.startswith('https://')):
return (False, 'Redirect changed protocol')
return (True, response)
except requests.exceptions.Timeout:
return (False, 'Connection timeout')
except requests.exceptions.TooManyRedirects:
return (False, 'Too many redirects')
except requests.exceptions.ConnectionError as e:
return (False, str(e))
except requests.exceptions.RequestException as e:
return (False, str(e)) | def do_request(method, url, **kwargs):
try:
s = requests.Session()
if (('headers' in kwargs) and (kwargs['headers'] is None)):
del kwargs['headers']
if (CONFIG.ENABLE_AUTH and CONFIG.AUTH_TOKEN and ('headers' not in kwargs)):
req = requests.Request(method, url, headers={'Authorization': ('Bearer ' + CONFIG.AUTH_TOKEN)}, **kwargs)
else:
req = requests.Request(method, url, **kwargs)
prepped = s.prepare_request(req)
settings = s.merge_environment_settings(prepped.url, {}, None, CONFIG.CERT_TRUST_ROOT_CA, None)
response = s.send(prepped, timeout=CONFIG.HTTP_TIMEOUT, **settings)
if prepped.url.startswith('https://'):
if (not response.url.startswith('https://')):
return (False, 'Redirect changed protocol')
if (response.history is not None):
for res in response.history:
if (not res.url.startswith('https://')):
return (False, 'Redirect changed protocol')
return (True, response)
except requests.exceptions.Timeout:
return (False, 'Connection timeout')
except requests.exceptions.TooManyRedirects:
return (False, 'Too many redirects')
except requests.exceptions.ConnectionError as e:
return (False, str(e))
except requests.exceptions.RequestException as e:
return (False, str(e))<|docstring|>Perform a basic HTTP request with appropriate error handling<|endoftext|> |
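A hedged usage sketch for do_request; the URL is a placeholder and the (ok, response-or-message) return shape follows directly from the code above:

ok, resp = do_request('GET', 'http://localhost:8080/x-nmos/node/v1.3/self')
if ok:
    print(resp.status_code, resp.headers.get('Content-Type'))
else:
    print('request failed:', resp)    # resp is an error string on failure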
d3929844aae9e7b2f96300d9644c6df89ff00ad376684e0aebb976b9becc983c | def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):
'\n Parses JSON as well as resolves any `$ref`s, including references to\n local files and remote (HTTP/S) files.\n '
assert (bool(file_name) != bool(schema_obj))
if path_prefix:
spec_path = os.path.join(spec_path, 'APIs/schemas/')
base_path = os.path.abspath(spec_path)
if (not base_path.endswith('/')):
base_path = (base_path + '/')
if (os.name == 'nt'):
base_uri_path = ('file:///' + base_path.replace('\\', '/'))
else:
base_uri_path = ('file://' + base_path)
loader = jsonref.JsonLoader(cache_results=False)
if file_name:
json_file = str((Path(base_path) / file_name))
with open(json_file, 'r') as f:
schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)
elif schema_obj:
if has_jsonref(schema_obj):
schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)
else:
schema = schema_obj
return schema | Parses JSON as well as resolves any `$ref`s, including references to
local files and remote (HTTP/S) files. | nmostesting/TestHelper.py | load_resolved_schema | AMWA-TV/nmos-testing | 25 | python | def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):
'\n Parses JSON as well as resolves any `$ref`s, including references to\n local files and remote (HTTP/S) files.\n '
assert (bool(file_name) != bool(schema_obj))
if path_prefix:
spec_path = os.path.join(spec_path, 'APIs/schemas/')
base_path = os.path.abspath(spec_path)
if (not base_path.endswith('/')):
base_path = (base_path + '/')
if (os.name == 'nt'):
base_uri_path = ('file:///' + base_path.replace('\\', '/'))
else:
base_uri_path = ('file://' + base_path)
loader = jsonref.JsonLoader(cache_results=False)
if file_name:
json_file = str((Path(base_path) / file_name))
with open(json_file, 'r') as f:
schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)
elif schema_obj:
if has_jsonref(schema_obj):
schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)
else:
schema = schema_obj
return schema | def load_resolved_schema(spec_path, file_name=None, schema_obj=None, path_prefix=True):
'\n Parses JSON as well as resolves any `$ref`s, including references to\n local files and remote (HTTP/S) files.\n '
assert (bool(file_name) != bool(schema_obj))
if path_prefix:
spec_path = os.path.join(spec_path, 'APIs/schemas/')
base_path = os.path.abspath(spec_path)
if (not base_path.endswith('/')):
base_path = (base_path + '/')
if (os.name == 'nt'):
base_uri_path = ('file:///' + base_path.replace('\\', '/'))
else:
base_uri_path = ('file://' + base_path)
loader = jsonref.JsonLoader(cache_results=False)
if file_name:
json_file = str((Path(base_path) / file_name))
with open(json_file, 'r') as f:
schema = jsonref.load(f, base_uri=base_uri_path, loader=loader, jsonschema=True)
elif schema_obj:
if has_jsonref(schema_obj):
schema = jsonref.JsonRef.replace_refs(schema_obj, base_uri=base_uri_path, loader=loader, jsonschema=True)
else:
schema = schema_obj
return schema<|docstring|>Parses JSON as well as resolves any `$ref`s, including references to
local files and remote (HTTP/S) files.<|endoftext|> |
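A usage sketch under the assumption that a local spec checkout with an APIs/schemas/ folder exists; both the spec path and the schema file name are hypothetical:

schema = load_resolved_schema('cache/is-04', 'node.json')   # hypothetical spec path and schema file
print(sorted(schema.get('required', [])))                   # inspect top-level required properties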
491333cee8ee5c1bdfe69a37e5f0242ad74f7ea0b55cbd1b3497fa21e0e85909 | def __init__(self, ws_href):
'\n Initializer\n :param ws_href: websocket url (string)\n '
if (CONFIG.ENABLE_AUTH and CONFIG.AUTH_TOKEN and ('access_token' not in ws_href)):
if ('?' in ws_href):
ws_href += '&access_token={}'.format(CONFIG.AUTH_TOKEN)
else:
ws_href += '?access_token={}'.format(CONFIG.AUTH_TOKEN)
threading.Thread.__init__(self, daemon=True)
self.ws_href = ws_href
try:
self.ws = websocket.WebSocketApp(ws_href, on_message=self.on_message, on_close=self.on_close, on_open=self.on_open, on_error=self.on_error)
except AttributeError:
print(" * ERROR: You have the wrong Python websocket module installed. Please uninstall 'websocket' and install 'websocket-client'")
raise
self.messages = list()
self.error_occurred = False
self.connected = False
self.error_message = '' | Initializer
:param ws_href: websocket url (string) | nmostesting/TestHelper.py | __init__ | AMWA-TV/nmos-testing | 25 | python | def __init__(self, ws_href):
'\n Initializer\n :param ws_href: websocket url (string)\n '
if (CONFIG.ENABLE_AUTH and CONFIG.AUTH_TOKEN and ('access_token' not in ws_href)):
if ('?' in ws_href):
ws_href += '&access_token={}'.format(CONFIG.AUTH_TOKEN)
else:
ws_href += '?access_token={}'.format(CONFIG.AUTH_TOKEN)
threading.Thread.__init__(self, daemon=True)
self.ws_href = ws_href
try:
self.ws = websocket.WebSocketApp(ws_href, on_message=self.on_message, on_close=self.on_close, on_open=self.on_open, on_error=self.on_error)
except AttributeError:
print(" * ERROR: You have the wrong Python websocket module installed. Please uninstall 'websocket' and install 'websocket-client'")
raise
self.messages = list()
self.error_occurred = False
self.connected = False
self.error_message = '' | def __init__(self, ws_href):
'\n Initializer\n :param ws_href: websocket url (string)\n '
if (CONFIG.ENABLE_AUTH and CONFIG.AUTH_TOKEN and ('access_token' not in ws_href)):
if ('?' in ws_href):
ws_href += '&access_token={}'.format(CONFIG.AUTH_TOKEN)
else:
ws_href += '?access_token={}'.format(CONFIG.AUTH_TOKEN)
threading.Thread.__init__(self, daemon=True)
self.ws_href = ws_href
try:
self.ws = websocket.WebSocketApp(ws_href, on_message=self.on_message, on_close=self.on_close, on_open=self.on_open, on_error=self.on_error)
except AttributeError:
print(" * ERROR: You have the wrong Python websocket module installed. Please uninstall 'websocket' and install 'websocket-client'")
raise
self.messages = list()
self.error_occurred = False
self.connected = False
self.error_message = ''<|docstring|>Initializer
:param ws_href: websocket url (string)<|endoftext|> |
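A usage sketch for WebsocketWorker; it subclasses Thread, so the usual pattern is start, wait briefly, then inspect the collected state. The endpoint is a placeholder, and the run method driving the socket is assumed to exist outside this record:

import time

worker = WebsocketWorker('ws://localhost:8080/ws')   # placeholder endpoint
worker.start()
time.sleep(2)                                        # allow the connection attempt to complete
print(worker.connected, worker.error_occurred, worker.messages)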
edbc5e2219d2f788fd1a342585187ea39157023c4f067f411ef027e63e1b104a | def __init__(self, host, port, secure=False, username=None, password=None, topics=[]):
'\n Initializer\n :param host: broker hostname (string)\n :param port: broker port (int)\n :param secure: use TLS (bool)\n :param username: broker username (string)\n :param password: broker password (string)\n :param topics: list of topics to subscribe to (list of string)\n '
self.host = host
self.port = port
self.error_occurred = False
self.connected = False
self.error_message = ''
self.client = mqtt.Client(protocol=mqtt.MQTTv5)
self.client.on_connect = (lambda client, userdata, flags, rc, properties=None: self.on_connect(flags, rc))
self.client.on_disconnect = (lambda client, userdata, rc: self.on_disconnect(rc))
self.client.on_message = (lambda client, userdata, msg: self.on_message(msg))
self.client.on_subscribe = (lambda client, userdata, mid, *args: self.on_subscribe(mid))
self.client.on_log = (lambda client, userdata, level, buf: self.on_log(level, buf))
if secure:
self.client.tls_set(CONFIG.CERT_TRUST_ROOT_CA)
if (username or password):
self.client.username_pw_set(username, password)
self.topics = topics
self.pending_subs = set()
self.messages = [] | Initializer
:param host: broker hostname (string)
:param port: broker port (int)
:param secure: use TLS (bool)
:param username: broker username (string)
:param password: broker password (string)
:param topics: list of topics to subscribe to (list of string) | nmostesting/TestHelper.py | __init__ | AMWA-TV/nmos-testing | 25 | python | def __init__(self, host, port, secure=False, username=None, password=None, topics=[]):
'\n Initializer\n :param host: broker hostname (string)\n :param port: broker port (int)\n :param secure: use TLS (bool)\n :param username: broker username (string)\n :param password: broker password (string)\n :param topics: list of topics to subscribe to (list of string)\n '
self.host = host
self.port = port
self.error_occurred = False
self.connected = False
self.error_message = ''
self.client = mqtt.Client(protocol=mqtt.MQTTv5)
self.client.on_connect = (lambda client, userdata, flags, rc, properties=None: self.on_connect(flags, rc))
self.client.on_disconnect = (lambda client, userdata, rc: self.on_disconnect(rc))
self.client.on_message = (lambda client, userdata, msg: self.on_message(msg))
self.client.on_subscribe = (lambda client, userdata, mid, *args: self.on_subscribe(mid))
self.client.on_log = (lambda client, userdata, level, buf: self.on_log(level, buf))
if secure:
self.client.tls_set(CONFIG.CERT_TRUST_ROOT_CA)
if (username or password):
self.client.username_pw_set(username, password)
self.topics = topics
self.pending_subs = set()
self.messages = [] | def __init__(self, host, port, secure=False, username=None, password=None, topics=[]):
'\n Initializer\n :param host: broker hostname (string)\n :param port: broker port (int)\n :param secure: use TLS (bool)\n :param username: broker username (string)\n :param password: broker password (string)\n :param topics: list of topics to subscribe to (list of string)\n '
self.host = host
self.port = port
self.error_occurred = False
self.connected = False
self.error_message =
self.client = mqtt.Client(protocol=mqtt.MQTTv5)
self.client.on_connect = (lambda client, userdata, flags, rc, properties=None: self.on_connect(flags, rc))
self.client.on_disconnect = (lambda client, userdata, rc: self.on_disconnect(rc))
self.client.on_message = (lambda client, userdata, msg: self.on_message(msg))
self.client.on_subscribe = (lambda client, userdata, mid, *args: self.on_subscribe(mid))
self.client.on_log = (lambda client, userdata, level, buf: self.on_log(level, buf))
if secure:
self.client.tls_set(CONFIG.CERT_TRUST_ROOT_CA)
if (username or password):
self.client.username_pw_set(username, password)
self.topics = topics
self.pending_subs = set()
self.messages = []<|docstring|>Initializer
:param host: broker hostname (string)
:param port: broker port (int)
:param secure: use TLS (bool)
:param username: broker username (string)
:param password: broker password (string)
:param topics: list of topics to subscribe to (list of string)<|endoftext|> |
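A construction-only sketch exercising just the initializer above; broker host, port and topic are placeholders, no connection is attempted, and it presumes a paho-mqtt version compatible with the mqtt.Client call in the record:

worker = MQTTClientWorker('localhost', 1883, secure=False, topics=['x-nmos/#'])
print(worker.topics, worker.connected, worker.error_occurred)   # ['x-nmos/#'] False False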
f288167bbcd1096bc3c33168c88d43e35f66c4fc52c8341387977abd9fd5856f | def solve_board(board, timeout=2):
'\n Returns result[0]=True/False(Solved/Unsolved)\n Returns result[1]=solved board/{"error", "invalid", "unsolved"}\n '
result = []
stop_it = Event()
start = time.time()
stuff_doing_thread = Thread(target=solve_board_1, args=(board, stop_it, result))
stuff_doing_thread.start()
stuff_doing_thread.join(timeout=timeout)
end = time.time()
if ((not stop_it.is_set()) or ((result[0] == False) and (result[1] == 'error'))):
start = time.time()
status = solve_board_2(board)
end = time.time()
if (status == True):
bas = ''
for row in board:
for element in row:
bas += (str(element) + ' ')
result.extend([True, bas])
else:
result.extend([False, 'unsolved'])
time_taken = str((end - start))
time_taken = time_taken[:min(6, len(time_taken))]
return (result[0], result[1], time_taken) | Returns result[0]=True/False(Solved/Unsolved)
Returns result[1]=solved board/{"error", "invalid", "unsolved"} | server/utility/masterSolver.py | solve_board | snehsagarajput/sudoku-solver-app | 0 | python | def solve_board(board, timeout=2):
'\n Returns result[0]=True/False(Solved/Unsolved)\n Returns result[1]=solved board/{"error", "invalid", "unsolved"}\n '
result = []
stop_it = Event()
start = time.time()
stuff_doing_thread = Thread(target=solve_board_1, args=(board, stop_it, result))
stuff_doing_thread.start()
stuff_doing_thread.join(timeout=timeout)
end = time.time()
if ((not stop_it.is_set()) or ((result[0] == False) and (result[1] == 'error'))):
start = time.time()
status = solve_board_2(board)
end = time.time()
if (status == True):
bas = ''
for row in board:
for element in row:
bas += (str(element) + ' ')
result.extend([True, bas])
else:
result.extend([False, 'unsolved'])
time_taken = str((end - start))
time_taken = time_taken[:min(6, len(time_taken))]
return (result[0], result[1], time_taken) | def solve_board(board, timeout=2):
'\n Returns result[0]=True/False(Solved/Unsolved)\n Returns result[1]=solved board/{"error", "invalid", "unsolved"}\n '
result = []
stop_it = Event()
start = time.time()
stuff_doing_thread = Thread(target=solve_board_1, args=(board, stop_it, result))
stuff_doing_thread.start()
stuff_doing_thread.join(timeout=timeout)
end = time.time()
if ((not stop_it.is_set()) or ((result[0] == False) and (result[1] == 'error'))):
start = time.time()
status = solve_board_2(board)
end = time.time()
if (status == True):
bas = ''
for row in board:
for element in row:
bas += (str(element) + ' ')
result.extend([True, bas])
else:
result.extend([False, 'unsolved'])
time_taken = str((end - start))
time_taken = time_taken[:min(6, len(time_taken))]
return (result[0], result[1], time_taken)<|docstring|>Returns result[0]=True/False(Solved/Unsolved)
Returns result[1]=solved board/{"error", "invalid", "unsolved"}<|endoftext|> |
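A usage sketch for solve_board with a standard 9x9 board, 0 marking blanks; the puzzle itself is illustrative and the helpers solve_board_1/solve_board_2 come from the recorded module:

board = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
         [6, 0, 0, 1, 9, 5, 0, 0, 0],
         [0, 9, 8, 0, 0, 0, 0, 6, 0],
         [8, 0, 0, 0, 6, 0, 0, 0, 3],
         [4, 0, 0, 8, 0, 3, 0, 0, 1],
         [7, 0, 0, 0, 2, 0, 0, 0, 6],
         [0, 6, 0, 0, 0, 0, 2, 8, 0],
         [0, 0, 0, 4, 1, 9, 0, 0, 5],
         [0, 0, 0, 0, 8, 0, 0, 7, 9]]
solved, result, elapsed = solve_board(board, timeout=2)
print(solved, elapsed)   # expected: True plus the solve time; result holds the flattened solution string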
84f397c78e4444f458e0464323a4484430f53977e9973e2d725f08af8f5ef282 | def model_proto_to_bytes_and_metadata(model_proto):
'Convert the model protobuf to bytes and metadata.\n\n Args:\n model_proto: Protobuf of the model\n\n Returns:\n bytes_dict: Dictionary of the bytes contained in the model protobuf\n metadata_dict: Dictionary of the meta data in the model protobuf\n '
bytes_dict = {}
metadata_dict = {}
round_number = None
for tensor_proto in model_proto.tensors:
bytes_dict[tensor_proto.name] = tensor_proto.data_bytes
metadata_dict[tensor_proto.name] = [{'int_to_float': proto.int_to_float, 'int_list': proto.int_list, 'bool_list': proto.bool_list} for proto in tensor_proto.transformer_metadata]
if (round_number is None):
round_number = tensor_proto.round_number
else:
assert (round_number == tensor_proto.round_number), f'Round numbers in model are inconsistent: {round_number} and {tensor_proto.round_number}'
return (bytes_dict, metadata_dict, round_number) | Convert the model protobuf to bytes and metadata.
Args:
model_proto: Protobuf of the model
Returns:
bytes_dict: Dictionary of the bytes contained in the model protobuf
metadata_dict: Dictionary of the meta data in the model protobuf | openfl/protocols/utils.py | model_proto_to_bytes_and_metadata | psfoley/openfl | 297 | python | def model_proto_to_bytes_and_metadata(model_proto):
'Convert the model protobuf to bytes and metadata.\n\n Args:\n model_proto: Protobuf of the model\n\n Returns:\n bytes_dict: Dictionary of the bytes contained in the model protobuf\n metadata_dict: Dictionary of the meta data in the model protobuf\n '
bytes_dict = {}
metadata_dict = {}
round_number = None
for tensor_proto in model_proto.tensors:
bytes_dict[tensor_proto.name] = tensor_proto.data_bytes
metadata_dict[tensor_proto.name] = [{'int_to_float': proto.int_to_float, 'int_list': proto.int_list, 'bool_list': proto.bool_list} for proto in tensor_proto.transformer_metadata]
if (round_number is None):
round_number = tensor_proto.round_number
else:
assert (round_number == tensor_proto.round_number), f'Round numbers in model are inconsistent: {round_number} and {tensor_proto.round_number}'
return (bytes_dict, metadata_dict, round_number) | def model_proto_to_bytes_and_metadata(model_proto):
'Convert the model protobuf to bytes and metadata.\n\n Args:\n model_proto: Protobuf of the model\n\n Returns:\n bytes_dict: Dictionary of the bytes contained in the model protobuf\n metadata_dict: Dictionary of the meta data in the model protobuf\n '
bytes_dict = {}
metadata_dict = {}
round_number = None
for tensor_proto in model_proto.tensors:
bytes_dict[tensor_proto.name] = tensor_proto.data_bytes
metadata_dict[tensor_proto.name] = [{'int_to_float': proto.int_to_float, 'int_list': proto.int_list, 'bool_list': proto.bool_list} for proto in tensor_proto.transformer_metadata]
if (round_number is None):
round_number = tensor_proto.round_number
else:
assert (round_number == tensor_proto.round_number), f'Round numbers in model are inconsistent: {round_number} and {tensor_proto.round_number}'
return (bytes_dict, metadata_dict, round_number)<|docstring|>Convert the model protobuf to bytes and metadata.
Args:
model_proto: Protobuf of the model
Returns:
bytes_dict: Dictionary of the bytes contained in the model protobuf
metadata_dict: Dictionary of the meta data in the model protobuf<|endoftext|> |
a43c36648434ec029c7bf552540259dd96f7a74d7da2ff5d78364586aef00cca | def bytes_and_metadata_to_model_proto(bytes_dict, model_id, model_version, is_delta, metadata_dict):
'Convert bytes and metadata to model protobuf.'
model_header = ModelHeader(id=model_id, version=model_version, is_delta=is_delta)
tensor_protos = []
for (key, data_bytes) in bytes_dict.items():
transformer_metadata = metadata_dict[key]
metadata_protos = []
for metadata in transformer_metadata:
if (metadata.get('int_to_float') is not None):
int_to_float = metadata.get('int_to_float')
else:
int_to_float = {}
if (metadata.get('int_list') is not None):
int_list = metadata.get('int_list')
else:
int_list = []
if (metadata.get('bool_list') is not None):
bool_list = metadata.get('bool_list')
else:
bool_list = []
metadata_protos.append(MetadataProto(int_to_float=int_to_float, int_list=int_list, bool_list=bool_list))
tensor_protos.append(TensorProto(name=key, data_bytes=data_bytes, transformer_metadata=metadata_protos))
return ModelProto(header=model_header, tensors=tensor_protos) | Convert bytes and metadata to model protobuf. | openfl/protocols/utils.py | bytes_and_metadata_to_model_proto | psfoley/openfl | 297 | python | def bytes_and_metadata_to_model_proto(bytes_dict, model_id, model_version, is_delta, metadata_dict):
model_header = ModelHeader(id=model_id, version=model_version, is_delta=is_delta)
tensor_protos = []
for (key, data_bytes) in bytes_dict.items():
transformer_metadata = metadata_dict[key]
metadata_protos = []
for metadata in transformer_metadata:
if (metadata.get('int_to_float') is not None):
int_to_float = metadata.get('int_to_float')
else:
int_to_float = {}
if (metadata.get('int_list') is not None):
int_list = metadata.get('int_list')
else:
int_list = []
if (metadata.get('bool_list') is not None):
bool_list = metadata.get('bool_list')
else:
bool_list = []
metadata_protos.append(MetadataProto(int_to_float=int_to_float, int_list=int_list, bool_list=bool_list))
tensor_protos.append(TensorProto(name=key, data_bytes=data_bytes, transformer_metadata=metadata_protos))
return ModelProto(header=model_header, tensors=tensor_protos) | def bytes_and_metadata_to_model_proto(bytes_dict, model_id, model_version, is_delta, metadata_dict):
model_header = ModelHeader(id=model_id, version=model_version, is_delta=is_delta)
tensor_protos = []
for (key, data_bytes) in bytes_dict.items():
transformer_metadata = metadata_dict[key]
metadata_protos = []
for metadata in transformer_metadata:
if (metadata.get('int_to_float') is not None):
int_to_float = metadata.get('int_to_float')
else:
int_to_float = {}
if (metadata.get('int_list') is not None):
int_list = metadata.get('int_list')
else:
int_list = []
if (metadata.get('bool_list') is not None):
bool_list = metadata.get('bool_list')
else:
bool_list = []
metadata_protos.append(MetadataProto(int_to_float=int_to_float, int_list=int_list, bool_list=bool_list))
tensor_protos.append(TensorProto(name=key, data_bytes=data_bytes, transformer_metadata=metadata_protos))
return ModelProto(header=model_header, tensors=tensor_protos)<|docstring|>Convert bytes and metadata to model protobuf.<|endoftext|> |
ef30306781d5f7291c9641db0758e1013bea901df087e404b0a9b483171c11cd | def construct_named_tensor(tensor_key, nparray, transformer_metadata, lossless):
'Construct named tensor.'
metadata_protos = []
for metadata in transformer_metadata:
if (metadata.get('int_to_float') is not None):
int_to_float = metadata.get('int_to_float')
else:
int_to_float = {}
if (metadata.get('int_list') is not None):
int_list = metadata.get('int_list')
else:
int_list = []
if (metadata.get('bool_list') is not None):
bool_list = metadata.get('bool_list')
else:
bool_list = []
metadata_protos.append(MetadataProto(int_to_float=int_to_float, int_list=int_list, bool_list=bool_list))
(tensor_name, origin, round_number, report, tags) = tensor_key
return NamedTensor(name=tensor_name, round_number=round_number, lossless=lossless, report=report, tags=tags, transformer_metadata=metadata_protos, data_bytes=nparray) | Construct named tensor. | openfl/protocols/utils.py | construct_named_tensor | psfoley/openfl | 297 | python | def construct_named_tensor(tensor_key, nparray, transformer_metadata, lossless):
metadata_protos = []
for metadata in transformer_metadata:
if (metadata.get('int_to_float') is not None):
int_to_float = metadata.get('int_to_float')
else:
int_to_float = {}
if (metadata.get('int_list') is not None):
int_list = metadata.get('int_list')
else:
int_list = []
if (metadata.get('bool_list') is not None):
bool_list = metadata.get('bool_list')
else:
bool_list = []
metadata_protos.append(MetadataProto(int_to_float=int_to_float, int_list=int_list, bool_list=bool_list))
(tensor_name, origin, round_number, report, tags) = tensor_key
return NamedTensor(name=tensor_name, round_number=round_number, lossless=lossless, report=report, tags=tags, transformer_metadata=metadata_protos, data_bytes=nparray) | def construct_named_tensor(tensor_key, nparray, transformer_metadata, lossless):
metadata_protos = []
for metadata in transformer_metadata:
if (metadata.get('int_to_float') is not None):
int_to_float = metadata.get('int_to_float')
else:
int_to_float = {}
if (metadata.get('int_list') is not None):
int_list = metadata.get('int_list')
else:
int_list = []
if (metadata.get('bool_list') is not None):
bool_list = metadata.get('bool_list')
else:
bool_list = []
metadata_protos.append(MetadataProto(int_to_float=int_to_float, int_list=int_list, bool_list=bool_list))
(tensor_name, origin, round_number, report, tags) = tensor_key
return NamedTensor(name=tensor_name, round_number=round_number, lossless=lossless, report=report, tags=tags, transformer_metadata=metadata_protos, data_bytes=nparray)<|docstring|>Construct named tensor.<|endoftext|> |
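A short sketch of building a NamedTensor from a TensorKey; the key fields follow the (tensor_name, origin, round_number, report, tags) unpacking used above, and the concrete values are assumptions.

import numpy as np

tensor_key = TensorKey('conv1/weights', 'collaborator_1', 0, False, ('model',))
serialized = np.zeros((3, 3), dtype=np.float32).tobytes()
metadata = [{'int_list': [3, 3]}]  # shape recorded by a hypothetical transformer
named_tensor = construct_named_tensor(tensor_key, serialized, metadata, lossless=True)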
85228e1575061c1f89de5f25d34aae76ace2a025f51e631f76359dcc8f787af8 | def construct_proto(tensor_dict, model_id, model_version, is_delta, compression_pipeline):
'Construct proto.'
bytes_dict = {}
metadata_dict = {}
for (key, array) in tensor_dict.items():
(bytes_dict[key], metadata_dict[key]) = compression_pipeline.forward(data=array)
model_proto = bytes_and_metadata_to_model_proto(bytes_dict=bytes_dict, model_id=model_id, model_version=model_version, is_delta=is_delta, metadata_dict=metadata_dict)
return model_proto | Construct proto. | openfl/protocols/utils.py | construct_proto | psfoley/openfl | 297 | python | def construct_proto(tensor_dict, model_id, model_version, is_delta, compression_pipeline):
bytes_dict = {}
metadata_dict = {}
for (key, array) in tensor_dict.items():
(bytes_dict[key], metadata_dict[key]) = compression_pipeline.forward(data=array)
model_proto = bytes_and_metadata_to_model_proto(bytes_dict=bytes_dict, model_id=model_id, model_version=model_version, is_delta=is_delta, metadata_dict=metadata_dict)
return model_proto | def construct_proto(tensor_dict, model_id, model_version, is_delta, compression_pipeline):
bytes_dict = {}
metadata_dict = {}
for (key, array) in tensor_dict.items():
(bytes_dict[key], metadata_dict[key]) = compression_pipeline.forward(data=array)
model_proto = bytes_and_metadata_to_model_proto(bytes_dict=bytes_dict, model_id=model_id, model_version=model_version, is_delta=is_delta, metadata_dict=metadata_dict)
return model_proto<|docstring|>Construct proto.<|endoftext|> |
0b4477f9d73bf9fc2148ae5fda7bd0607a31129c29ba24e7224823ca2fd24391 | def construct_model_proto(tensor_dict, round_number, tensor_pipe):
'Construct model proto from tensor dict.'
named_tensors = []
for (key, nparray) in tensor_dict.items():
(bytes_data, transformer_metadata) = tensor_pipe.forward(data=nparray)
tensor_key = TensorKey(key, 'agg', round_number, False, ('model',))
named_tensors.append(construct_named_tensor(tensor_key, bytes_data, transformer_metadata, lossless=True))
return ModelProto(tensors=named_tensors) | Construct model proto from tensor dict. | openfl/protocols/utils.py | construct_model_proto | psfoley/openfl | 297 | python | def construct_model_proto(tensor_dict, round_number, tensor_pipe):
named_tensors = []
for (key, nparray) in tensor_dict.items():
(bytes_data, transformer_metadata) = tensor_pipe.forward(data=nparray)
tensor_key = TensorKey(key, 'agg', round_number, False, ('model',))
named_tensors.append(construct_named_tensor(tensor_key, bytes_data, transformer_metadata, lossless=True))
return ModelProto(tensors=named_tensors) | def construct_model_proto(tensor_dict, round_number, tensor_pipe):
named_tensors = []
for (key, nparray) in tensor_dict.items():
(bytes_data, transformer_metadata) = tensor_pipe.forward(data=nparray)
tensor_key = TensorKey(key, 'agg', round_number, False, ('model',))
named_tensors.append(construct_named_tensor(tensor_key, bytes_data, transformer_metadata, lossless=True))
return ModelProto(tensors=named_tensors)<|docstring|>Construct model proto from tensor dict.<|endoftext|> |
8323f1bbdc28649a61e1bdf49e437b5af47821f5db1e3f0584780ec81a2f7f2c | def deconstruct_model_proto(model_proto, compression_pipeline):
'Deconstruct model proto.'
(bytes_dict, metadata_dict, round_number) = model_proto_to_bytes_and_metadata(model_proto)
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key])
return (tensor_dict, round_number) | Deconstruct model proto. | openfl/protocols/utils.py | deconstruct_model_proto | psfoley/openfl | 297 | python | def deconstruct_model_proto(model_proto, compression_pipeline):
(bytes_dict, metadata_dict, round_number) = model_proto_to_bytes_and_metadata(model_proto)
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key])
return (tensor_dict, round_number) | def deconstruct_model_proto(model_proto, compression_pipeline):
(bytes_dict, metadata_dict, round_number) = model_proto_to_bytes_and_metadata(model_proto)
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key])
return (tensor_dict, round_number)<|docstring|>Deconstruct model proto.<|endoftext|> |
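construct_model_proto and deconstruct_model_proto above are inverses of each other; a minimal round-trip sketch, assuming the pipeline object exposes forward()/backward() (the import path for the no-compression pipeline is an assumption).

import numpy as np
from openfl.pipelines import NoCompressionPipeline  # import path assumed

tensor_pipe = NoCompressionPipeline()
tensor_dict = {'fc/bias': np.arange(4, dtype=np.float32)}
proto = construct_model_proto(tensor_dict, round_number=0, tensor_pipe=tensor_pipe)
restored, round_number = deconstruct_model_proto(proto, compression_pipeline=tensor_pipe)
assert round_number == 0
np.testing.assert_array_equal(restored['fc/bias'], tensor_dict['fc/bias'])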
89e1e41addf46d16f5722706f113c9a1dacd42180b7832e01a73adba39913bea | def deconstruct_proto(model_proto, compression_pipeline):
'Deconstruct the protobuf.\n\n    Args:\n        model_proto: The protobuf of the model\n        compression_pipeline: The compression pipeline object\n\n    Returns:\n        tensor_dict: A dictionary of the model tensors\n    '
(bytes_dict, metadata_dict) = model_proto_to_bytes_and_metadata(model_proto)
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key])
return tensor_dict | Deconstruct the protobuf.
Args:
model_proto: The protobuf of the model
compression_pipeline: The compression pipeline object
Returns:
tensor_dict: A dictionary of the model tensors | openfl/protocols/utils.py | deconstruct_proto | psfoley/openfl | 297 | python | def deconstruct_proto(model_proto, compression_pipeline):
'Deconstruct the protobuf.\n\n    Args:\n        model_proto: The protobuf of the model\n        compression_pipeline: The compression pipeline object\n\n    Returns:\n        tensor_dict: A dictionary of the model tensors\n    '
(bytes_dict, metadata_dict) = model_proto_to_bytes_and_metadata(model_proto)
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key])
return tensor_dict | def deconstruct_proto(model_proto, compression_pipeline):
'Deconstruct the protobuf.\n\n    Args:\n        model_proto: The protobuf of the model\n        compression_pipeline: The compression pipeline object\n\n    Returns:\n        tensor_dict: A dictionary of the model tensors\n    '
(bytes_dict, metadata_dict) = model_proto_to_bytes_and_metadata(model_proto)
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key], transformer_metadata=metadata_dict[key])
return tensor_dict<|docstring|>Deconstruct the protobuf.
Args:
model_proto: The protobuf of the model
compression_pipeline: The compression pipeline object
Returns:
tensor_dict: A dictionary of the model tensors<|endoftext|>
ea782010d2da7acf04d06cabb0687f00a8d2e3717f0329d79f4f2ee92615fbe9 | def load_proto(fpath):
'Load the protobuf.\n\n Args:\n fpath: The filepath for the protobuf\n\n Returns:\n protobuf: A protobuf of the model\n '
with open(fpath, 'rb') as f:
loaded = f.read()
model = ModelProto().FromString(loaded)
return model | Load the protobuf.
Args:
fpath: The filepath for the protobuf
Returns:
protobuf: A protobuf of the model | openfl/protocols/utils.py | load_proto | psfoley/openfl | 297 | python | def load_proto(fpath):
'Load the protobuf.\n\n Args:\n fpath: The filepath for the protobuf\n\n Returns:\n protobuf: A protobuf of the model\n '
with open(fpath, 'rb') as f:
loaded = f.read()
model = ModelProto().FromString(loaded)
return model | def load_proto(fpath):
'Load the protobuf.\n\n Args:\n fpath: The filepath for the protobuf\n\n Returns:\n protobuf: A protobuf of the model\n '
with open(fpath, 'rb') as f:
loaded = f.read()
model = ModelProto().FromString(loaded)
return model<|docstring|>Load the protobuf.
Args:
fpath: The filepath for the protobuf
Returns:
protobuf: A protobuf of the model<|endoftext|> |
53a418df8cfa50e29fe4d065bed40a7c77ff0670fad699212c103463daa64dd9 | def dump_proto(model_proto, fpath):
'Dump the protobuf to a file.\n\n Args:\n model_proto: The protobuf of the model\n fpath: The filename to save the model protobuf\n\n '
s = model_proto.SerializeToString()
with open(fpath, 'wb') as f:
f.write(s) | Dump the protobuf to a file.
Args:
model_proto: The protobuf of the model
fpath: The filename to save the model protobuf | openfl/protocols/utils.py | dump_proto | psfoley/openfl | 297 | python | def dump_proto(model_proto, fpath):
'Dump the protobuf to a file.\n\n Args:\n model_proto: The protobuf of the model\n fpath: The filename to save the model protobuf\n\n '
s = model_proto.SerializeToString()
with open(fpath, 'wb') as f:
f.write(s) | def dump_proto(model_proto, fpath):
'Dump the protobuf to a file.\n\n Args:\n model_proto: The protobuf of the model\n fpath: The filename to save the model protobuf\n\n '
s = model_proto.SerializeToString()
with open(fpath, 'wb') as f:
f.write(s)<|docstring|>Dump the protobuf to a file.
Args:
model_proto: The protobuf of the model
fpath: The filename to save the model protobuf<|endoftext|> |
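load_proto and dump_proto above are file-level counterparts; a brief sketch of persisting a model proto and reading it back (the file name is arbitrary).

dump_proto(model_proto, 'last.pbuf')
restored_proto = load_proto('last.pbuf')
assert restored_proto == model_proto  # protobuf messages compare field by field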
e676dc30eada408139ab34756fce692fe2045cb8f2ffad3a3b57d472d351e5ea | def datastream_to_proto(proto, stream, logger=None):
'Convert the datastream to the protobuf.\n\n Args:\n model_proto: The protobuf of the model\n stream: The data stream from the remote connection\n logger: (Optional) The log object\n\n Returns:\n protobuf: A protobuf of the model\n '
npbytes = b''
for chunk in stream:
npbytes += chunk.npbytes
if (len(npbytes) > 0):
proto.ParseFromString(npbytes)
if (logger is not None):
logger.debug(f'datastream_to_proto parsed a {type(proto)}.')
return proto
else:
raise RuntimeError(f'Received empty stream message of type {type(proto)}') | Convert the datastream to the protobuf.
Args:
model_proto: The protobuf of the model
stream: The data stream from the remote connection
logger: (Optional) The log object
Returns:
protobuf: A protobuf of the model | openfl/protocols/utils.py | datastream_to_proto | psfoley/openfl | 297 | python | def datastream_to_proto(proto, stream, logger=None):
'Convert the datastream to the protobuf.\n\n Args:\n model_proto: The protobuf of the model\n stream: The data stream from the remote connection\n logger: (Optional) The log object\n\n Returns:\n protobuf: A protobuf of the model\n '
npbytes = b''
for chunk in stream:
npbytes += chunk.npbytes
if (len(npbytes) > 0):
proto.ParseFromString(npbytes)
if (logger is not None):
logger.debug(f'datastream_to_proto parsed a {type(proto)}.')
return proto
else:
raise RuntimeError(f'Received empty stream message of type {type(proto)}') | def datastream_to_proto(proto, stream, logger=None):
'Convert the datastream to the protobuf.\n\n Args:\n model_proto: The protobuf of the model\n stream: The data stream from the remote connection\n logger: (Optional) The log object\n\n Returns:\n protobuf: A protobuf of the model\n '
npbytes = b''
for chunk in stream:
npbytes += chunk.npbytes
if (len(npbytes) > 0):
proto.ParseFromString(npbytes)
if (logger is not None):
logger.debug(f'datastream_to_proto parsed a {type(proto)}.')
return proto
else:
raise RuntimeError(f'Received empty stream message of type {type(proto)}')<|docstring|>Convert the datastream to the protobuf.
Args:
model_proto: The protobuf of the model
stream: The data stream from the remote connection
logger: (Optional) The log object
Returns:
protobuf: A protobuf of the model<|endoftext|> |
c9fc645a4fd77d546ab7351e7c205ba361951e7d0305241fe9f78ace48b131ed | def proto_to_datastream(proto, logger, max_buffer_size=((2 * 1024) * 1024)):
'Convert the protobuf to the datastream for the remote connection.\n\n Args:\n model_proto: The protobuf of the model\n logger: The log object\n max_buffer_size: The buffer size (Default= 2*1024*1024)\n Returns:\n reply: The message for the remote connection.\n '
npbytes = proto.SerializeToString()
data_size = len(npbytes)
buffer_size = (data_size if (max_buffer_size > data_size) else max_buffer_size)
logger.debug(f'Setting stream chunks with size {buffer_size} for proto of type {type(proto)}')
for i in range(0, data_size, buffer_size):
chunk = npbytes[i:(i + buffer_size)]
reply = DataStream(npbytes=chunk, size=len(chunk))
(yield reply) | Convert the protobuf to the datastream for the remote connection.
Args:
model_proto: The protobuf of the model
logger: The log object
max_buffer_size: The buffer size (Default= 2*1024*1024)
Returns:
reply: The message for the remote connection. | openfl/protocols/utils.py | proto_to_datastream | psfoley/openfl | 297 | python | def proto_to_datastream(proto, logger, max_buffer_size=((2 * 1024) * 1024)):
'Convert the protobuf to the datastream for the remote connection.\n\n Args:\n model_proto: The protobuf of the model\n logger: The log object\n max_buffer_size: The buffer size (Default= 2*1024*1024)\n Returns:\n reply: The message for the remote connection.\n '
npbytes = proto.SerializeToString()
data_size = len(npbytes)
buffer_size = (data_size if (max_buffer_size > data_size) else max_buffer_size)
logger.debug(f'Setting stream chunks with size {buffer_size} for proto of type {type(proto)}')
for i in range(0, data_size, buffer_size):
chunk = npbytes[i:(i + buffer_size)]
reply = DataStream(npbytes=chunk, size=len(chunk))
(yield reply) | def proto_to_datastream(proto, logger, max_buffer_size=((2 * 1024) * 1024)):
'Convert the protobuf to the datastream for the remote connection.\n\n Args:\n model_proto: The protobuf of the model\n logger: The log object\n max_buffer_size: The buffer size (Default= 2*1024*1024)\n Returns:\n reply: The message for the remote connection.\n '
npbytes = proto.SerializeToString()
data_size = len(npbytes)
buffer_size = (data_size if (max_buffer_size > data_size) else max_buffer_size)
logger.debug(f'Setting stream chunks with size {buffer_size} for proto of type {type(proto)}')
for i in range(0, data_size, buffer_size):
chunk = npbytes[i:(i + buffer_size)]
reply = DataStream(npbytes=chunk, size=len(chunk))
(yield reply)<|docstring|>Convert the protobuf to the datastream for the remote connection.
Args:
model_proto: The protobuf of the model
logger: The log object
max_buffer_size: The buffer size (Default= 2*1024*1024)
Returns:
reply: The message for the remote connection.<|endoftext|> |
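proto_to_datastream and datastream_to_proto form a chunked round trip over gRPC; a local sketch without any network, assuming logger is a standard logging.Logger.

import logging

logger = logging.getLogger('stream_demo')  # hypothetical logger name
chunks = proto_to_datastream(model_proto, logger, max_buffer_size=1024)
reassembled = datastream_to_proto(ModelProto(), chunks, logger)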
636b484bbb36cd2f514bce0d0b1c7b7e568321448347cfd9710bf96027ed63b9 | def get_headers(context) -> dict:
'Get headers from context.'
return {header[0]: header[1] for header in context.invocation_metadata()} | Get headers from context. | openfl/protocols/utils.py | get_headers | psfoley/openfl | 297 | python | def get_headers(context) -> dict:
return {header[0]: header[1] for header in context.invocation_metadata()} | def get_headers(context) -> dict:
return {header[0]: header[1] for header in context.invocation_metadata()}<|docstring|>Get headers from context.<|endoftext|> |
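A hedged sketch of how get_headers might be used inside a gRPC servicer class; the method name and the header key are illustrative only, not part of the OpenFL API.

def GetTasks(self, request, context):  # hypothetical servicer method
    headers = get_headers(context)
    client_id = headers.get('client_id', 'unknown')  # 'client_id' is an assumed header key
    self.logger.info(f'Received request from {client_id}')
    # ... build and return the response here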
ffaee16312bf89d6e1b908678a638f451cb8ac9bac27d107527dd278b158b59f | def _check_layout_validity(self):
'\n Check the current layout is a valid one.\n '
self._visible_areas = []
if (self.ID is None):
raise SpyderAPIError('A Layout must define an `ID` class attribute!')
self.get_name()
if (not self._areas):
raise SpyderAPIError('A Layout must define at least one area!')
default_areas = []
area_zero_zero = False
for area in self._areas:
default_areas.append(area['default'])
if area['default']:
self._default_area = area
self._visible_areas.append(area['visible'])
if (area_zero_zero and (area['row'] == 0) and (area['column'] == 0)):
raise SpyderAPIError('Multiple areas defined their row and column as 0!')
if ((area['row'] == 0) and (area['column'] == 0)):
area_zero_zero = True
if (not (set(area['hidden_plugin_ids']) <= set(area['plugin_ids']))):
raise SpyderAPIError('At least 1 hidden plugin id is not being specified in the area plugin ids list!\n SpyderLayout: {}\n hidden_plugin_ids: {}\nplugin_ids: {}'.format(self.get_name(), area['hidden_plugin_ids'], area['plugin_ids']))
if (not any(self._visible_areas)):
raise SpyderAPIError('At least 1 area must be `visible`')
if (not any(default_areas)):
raise SpyderAPIError('No area is the `default`!')
if (default_areas.count(True) != 1):
raise SpyderAPIError('Only 1 area can be the `default`!')
if (not area_zero_zero):
raise SpyderAPIError('1 area needs to be specified with row 0 and column 0!')
self._check_area() | Check the current layout is a valid one. | spyder/plugins/layout/api.py | _check_layout_validity | mrclary/spyder | 7,956 | python | def _check_layout_validity(self):
'\n \n '
self._visible_areas = []
if (self.ID is None):
raise SpyderAPIError('A Layout must define an `ID` class attribute!')
self.get_name()
if (not self._areas):
raise SpyderAPIError('A Layout must define at least one area!')
default_areas = []
area_zero_zero = False
for area in self._areas:
default_areas.append(area['default'])
if area['default']:
self._default_area = area
self._visible_areas.append(area['visible'])
if (area_zero_zero and (area['row'] == 0) and (area['column'] == 0)):
raise SpyderAPIError('Multiple areas defined their row and column as 0!')
if ((area['row'] == 0) and (area['column'] == 0)):
area_zero_zero = True
if (not (set(area['hidden_plugin_ids']) <= set(area['plugin_ids']))):
raise SpyderAPIError('At least 1 hidden plugin id is not being specified in the area plugin ids list!\n SpyderLayout: {}\n hidden_plugin_ids: {}\nplugin_ids: {}'.format(self.get_name(), area['hidden_plugin_ids'], area['plugin_ids']))
if (not any(self._visible_areas)):
raise SpyderAPIError('At least 1 area must be `visible`')
if (not any(default_areas)):
raise SpyderAPIError('No area is the `default`!')
if (default_areas.count(True) != 1):
raise SpyderAPIError('Only 1 area can be the `default`!')
if (not area_zero_zero):
raise SpyderAPIError('1 area needs to be specified with row 0 and column 0!')
self._check_area() | def _check_layout_validity(self):
'\n \n '
self._visible_areas = []
if (self.ID is None):
raise SpyderAPIError('A Layout must define an `ID` class attribute!')
self.get_name()
if (not self._areas):
raise SpyderAPIError('A Layout must define at least one area!')
default_areas = []
area_zero_zero = False
for area in self._areas:
default_areas.append(area['default'])
if area['default']:
self._default_area = area
self._visible_areas.append(area['visible'])
if (area_zero_zero and (area['row'] == 0) and (area['column'] == 0)):
raise SpyderAPIError('Multiple areas defined their row and column as 0!')
if ((area['row'] == 0) and (area['column'] == 0)):
area_zero_zero = True
if (not (set(area['hidden_plugin_ids']) <= set(area['plugin_ids']))):
raise SpyderAPIError('At least 1 hidden plugin id is not being specified in the area plugin ids list!\n SpyderLayout: {}\n hidden_plugin_ids: {}\nplugin_ids: {}'.format(self.get_name(), area['hidden_plugin_ids'], area['plugin_ids']))
if (not any(self._visible_areas)):
raise SpyderAPIError('At least 1 area must be `visible`')
if (not any(default_areas)):
raise SpyderAPIError('No area is the `default`!')
if (default_areas.count(True) != 1):
raise SpyderAPIError('Only 1 area can be the `default`!')
if (not area_zero_zero):
raise SpyderAPIError('1 area needs to be specified with row 0 and column 0!')
self._check_area()<|docstring|>Check the current layout is a valid one.<|endoftext|> |
615338e2fbc162e31925e7630f4f07ef09db269439524c30c6648bc3f578444b | def _check_area(self):
'\n Check if the current layout added areas cover the entire rectangle.\n\n Rectangle given by the extreme points for the added areas.\n '
self._area_rects = []
height = (self._rows + 1)
area_float_rects = []
delta = 0.0001
for (index, area) in enumerate(self._areas):
rectf = QRectF()
rectf.setLeft((area['column'] + delta))
rectf.setRight(((area['column'] + area['col_span']) - delta))
rectf.setTop(((height - area['row']) - delta))
rectf.setBottom((((height - area['row']) - area['row_span']) + delta))
rectf.index = index
rectf.plugin_ids = area['plugin_ids']
area_float_rects.append(rectf)
rect = QRectF()
rect.setLeft(area['column'])
rect.setRight((area['column'] + area['col_span']))
rect.setTop((height - area['row']))
rect.setBottom(((height - area['row']) - area['row_span']))
rect.index = index
rect.plugin_ids = area['plugin_ids']
self._area_rects.append(rect)
for rect_1 in area_float_rects:
for rect_2 in area_float_rects:
if (rect_1.index != rect_2.index):
if rect_1.intersects(rect_2):
raise SpyderAPIError('Area with plugins {0} is overlapping area with plugins {1}'.format(rect_1.plugin_ids, rect_2.plugin_ids))
total_area = 0
tops = []
rights = []
for (index, rect) in enumerate(self._area_rects):
tops.append(rect.top())
rights.append(rect.right())
area = abs((rect.width() * rect.height()))
total_area += area
self._areas[index]['area'] = area
if (total_area != (max(rights) * max(tops))):
raise SpyderAPIError('Areas are not covering the entire section!\nEither an area is missing or col_span/row_span are not correctly set!') | Check if the current layout added areas cover the entire rectangle.
Rectangle given by the extreme points for the added areas. | spyder/plugins/layout/api.py | _check_area | mrclary/spyder | 7,956 | python | def _check_area(self):
'\n Check if the current layout added areas cover the entire rectangle.\n\n Rectangle given by the extreme points for the added areas.\n '
self._area_rects = []
height = (self._rows + 1)
area_float_rects = []
delta = 0.0001
for (index, area) in enumerate(self._areas):
rectf = QRectF()
rectf.setLeft((area['column'] + delta))
rectf.setRight(((area['column'] + area['col_span']) - delta))
rectf.setTop(((height - area['row']) - delta))
rectf.setBottom((((height - area['row']) - area['row_span']) + delta))
rectf.index = index
rectf.plugin_ids = area['plugin_ids']
area_float_rects.append(rectf)
rect = QRectF()
rect.setLeft(area['column'])
rect.setRight((area['column'] + area['col_span']))
rect.setTop((height - area['row']))
rect.setBottom(((height - area['row']) - area['row_span']))
rect.index = index
rect.plugin_ids = area['plugin_ids']
self._area_rects.append(rect)
for rect_1 in area_float_rects:
for rect_2 in area_float_rects:
if (rect_1.index != rect_2.index):
if rect_1.intersects(rect_2):
raise SpyderAPIError('Area with plugins {0} is overlapping area with plugins {1}'.format(rect_1.plugin_ids, rect_2.plugin_ids))
total_area = 0
tops = []
rights = []
for (index, rect) in enumerate(self._area_rects):
tops.append(rect.top())
rights.append(rect.right())
area = abs((rect.width() * rect.height()))
total_area += area
self._areas[index]['area'] = area
if (total_area != (max(rights) * max(tops))):
raise SpyderAPIError('Areas are not covering the entire section!\nEither an area is missing or col_span/row_span are not correctly set!') | def _check_area(self):
'\n Check if the current layout added areas cover the entire rectangle.\n\n Rectangle given by the extreme points for the added areas.\n '
self._area_rects = []
height = (self._rows + 1)
area_float_rects = []
delta = 0.0001
for (index, area) in enumerate(self._areas):
rectf = QRectF()
rectf.setLeft((area['column'] + delta))
rectf.setRight(((area['column'] + area['col_span']) - delta))
rectf.setTop(((height - area['row']) - delta))
rectf.setBottom((((height - area['row']) - area['row_span']) + delta))
rectf.index = index
rectf.plugin_ids = area['plugin_ids']
area_float_rects.append(rectf)
rect = QRectF()
rect.setLeft(area['column'])
rect.setRight((area['column'] + area['col_span']))
rect.setTop((height - area['row']))
rect.setBottom(((height - area['row']) - area['row_span']))
rect.index = index
rect.plugin_ids = area['plugin_ids']
self._area_rects.append(rect)
for rect_1 in area_float_rects:
for rect_2 in area_float_rects:
if (rect_1.index != rect_2.index):
if rect_1.intersects(rect_2):
raise SpyderAPIError('Area with plugins {0} is overlapping area with plugins {1}'.format(rect_1.plugin_ids, rect_2.plugin_ids))
total_area = 0
tops = []
rights = []
for (index, rect) in enumerate(self._area_rects):
tops.append(rect.top())
rights.append(rect.right())
area = abs((rect.width() * rect.height()))
total_area += area
self._areas[index]['area'] = area
if (total_area != (max(rights) * max(tops))):
raise SpyderAPIError('Areas are not covering the entire section!\nEither an area is missing or col_span/row_span are not correctly set!')<|docstring|>Check if the current layout added areas cover the entire rectangle.
Rectangle given by the extreme points for the added areas.<|endoftext|> |
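A worked numeric example of the coverage test above, with the Qt geometry reduced to plain arithmetic; the two-area grid is an illustration only.

# Two 1x1 areas on a single row: self._rows == 0, so height == 1.
areas = [
    {'row': 0, 'column': 0, 'row_span': 1, 'col_span': 1},
    {'row': 0, 'column': 1, 'row_span': 1, 'col_span': 1},
]
height = 0 + 1
rights = [a['column'] + a['col_span'] for a in areas]           # [1, 2]
tops = [height - a['row'] for a in areas]                       # [1, 1]
total_area = sum(a['col_span'] * a['row_span'] for a in areas)  # 2
assert total_area == max(rights) * max(tops)  # 2 == 2 * 1, so the areas cover the grid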
8fa71deb194f4ac338a2f97b9cd5f9bde7ae2ea0903b488c49d65b5f818be7a1 | def get_name(self):
'\n Return the layout localized name.\n\n Returns\n -------\n str\n Localized name of the layout.\n\n Notes\n -----\n This is a method to be able to update localization without a restart.\n '
raise NotImplementedError('A layout must define a `get_name` method!') | Return the layout localized name.
Returns
-------
str
Localized name of the layout.
Notes
-----
This is a method to be able to update localization without a restart. | spyder/plugins/layout/api.py | get_name | mrclary/spyder | 7,956 | python | def get_name(self):
'\n Return the layout localized name.\n\n Returns\n -------\n str\n Localized name of the layout.\n\n Notes\n -----\n This is a method to be able to update localization without a restart.\n '
raise NotImplementedError('A layout must define a `get_name` method!') | def get_name(self):
'\n Return the layout localized name.\n\n Returns\n -------\n str\n Localized name of the layout.\n\n Notes\n -----\n This is a method to be able to update localization without a restart.\n '
raise NotImplementedError('A layout must define a `get_name` method!')<|docstring|>Return the layout localized name.
Returns
-------
str
Localized name of the layout.
Notes
-----
This is a method to be able to update localization without a restart.<|endoftext|> |
03d656d6aac34e82ec5ddb38110a577cdab77ff5fe1718283a82ea1bc0339c2a | def add_area(self, plugin_ids, row, column, row_span=1, col_span=1, default=False, visible=True, hidden_plugin_ids=[]):
'\n        Add a new area and `plugin_ids` that will populate it to the layout.\n\n        The area will start at row, column, spanning row_span rows and\n        col_span columns.\n\n        Parameters\n        ----------\n        plugin_ids: list\n            List of plugin ids that will be in the area\n        row: int\n            Initial row where the area starts\n        column: int\n            Initial column where the area starts\n        row_span: int, optional\n            Number of rows that the area covers\n        col_span: int, optional\n            Number of columns the area covers\n        default: bool, optional\n            Defines an area as the default one, i.e. all other plugins that were\n            not passed in `plugin_ids` will be added to the default area.\n            The default is False.\n        visible: bool, optional\n            Defines if the area is visible when setting up the layout.\n            Default is True.\n\n        Notes\n        -----\n        See: https://doc.qt.io/qt-5/qgridlayout.html\n        '
if (self._default_added and default):
raise SpyderAPIError('A default location has already been defined!')
self._plugin_ids += plugin_ids
self._rows = max(row, self._rows)
self._cols = max(column, self._cols)
self._default_added = default
self._column_stretchs[column] = 1
self._row_stretchs[row] = 1
self._areas.append(dict(plugin_ids=plugin_ids, row=row, column=column, row_span=row_span, col_span=col_span, default=default, visible=visible, hidden_plugin_ids=hidden_plugin_ids)) | Add a new area and `plugin_ids` that will populate it to the layout.
The area will start at row, column, spanning row_span rows and
col_span columns.
Parameters
----------
plugin_ids: list
List of plugin ids that will be in the area
row: int
Initial row where the area starts
column: int
Initial column where the area starts
row_span: int, optional
Number of rows that the area covers
col_span: int, optional
Number of columns the area covers
default: bool, optional
Defines an area as the default one, i.e. all other plugins that were
not passed in `plugin_ids` will be added to the default area.
The default is False.
visible: bool, optional
Defines if the area is visible when setting up the layout.
Default is True.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html | spyder/plugins/layout/api.py | add_area | mrclary/spyder | 7,956 | python | def add_area(self, plugin_ids, row, column, row_span=1, col_span=1, default=False, visible=True, hidden_plugin_ids=[]):
'\n        Add a new area and `plugin_ids` that will populate it to the layout.\n\n        The area will start at row, column, spanning row_span rows and\n        col_span columns.\n\n        Parameters\n        ----------\n        plugin_ids: list\n            List of plugin ids that will be in the area\n        row: int\n            Initial row where the area starts\n        column: int\n            Initial column where the area starts\n        row_span: int, optional\n            Number of rows that the area covers\n        col_span: int, optional\n            Number of columns the area covers\n        default: bool, optional\n            Defines an area as the default one, i.e. all other plugins that were\n            not passed in `plugin_ids` will be added to the default area.\n            The default is False.\n        visible: bool, optional\n            Defines if the area is visible when setting up the layout.\n            Default is True.\n\n        Notes\n        -----\n        See: https://doc.qt.io/qt-5/qgridlayout.html\n        '
if (self._default_added and default):
raise SpyderAPIError('A default location has already been defined!')
self._plugin_ids += plugin_ids
self._rows = max(row, self._rows)
self._cols = max(column, self._cols)
self._default_added = default
self._column_stretchs[column] = 1
self._row_stretchs[row] = 1
self._areas.append(dict(plugin_ids=plugin_ids, row=row, column=column, row_span=row_span, col_span=col_span, default=default, visible=visible, hidden_plugin_ids=hidden_plugin_ids)) | def add_area(self, plugin_ids, row, column, row_span=1, col_span=1, default=False, visible=True, hidden_plugin_ids=[]):
'\n        Add a new area and `plugin_ids` that will populate it to the layout.\n\n        The area will start at row, column, spanning row_span rows and\n        col_span columns.\n\n        Parameters\n        ----------\n        plugin_ids: list\n            List of plugin ids that will be in the area\n        row: int\n            Initial row where the area starts\n        column: int\n            Initial column where the area starts\n        row_span: int, optional\n            Number of rows that the area covers\n        col_span: int, optional\n            Number of columns the area covers\n        default: bool, optional\n            Defines an area as the default one, i.e. all other plugins that were\n            not passed in `plugin_ids` will be added to the default area.\n            The default is False.\n        visible: bool, optional\n            Defines if the area is visible when setting up the layout.\n            Default is True.\n\n        Notes\n        -----\n        See: https://doc.qt.io/qt-5/qgridlayout.html\n        '
if (self._default_added and default):
raise SpyderAPIError('A default location has already been defined!')
self._plugin_ids += plugin_ids
self._rows = max(row, self._rows)
self._cols = max(column, self._cols)
self._default_added = default
self._column_stretchs[column] = 1
self._row_stretchs[row] = 1
self._areas.append(dict(plugin_ids=plugin_ids, row=row, column=column, row_span=row_span, col_span=col_span, default=default, visible=visible, hidden_plugin_ids=hidden_plugin_ids))<|docstring|>Add a new area and `plugin_ids` that will populate it to the layout.
The area will start at row, column, spanning row_span rows and
col_span columns.
Parameters
----------
plugin_ids: list
List of plugin ids that will be in the area
row: int
Initial row where the area starts
column: int
Initial column where the area starts
row_span: int, optional
Number of rows that the area covers
col_span: int, optional
Number of columns the area covers
default: bool, optional
Defines an area as the default one, i.e. all other plugins that were
not passed in `plugin_ids` will be added to the default area.
The default is False.
visible: bool, optional
Defines if the area is visible when setting up the layout.
Default is True.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html<|endoftext|> |
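A hypothetical layout built with add_area; the base class name BaseGridLayoutType, the helper method, and the plugin names are assumptions for illustration, not facts about the Spyder API. It also calls the stretch setters described in the following entries.

class VerticalSplitLayout(BaseGridLayoutType):  # base class name assumed
    ID = 'my_vertical_split_layout'

    def get_name(self):
        return 'Vertical split'  # normally a translated string

    def define_areas(self):  # helper name is illustrative
        self.add_area(['editor'], row=0, column=0, default=True)
        self.add_area(['help', 'variable_explorer'], row=0, column=1,
                      visible=True, hidden_plugin_ids=['variable_explorer'])
        self.set_column_stretch(0, 3)
        self.set_column_stretch(1, 2)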
3fe18845f471aa9e94d776be69444d939e5ec7c49762dabcb647bbaac42e10a3 | def set_column_stretch(self, column, stretch):
'\n Set the factor of column to stretch.\n\n The stretch factor is relative to the other columns in this grid.\n Columns with a higher stretch factor take more of the available space.\n\n Parameters\n ----------\n column: int\n The column number. The first column is number 0.\n stretch: int\n Column stretch factor.\n\n Notes\n -----\n See: https://doc.qt.io/qt-5/qgridlayout.html\n '
self._column_stretchs[column] = stretch | Set the factor of column to stretch.
The stretch factor is relative to the other columns in this grid.
Columns with a higher stretch factor take more of the available space.
Parameters
----------
column: int
The column number. The first column is number 0.
stretch: int
Column stretch factor.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html | spyder/plugins/layout/api.py | set_column_stretch | mrclary/spyder | 7,956 | python | def set_column_stretch(self, column, stretch):
'\n Set the factor of column to stretch.\n\n The stretch factor is relative to the other columns in this grid.\n Columns with a higher stretch factor take more of the available space.\n\n Parameters\n ----------\n column: int\n The column number. The first column is number 0.\n stretch: int\n Column stretch factor.\n\n Notes\n -----\n See: https://doc.qt.io/qt-5/qgridlayout.html\n '
self._column_stretchs[column] = stretch | def set_column_stretch(self, column, stretch):
'\n Set the factor of column to stretch.\n\n The stretch factor is relative to the other columns in this grid.\n Columns with a higher stretch factor take more of the available space.\n\n Parameters\n ----------\n column: int\n The column number. The first column is number 0.\n stretch: int\n Column stretch factor.\n\n Notes\n -----\n See: https://doc.qt.io/qt-5/qgridlayout.html\n '
self._column_stretchs[column] = stretch<|docstring|>Set the factor of column to stretch.
The stretch factor is relative to the other columns in this grid.
Columns with a higher stretch factor take more of the available space.
Parameters
----------
column: int
The column number. The first column is number 0.
stretch: int
Column stretch factor.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html<|endoftext|> |
8f3906457f1caa10f7699b6c7d27324a1c0508ca083ef7fdc77f37d40867bb29 | def set_row_stretch(self, row, stretch):
'\n Set the factor of row to stretch.\n\n The stretch factor is relative to the other rows in this grid.\n Rows with a higher stretch factor take more of the available space.\n\n Parameters\n ----------\n row: int\n The row number. The first row is number 0.\n stretch: int\n Row stretch factor.\n\n Notes\n -----\n See: https://doc.qt.io/qt-5/qgridlayout.html\n '
self._row_stretchs[row] = stretch | Set the factor of row to stretch.
The stretch factor is relative to the other rows in this grid.
Rows with a higher stretch factor take more of the available space.
Parameters
----------
row: int
The row number. The first row is number 0.
stretch: int
Row stretch factor.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html | spyder/plugins/layout/api.py | set_row_stretch | mrclary/spyder | 7,956 | python | def set_row_stretch(self, row, stretch):
'\n Set the factor of row to stretch.\n\n The stretch factor is relative to the other rows in this grid.\n Rows with a higher stretch factor take more of the available space.\n\n Parameters\n ----------\n row: int\n The row number. The first row is number 0.\n stretch: int\n Row stretch factor.\n\n Notes\n -----\n See: https://doc.qt.io/qt-5/qgridlayout.html\n '
self._row_stretchs[row] = stretch | def set_row_stretch(self, row, stretch):
'\n Set the factor of row to stretch.\n\n The stretch factor is relative to the other rows in this grid.\n Rows with a higher stretch factor take more of the available space.\n\n Parameters\n ----------\n row: int\n The row number. The first row is number 0.\n stretch: int\n Row stretch factor.\n\n Notes\n -----\n See: https://doc.qt.io/qt-5/qgridlayout.html\n '
self._row_stretchs[row] = stretch<|docstring|>Set the factor of row to stretch.
The stretch factor is relative to the other rows in this grid.
Rows with a higher stretch factor take more of the available space.
Parameters
----------
row: int
The row number. The first row is number 0.
stretch: int
Row stretch factor.
Notes
-----
See: https://doc.qt.io/qt-5/qgridlayout.html<|endoftext|> |
d2ebb9a0e5f41a0b574a74f824a7a536be56a3850cbb696708e32c74d434e06f | def preview_layout(self, show_hidden_areas=False):
'\n Show the layout with placeholder texts using a QWidget.\n '
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = QWidget()
layout = QGridLayout()
for area in self._areas:
label = QPlainTextEdit()
label.setReadOnly(True)
label.setPlainText('\n'.join(area['plugin_ids']))
if (area['visible'] or show_hidden_areas):
layout.addWidget(label, area['row'], area['column'], area['row_span'], area['col_span'])
if area['default']:
label.setStyleSheet('QPlainTextEdit {background-color: #ff0000;}')
if (not area['visible']):
label.setStyleSheet('QPlainTextEdit {background-color: #eeeeee;}')
for (row, stretch) in self._row_stretchs.items():
layout.setRowStretch(row, stretch)
for (col, stretch) in self._column_stretchs.items():
layout.setColumnStretch(col, stretch)
widget.setLayout(layout)
widget.showMaximized()
app.exec_() | Show the layout with placeholder texts using a QWidget. | spyder/plugins/layout/api.py | preview_layout | mrclary/spyder | 7,956 | python | def preview_layout(self, show_hidden_areas=False):
'\n \n '
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = QWidget()
layout = QGridLayout()
for area in self._areas:
label = QPlainTextEdit()
label.setReadOnly(True)
label.setPlainText('\n'.join(area['plugin_ids']))
if (area['visible'] or show_hidden_areas):
layout.addWidget(label, area['row'], area['column'], area['row_span'], area['col_span'])
if area['default']:
label.setStyleSheet('QPlainTextEdit {background-color: #ff0000;}')
if (not area['visible']):
label.setStyleSheet('QPlainTextEdit {background-color: #eeeeee;}')
for (row, stretch) in self._row_stretchs.items():
layout.setRowStretch(row, stretch)
for (col, stretch) in self._column_stretchs.items():
layout.setColumnStretch(col, stretch)
widget.setLayout(layout)
widget.showMaximized()
app.exec_() | def preview_layout(self, show_hidden_areas=False):
'\n \n '
from spyder.utils.qthelpers import qapplication
app = qapplication()
widget = QWidget()
layout = QGridLayout()
for area in self._areas:
label = QPlainTextEdit()
label.setReadOnly(True)
label.setPlainText('\n'.join(area['plugin_ids']))
if (area['visible'] or show_hidden_areas):
layout.addWidget(label, area['row'], area['column'], area['row_span'], area['col_span'])
if area['default']:
label.setStyleSheet('QPlainTextEdit {background-color: #ff0000;}')
if (not area['visible']):
label.setStyleSheet('QPlainTextEdit {background-color: #eeeeee;}')
for (row, stretch) in self._row_stretchs.items():
layout.setRowStretch(row, stretch)
for (col, stretch) in self._column_stretchs.items():
layout.setColumnStretch(col, stretch)
widget.setLayout(layout)
widget.showMaximized()
app.exec_()<|docstring|>Show the layout with placeholder texts using a QWidget.<|endoftext|> |
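A quick visual check of the hypothetical layout sketched earlier; the constructor argument is an assumption, and preview_layout opens a maximized window of placeholder text boxes with the default area highlighted in red.

layout = VerticalSplitLayout(None)  # parent argument is an assumption
layout.define_areas()
layout._check_layout_validity()
layout.preview_layout(show_hidden_areas=True)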
cae21d7ac17fa34d4b45a59f17b47a9fe3c1a31d4c57a5d02c2d1e4bd702fc3d | def set_main_window_layout(self, main_window, dockable_plugins):
'\n Set the given mainwindow layout.\n\n First validate the current layout definition, then clear the mainwindow\n current layout and finally calculate and set the new layout.\n '
all_plugin_ids = []
for plugin in dockable_plugins:
all_plugin_ids.append(plugin.NAME)
plugin.toggle_view(False)
patched_default_area = copy.deepcopy(self._default_area)
unassgined_plugin_ids = list((set(self._plugin_ids) ^ set(all_plugin_ids)))
patched_default_area['plugin_ids'] += unassgined_plugin_ids
patched_default_area['hidden_plugin_ids'] += unassgined_plugin_ids
patched_areas = [(patched_default_area if area['default'] else area) for area in self._areas]
docks = {}
for area in patched_areas:
current_area = area
plugin_id = current_area['plugin_ids'][0]
plugin = main_window.get_plugin(plugin_id, error=False)
if plugin:
dock = plugin.dockwidget
docks[(current_area['row'], current_area['column'])] = dock
dock.area = area['area']
dock.col_span = area['col_span']
dock.row_span = area['row_span']
plugin.toggle_view(area['visible'])
layout_data = []
direction = Qt.Horizontal
for row in range(0, (self._rows + 1)):
dock = None
for col in range(0, (self._cols + 1)):
key = (row, col)
if (key in docks):
if (dock is None):
dock = docks[key]
else:
layout_data.append(((1 / docks[key].area), key, dock, docks[key], direction))
dock = docks[key]
main_window.addDockWidget(Qt.LeftDockWidgetArea, dock, direction)
direction = Qt.Vertical
for col in range(0, (self._cols + 1)):
dock = None
for row in range(0, (self._rows + 1)):
key = (row, col)
if (key in docks):
if (dock is None):
dock = docks[key]
else:
layout_data.append(((1 / docks[key].area), key, dock, docks[key], direction))
dock = docks[key]
sorted_data = sorted(layout_data, key=(lambda x: (x[0], x[1])))
for (area, key, first, second, direction) in sorted_data:
main_window.splitDockWidget(first, second, direction)
plugins_to_tabify = []
for area in patched_areas:
area_visible = area['visible']
base_plugin = main_window.get_plugin(area['plugin_ids'][0], error=False)
if base_plugin:
plugin_ids = area['plugin_ids'][1:]
hidden_plugin_ids = area['hidden_plugin_ids']
for plugin_id in plugin_ids:
current_plugin = main_window.get_plugin(plugin_id, error=False)
if current_plugin:
if ((plugin_id in unassgined_plugin_ids) and hasattr(current_plugin, 'TABIFY')):
plugins_to_tabify.append((current_plugin, base_plugin))
else:
main_window.tabify_plugins(base_plugin, current_plugin)
if (plugin_id not in hidden_plugin_ids):
current_plugin.toggle_view(area_visible)
else:
current_plugin.toggle_view(False)
if area['visible']:
base_plugin.dockwidget.show()
base_plugin.dockwidget.raise_()
for (plugin, base_plugin) in plugins_to_tabify:
if (not main_window.tabify_plugin(plugin)):
main_window.tabify_plugins(base_plugin, plugin)
current_plugin.toggle_view(False)
column_docks = []
column_stretches = []
for (key, dock) in docks.items():
for (col, stretch) in self._column_stretchs.items():
if ((key[1] == col) and (dock.col_span == 1)):
column_docks.append(dock)
column_stretches.append(stretch)
row_docks = []
row_stretches = []
for (key, dock) in docks.items():
for (row, stretch) in self._row_stretchs.items():
if ((key[0] == row) and (dock.row_span == 1)):
row_docks.append(dock)
row_stretches.append(stretch)
main_window.showMaximized()
main_window.resizeDocks(column_docks, column_stretches, Qt.Horizontal)
main_window.resizeDocks(row_docks, row_stretches, Qt.Vertical) | Set the given mainwindow layout.
First validate the current layout definition, then clear the mainwindow
current layout and finally calculate and set the new layout. | spyder/plugins/layout/api.py | set_main_window_layout | mrclary/spyder | 7,956 | python | def set_main_window_layout(self, main_window, dockable_plugins):
'\n Set the given mainwindow layout.\n\n First validate the current layout definition, then clear the mainwindow\n current layout and finally calculate and set the new layout.\n '
all_plugin_ids = []
for plugin in dockable_plugins:
all_plugin_ids.append(plugin.NAME)
plugin.toggle_view(False)
patched_default_area = copy.deepcopy(self._default_area)
unassgined_plugin_ids = list((set(self._plugin_ids) ^ set(all_plugin_ids)))
patched_default_area['plugin_ids'] += unassgined_plugin_ids
patched_default_area['hidden_plugin_ids'] += unassgined_plugin_ids
patched_areas = [(patched_default_area if area['default'] else area) for area in self._areas]
docks = {}
for area in patched_areas:
current_area = area
plugin_id = current_area['plugin_ids'][0]
plugin = main_window.get_plugin(plugin_id, error=False)
if plugin:
dock = plugin.dockwidget
docks[(current_area['row'], current_area['column'])] = dock
dock.area = area['area']
dock.col_span = area['col_span']
dock.row_span = area['row_span']
plugin.toggle_view(area['visible'])
layout_data = []
direction = Qt.Horizontal
for row in range(0, (self._rows + 1)):
dock = None
for col in range(0, (self._cols + 1)):
key = (row, col)
if (key in docks):
if (dock is None):
dock = docks[key]
else:
layout_data.append(((1 / docks[key].area), key, dock, docks[key], direction))
dock = docks[key]
main_window.addDockWidget(Qt.LeftDockWidgetArea, dock, direction)
direction = Qt.Vertical
for col in range(0, (self._cols + 1)):
dock = None
for row in range(0, (self._rows + 1)):
key = (row, col)
if (key in docks):
if (dock is None):
dock = docks[key]
else:
layout_data.append(((1 / docks[key].area), key, dock, docks[key], direction))
dock = docks[key]
sorted_data = sorted(layout_data, key=(lambda x: (x[0], x[1])))
for (area, key, first, second, direction) in sorted_data:
main_window.splitDockWidget(first, second, direction)
plugins_to_tabify = []
for area in patched_areas:
area_visible = area['visible']
base_plugin = main_window.get_plugin(area['plugin_ids'][0], error=False)
if base_plugin:
plugin_ids = area['plugin_ids'][1:]
hidden_plugin_ids = area['hidden_plugin_ids']
for plugin_id in plugin_ids:
current_plugin = main_window.get_plugin(plugin_id, error=False)
if current_plugin:
if ((plugin_id in unassgined_plugin_ids) and hasattr(current_plugin, 'TABIFY')):
plugins_to_tabify.append((current_plugin, base_plugin))
else:
main_window.tabify_plugins(base_plugin, current_plugin)
if (plugin_id not in hidden_plugin_ids):
current_plugin.toggle_view(area_visible)
else:
current_plugin.toggle_view(False)
if area['visible']:
base_plugin.dockwidget.show()
base_plugin.dockwidget.raise_()
for (plugin, base_plugin) in plugins_to_tabify:
if (not main_window.tabify_plugin(plugin)):
main_window.tabify_plugins(base_plugin, plugin)
current_plugin.toggle_view(False)
column_docks = []
column_stretches = []
for (key, dock) in docks.items():
for (col, stretch) in self._column_stretchs.items():
if ((key[1] == col) and (dock.col_span == 1)):
column_docks.append(dock)
column_stretches.append(stretch)
row_docks = []
row_stretches = []
for (key, dock) in docks.items():
for (row, stretch) in self._row_stretchs.items():
if ((key[0] == row) and (dock.row_span == 1)):
row_docks.append(dock)
row_stretches.append(stretch)
main_window.showMaximized()
main_window.resizeDocks(column_docks, column_stretches, Qt.Horizontal)
main_window.resizeDocks(row_docks, row_stretches, Qt.Vertical) | def set_main_window_layout(self, main_window, dockable_plugins):
'\n Set the given mainwindow layout.\n\n First validate the current layout definition, then clear the mainwindow\n current layout and finally calculate and set the new layout.\n '
all_plugin_ids = []
for plugin in dockable_plugins:
all_plugin_ids.append(plugin.NAME)
plugin.toggle_view(False)
patched_default_area = copy.deepcopy(self._default_area)
unassgined_plugin_ids = list((set(self._plugin_ids) ^ set(all_plugin_ids)))
patched_default_area['plugin_ids'] += unassgined_plugin_ids
patched_default_area['hidden_plugin_ids'] += unassgined_plugin_ids
patched_areas = [(patched_default_area if area['default'] else area) for area in self._areas]
docks = {}
for area in patched_areas:
current_area = area
plugin_id = current_area['plugin_ids'][0]
plugin = main_window.get_plugin(plugin_id, error=False)
if plugin:
dock = plugin.dockwidget
docks[(current_area['row'], current_area['column'])] = dock
dock.area = area['area']
dock.col_span = area['col_span']
dock.row_span = area['row_span']
plugin.toggle_view(area['visible'])
layout_data = []
direction = Qt.Horizontal
for row in range(0, (self._rows + 1)):
dock = None
for col in range(0, (self._cols + 1)):
key = (row, col)
if (key in docks):
if (dock is None):
dock = docks[key]
else:
layout_data.append(((1 / docks[key].area), key, dock, docks[key], direction))
dock = docks[key]
main_window.addDockWidget(Qt.LeftDockWidgetArea, dock, direction)
direction = Qt.Vertical
for col in range(0, (self._cols + 1)):
dock = None
for row in range(0, (self._rows + 1)):
key = (row, col)
if (key in docks):
if (dock is None):
dock = docks[key]
else:
layout_data.append(((1 / docks[key].area), key, dock, docks[key], direction))
dock = docks[key]
sorted_data = sorted(layout_data, key=(lambda x: (x[0], x[1])))
for (area, key, first, second, direction) in sorted_data:
main_window.splitDockWidget(first, second, direction)
plugins_to_tabify = []
for area in patched_areas:
area_visible = area['visible']
base_plugin = main_window.get_plugin(area['plugin_ids'][0], error=False)
if base_plugin:
plugin_ids = area['plugin_ids'][1:]
hidden_plugin_ids = area['hidden_plugin_ids']
for plugin_id in plugin_ids:
current_plugin = main_window.get_plugin(plugin_id, error=False)
if current_plugin:
if ((plugin_id in unassgined_plugin_ids) and hasattr(current_plugin, 'TABIFY')):
plugins_to_tabify.append((current_plugin, base_plugin))
else:
main_window.tabify_plugins(base_plugin, current_plugin)
if (plugin_id not in hidden_plugin_ids):
current_plugin.toggle_view(area_visible)
else:
current_plugin.toggle_view(False)
if area['visible']:
base_plugin.dockwidget.show()
base_plugin.dockwidget.raise_()
for (plugin, base_plugin) in plugins_to_tabify:
if (not main_window.tabify_plugin(plugin)):
main_window.tabify_plugins(base_plugin, plugin)
current_plugin.toggle_view(False)
column_docks = []
column_stretches = []
for (key, dock) in docks.items():
for (col, stretch) in self._column_stretchs.items():
if ((key[1] == col) and (dock.col_span == 1)):
column_docks.append(dock)
column_stretches.append(stretch)
row_docks = []
row_stretches = []
for (key, dock) in docks.items():
for (row, stretch) in self._row_stretchs.items():
if ((key[0] == row) and (dock.row_span == 1)):
row_docks.append(dock)
row_stretches.append(stretch)
main_window.showMaximized()
main_window.resizeDocks(column_docks, column_stretches, Qt.Horizontal)
main_window.resizeDocks(row_docks, row_stretches, Qt.Vertical)<|docstring|>Set the given mainwindow layout.
First validate the current layout definition, then clear the mainwindow
current layout and finally calculate and set the new layout.<|endoftext|> |
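A hedged sketch of applying a layout to a running main window; how the dockable plugins are collected is an assumption, the only firm requirement being that each item exposes NAME, dockwidget and toggle_view.

dockable_plugins = list(main_window.get_dockable_plugins())  # accessor name assumed
layout._check_layout_validity()
layout.set_main_window_layout(main_window, dockable_plugins)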
270362003de44c4a2c9a0fb76bdbc5b5e2e2358ed101bbf70f2cd9fc053182b7 | def load_vgg(sess, vgg_path):
'\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n '
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return (image_input, keep_prob, layer3_out, layer4_out, layer7_out) | Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out) | main.py | load_vgg | shjzhao/CarND-Semantic-Segmentation | 0 | python | def load_vgg(sess, vgg_path):
'\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n '
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return (image_input, keep_prob, layer3_out, layer4_out, layer7_out) | def load_vgg(sess, vgg_path):
'\n Load Pretrained VGG Model into TensorFlow.\n :param sess: TensorFlow Session\n :param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"\n :return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)\n '
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
image_input = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3_out = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4_out = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7_out = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return (image_input, keep_prob, layer3_out, layer4_out, layer7_out)<|docstring|>Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)<|endoftext|> |
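A minimal usage sketch for load_vgg, assuming the pretrained VGG16 checkpoint has been extracted to ./data/vgg as in the project's helper scripts.

import os
import tensorflow as tf

vgg_path = os.path.join('./data', 'vgg')  # assumed download location
with tf.Session() as sess:
    image_input, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
    print(layer3.get_shape(), layer4.get_shape(), layer7.get_shape())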
26d87776437348743cd8553866b79740b4df912e99e3f45be1883e8948523895 | def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
'\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n '
vgg_layer7_conv = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer7_upsample = tf.layers.conv2d_transpose(vgg_layer7_conv, num_classes, 4, strides=(2, 2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer4_conv = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
layer4 = tf.add(vgg_layer7_upsample, vgg_layer4_conv)
layer3 = tf.layers.conv2d_transpose(layer4, num_classes, 4, strides=(2, 2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer3_conv = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
layer3 = tf.add(layer3, vgg_layer3_conv)
output = tf.layers.conv2d_transpose(layer3, num_classes, 16, strides=(8, 8), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
return output | Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output | main.py | layers | shjzhao/CarND-Semantic-Segmentation | 0 | python | def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
'\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n '
vgg_layer7_conv = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer7_upsample = tf.layers.conv2d_transpose(vgg_layer7_conv, num_classes, 4, strides=(2, 2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer4_conv = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
layer4 = tf.add(vgg_layer7_upsample, vgg_layer4_conv)
layer3 = tf.layers.conv2d_transpose(layer4, num_classes, 4, strides=(2, 2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer3_conv = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
layer3 = tf.add(layer3, vgg_layer3_conv)
output = tf.layers.conv2d_transpose(layer3, num_classes, 16, strides=(8, 8), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
return output | def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
'\n Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.\n :param vgg_layer3_out: TF Tensor for VGG Layer 3 output\n :param vgg_layer4_out: TF Tensor for VGG Layer 4 output\n :param vgg_layer7_out: TF Tensor for VGG Layer 7 output\n :param num_classes: Number of classes to classify\n :return: The Tensor for the last layer of output\n '
vgg_layer7_conv = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same', kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer7_upsample = tf.layers.conv2d_transpose(vgg_layer7_conv, num_classes, 4, strides=(2, 2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer4_conv = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
layer4 = tf.add(vgg_layer7_upsample, vgg_layer4_conv)
layer3 = tf.layers.conv2d_transpose(layer4, num_classes, 4, strides=(2, 2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
vgg_layer3_conv = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
layer3 = tf.add(layer3, vgg_layer3_conv)
output = tf.layers.conv2d_transpose(layer3, num_classes, 16, strides=(8, 8), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=0.01), kernel_regularizer=tf.contrib.layers.l2_regularizer(0.001))
return output<|docstring|>Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output<|endoftext|> |
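A quick shape check on the `layers` record above: the two stride-2 transposed convolutions and the final stride-8 one multiply the spatial size by 2 * 2 * 8 = 32, exactly the VGG downsampling factor, so the logits come back at input resolution. A TF1 sketch with stand-in placeholders (the 160x576 input size and the 256/512/4096 tap widths are illustrative assumptions):

    import tensorflow as tf

    num_classes = 2
    # Stand-ins for the VGG taps: layer3 is 8x, layer4 is 16x, layer7 is 32x downsampled.
    layer3 = tf.placeholder(tf.float32, (None, 20, 72, 256))
    layer4 = tf.placeholder(tf.float32, (None, 10, 36, 512))
    layer7 = tf.placeholder(tf.float32, (None, 5, 18, 4096))
    logits = layers(layer3, layer4, layer7, num_classes)  # `layers` from the record above
    print(logits.shape)  # expect (?, 160, 576, 2): 5 * 32 = 160 and 18 * 32 = 576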
d3a03ff600f4c40c0ab5aa87d6147004db40e073bb2926d3d7fc4018bd7f3e36 | def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
'\n Build the TensorFlow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n '
logit = tf.reshape(nn_last_layer, ((- 1), num_classes))
label = tf.reshape(correct_label, ((- 1), num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cross_entropy_loss)
return (logit, train_op, cross_entropy_loss) | Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss) | main.py | optimize | shjzhao/CarND-Semantic-Segmentation | 0 | python | def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
'\n Build the TensorFlow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n '
logit = tf.reshape(nn_last_layer, ((- 1), num_classes))
label = tf.reshape(correct_label, ((- 1), num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cross_entropy_loss)
return (logit, train_op, cross_entropy_loss) | def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
'\n Build the TensorFlow loss and optimizer operations.\n :param nn_last_layer: TF Tensor of the last layer in the neural network\n :param correct_label: TF Placeholder for the correct label image\n :param learning_rate: TF Placeholder for the learning rate\n :param num_classes: Number of classes to classify\n :return: Tuple of (logits, train_op, cross_entropy_loss)\n '
logit = tf.reshape(nn_last_layer, ((- 1), num_classes))
label = tf.reshape(correct_label, ((- 1), num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cross_entropy_loss)
return (logit, train_op, cross_entropy_loss)<|docstring|>Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)<|endoftext|> |
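One detail the `optimize` record above leaves out is that the L2 penalties registered by `layers` through kernel_regularizer are only collected by TF1, never applied. A possible variant that folds them into the loss (a sketch under that assumption, not the record's implementation):

    import tensorflow as tf

    def optimize_with_l2(nn_last_layer, correct_label, learning_rate, num_classes):
        # Same flattening as the record above.
        logits = tf.reshape(nn_last_layer, (-1, num_classes))
        labels = tf.reshape(correct_label, (-1, num_classes))
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        # kernel_regularizer only registers penalties in a collection; add them explicitly.
        reg_losses = tf.losses.get_regularization_losses()
        if reg_losses:
            loss = loss + tf.add_n(reg_losses)
        train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        return logits, train_op, loss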
b2fc68ff07b9e01714b8b6fd5807532c8947d4f7e5b85b00d0035f6aa6b600d7 | def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate):
'\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n '
sess.run(tf.global_variables_initializer())
tf.logging.info('Training begin...')
for i in range(epochs):
tf.logging.info('EPOCH {} training ...'.format((i + 1)))
for (image, label) in get_batches_fn(batch_size):
(_, loss) = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.001})
tf.logging.info('Loss: = {:.3f}'.format(loss)) | Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate | main.py | train_nn | shjzhao/CarND-Semantic-Segmentation | 0 | python | def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate):
'\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n '
sess.run(tf.global_variables_initializer())
tf.logging.info('Training begin...')
for i in range(epochs):
tf.logging.info('EPOCH {} training ...'.format((i + 1)))
for (image, label) in get_batches_fn(batch_size):
(_, loss) = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.001})
tf.logging.info('Loss: = {:.3f}'.format(loss)) | def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate):
'\n Train neural network and print out the loss during training.\n :param sess: TF Session\n :param epochs: Number of epochs\n :param batch_size: Batch size\n :param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)\n :param train_op: TF Operation to train the neural network\n :param cross_entropy_loss: TF Tensor for the amount of loss\n :param input_image: TF Placeholder for input images\n :param correct_label: TF Placeholder for label images\n :param keep_prob: TF Placeholder for dropout keep probability\n :param learning_rate: TF Placeholder for learning rate\n '
sess.run(tf.global_variables_initializer())
tf.logging.info('Training begin...')
for i in range(epochs):
tf.logging.info('EPOCH {} training ...'.format((i + 1)))
for (image, label) in get_batches_fn(batch_size):
(_, loss) = sess.run([train_op, cross_entropy_loss], feed_dict={input_image: image, correct_label: label, keep_prob: 0.5, learning_rate: 0.001})
tf.logging.info('Loss: = {:.3f}'.format(loss))<|docstring|>Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate<|endoftext|> |
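The `get_batches_fn` argument that `train_nn` consumes above is a generator factory yielding (image_batch, label_batch) pairs. A minimal sketch, assuming the images and one-hot labels are already loaded as NumPy arrays (the names and shapes are illustrative, not taken from the record):

    import numpy as np

    def make_get_batches_fn(images, labels):
        # images: (N, H, W, 3) floats; labels: (N, H, W, num_classes) one-hot.
        def get_batches_fn(batch_size):
            order = np.random.permutation(len(images))
            for start in range(0, len(images), batch_size):
                pick = order[start:start + batch_size]
                yield images[pick], labels[pick]
        return get_batches_fn

train_nn then iterates it exactly as in the record, `for (image, label) in get_batches_fn(batch_size)`.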
e2929b4a8e23faee04357a1951bf0d8311cf6f0eefaaa7d686e12436bab8fbd8 | def batch_callfunction_decode(endpoint, datalist, outtypes, height=None, needidx=False):
'\n datalist: [contract_address, funcname(arg_type_list), encoded_arguments]\n outtypes: list of [return values\' type list]\n Example:\n data = batch_callfunction_decode(H, [[addr, "symbol()", ""] for addr in addrs], [["string"]])\n Depends on eth_abi package\n '
import eth_abi
if (not height):
height = 'latest'
if (not isinstance(outtypes[0], list)):
outtypes = ([outtypes] * len(datalist))
data = batch_callfunction(endpoint, datalist, height)
res = []
for (i, item) in data:
if (not item):
res.append((i, None))
else:
if (outtypes[i] == ['hex']):
d = int(item, 16)
else:
d = eth_abi.decode_abi(outtypes[i], bd(item))
if (len(d) == 1):
d = d[0]
res.append((i, d))
if needidx:
return res
else:
return [i[1] for i in res] | datalist: [contract_address, funcname(arg_type_list), encoded_arguments]
outtypes: list of [return values' type list]
Example:
data = batch_callfunction_decode(H, [[addr, "symbol()", ""] for addr in addrs], [["string"]])
Depends on eth_abi package | base.py | batch_callfunction_decode | zjuchenyuan/whalerank | 8 | python | def batch_callfunction_decode(endpoint, datalist, outtypes, height=None, needidx=False):
'\n datalist: [contract_address, funcname(arg_type_list), encoded_arguments]\n outtypes: list of [return values\' type list]\n Example:\n data = batch_callfunction_decode(H, [[addr, "symbol()", ""] for addr in addrs], [["string"]])\n Depends on eth_abi package\n '
import eth_abi
if (not height):
height = 'latest'
if (not isinstance(outtypes[0], list)):
outtypes = ([outtypes] * len(datalist))
data = batch_callfunction(endpoint, datalist, height)
res = []
for (i, item) in data:
if (not item):
res.append((i, None))
else:
if (outtypes[i] == ['hex']):
d = int(item, 16)
else:
d = eth_abi.decode_abi(outtypes[i], bd(item))
if (len(d) == 1):
d = d[0]
res.append((i, d))
if needidx:
return res
else:
return [i[1] for i in res] | def batch_callfunction_decode(endpoint, datalist, outtypes, height=None, needidx=False):
'\n datalist: [contract_address, funcname(arg_type_list), encoded_arguments]\n outtypes: list of [return values\' type list]\n Example:\n data = batch_callfunction_decode(H, [[addr, "symbol()", ""] for addr in addrs], [["string"]])\n Depends on eth_abi package\n '
import eth_abi
if (not height):
height = 'latest'
if (not isinstance(outtypes[0], list)):
outtypes = ([outtypes] * len(datalist))
data = batch_callfunction(endpoint, datalist, height)
res = []
for (i, item) in data:
if (not item):
res.append((i, None))
else:
if (outtypes[i] == ['hex']):
d = int(item, 16)
else:
d = eth_abi.decode_abi(outtypes[i], bd(item))
if (len(d) == 1):
d = d[0]
res.append((i, d))
if needidx:
return res
else:
return [i[1] for i in res]<|docstring|>datalist: [contract_address, funcname(arg_type_list), encoded_arguments]
outtypes: list of [return values' type list]
Example:
data = batch_callfunction_decode(H, [[addr, "symbol()", ""] for addr in addrs], [["string"]])
Depends on eth_abi package<|endoftext|> |
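A hypothetical call site for the record above, reading ERC-20 metadata in bulk. The endpoint URL, the address list, and the companion helpers `batch_callfunction` and `bd` that the record relies on are assumed to come from the same module; passing a single flat type list such as ['string'] lets the helper broadcast it over every request:

    addrs = ['0x...', '0x...']  # contract addresses (placeholders)
    # "symbol()" / "decimals()" are the plain selectors; '' means no encoded arguments.
    symbols = batch_callfunction_decode(endpoint, [[a, 'symbol()', ''] for a in addrs], ['string'])
    decimals = batch_callfunction_decode(endpoint, [[a, 'decimals()', ''] for a in addrs], ['hex'])
    for addr, sym, dec in zip(addrs, symbols, decimals):
        print(addr, sym, dec)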
576aa94f48b79a8abc854078ec16f2a571285fb4b06e82745cf3cbe3b03d22f8 | def create_evaluate_ops(task_prefix: str, data_format: str, input_paths: List[str], prediction_path: str, metric_fn_and_keys: Tuple[(T, Iterable[str])], validate_fn: T, batch_prediction_job_id: Optional[str]=None, region: Optional[str]=None, project_id: Optional[str]=None, dataflow_options: Optional[Dict]=None, model_uri: Optional[str]=None, model_name: Optional[str]=None, version_name: Optional[str]=None, dag: Optional[DAG]=None, py_interpreter='python3'):
'\n Creates Operators needed for model evaluation and returns.\n\n It gets prediction over inputs via Cloud ML Engine BatchPrediction API by\n calling MLEngineBatchPredictionOperator, then summarize and validate\n the result via Cloud Dataflow using DataFlowPythonOperator.\n\n For details and pricing about Batch prediction, please refer to the website\n https://cloud.google.com/ml-engine/docs/how-tos/batch-predict\n and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/\n\n It returns three chained operators for prediction, summary, and validation,\n named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,\n respectively.\n (``<prefix>`` should contain only alphanumeric characters or hyphen.)\n\n The upstream and downstream can be set accordingly like:\n\n .. code-block:: python\n\n pred, _, val = create_evaluate_ops(...)\n pred.set_upstream(upstream_op)\n ...\n downstream_op.set_upstream(val)\n\n Callers will provide two python callables, metric_fn and validate_fn, in\n order to customize the evaluation behavior as they wish.\n\n - metric_fn receives a dictionary per instance derived from json in the\n batch prediction result. The keys might vary depending on the model.\n It should return a tuple of metrics.\n - validation_fn receives a dictionary of the averaged metrics that metric_fn\n generated over all instances.\n The key/value of the dictionary matches to what\'s given by\n metric_fn_and_keys arg.\n The dictionary contains an additional metric, \'count\' to represent the\n total number of instances received for evaluation.\n The function would raise an exception to mark the task as failed, in a\n case the validation result is not okay to proceed (i.e. to set the trained\n version as default).\n\n Typical examples are like this:\n\n .. code-block:: python\n\n def get_metric_fn_and_keys():\n import math # imports should be outside of the metric_fn below.\n\n def error_and_squared_error(inst):\n label = float(inst["input_label"])\n classes = float(inst["classes"]) # 0 or 1\n err = abs(classes - label)\n squared_err = math.pow(classes - label, 2)\n return (err, squared_err) # returns a tuple.\n\n return error_and_squared_error, ["err", "mse"] # key order must match.\n\n\n def validate_err_and_count(summary):\n if summary["err"] > 0.2:\n raise ValueError("Too high err>0.2; summary=%s" % summary)\n if summary["mse"] > 0.05:\n raise ValueError("Too high mse>0.05; summary=%s" % summary)\n if summary["count"] < 1000:\n raise ValueError("Too few instances<1000; summary=%s" % summary)\n return summary\n\n For the details on the other BatchPrediction-related arguments (project_id,\n job_id, region, data_format, input_paths, prediction_path, model_uri),\n please refer to MLEngineBatchPredictionOperator too.\n\n :param task_prefix: a prefix for the tasks. 
Only alphanumeric characters and\n hyphen are allowed (no underscores), since this will be used as dataflow\n job name, which doesn\'t allow other characters.\n :type task_prefix: str\n\n :param data_format: either of \'TEXT\', \'TF_RECORD\', \'TF_RECORD_GZIP\'\n :type data_format: str\n\n :param input_paths: a list of input paths to be sent to BatchPrediction.\n :type input_paths: list[str]\n\n :param prediction_path: GCS path to put the prediction results in.\n :type prediction_path: str\n\n :param metric_fn_and_keys: a tuple of metric_fn and metric_keys:\n\n - metric_fn is a function that accepts a dictionary (for an instance),\n and returns a tuple of metric(s) that it calculates.\n\n - metric_keys is a list of strings to denote the key of each metric.\n :type metric_fn_and_keys: tuple of a function and a list[str]\n\n :param validate_fn: a function to validate whether the averaged metric(s) is\n good enough to push the model.\n :type validate_fn: function\n\n :param batch_prediction_job_id: the id to use for the Cloud ML Batch\n prediction job. Passed directly to the MLEngineBatchPredictionOperator as\n the job_id argument.\n :type batch_prediction_job_id: str\n\n :param project_id: the Google Cloud project id in which to execute\n Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`\'s\n `default_args[\'project_id\']` will be used.\n :type project_id: str\n\n :param region: the Google Cloud region in which to execute Cloud ML\n Batch Prediction and Dataflow jobs. If None, then the `dag`\'s\n `default_args[\'region\']` will be used.\n :type region: str\n\n :param dataflow_options: options to run Dataflow jobs. If None, then the\n `dag`\'s `default_args[\'dataflow_default_options\']` will be used.\n :type dataflow_options: dictionary\n\n :param model_uri: GCS path of the model exported by Tensorflow using\n ``tensorflow.estimator.export_savedmodel()``. It cannot be used with\n model_name or version_name below. See MLEngineBatchPredictionOperator for\n more detail.\n :type model_uri: str\n\n :param model_name: Used to indicate a model to use for prediction. Can be\n used in combination with version_name, but cannot be used together with\n model_uri. See MLEngineBatchPredictionOperator for more detail. If None,\n then the `dag`\'s `default_args[\'model_name\']` will be used.\n :type model_name: str\n\n :param version_name: Used to indicate a model version to use for prediction,\n in combination with model_name. Cannot be used together with model_uri.\n See MLEngineBatchPredictionOperator for more detail. If None, then the\n `dag`\'s `default_args[\'version_name\']` will be used.\n :type version_name: str\n\n :param dag: The `DAG` to use for all Operators.\n :type dag: airflow.models.DAG\n\n :param py_interpreter: Python version of the beam pipeline.\n If None, this defaults to the python3.\n To track python versions supported by beam and related\n issues check: https://issues.apache.org/jira/browse/BEAM-1251\n :type py_interpreter: str\n\n :returns: a tuple of three operators, (prediction, summary, validation)\n :rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,\n PythonOperator)\n '
batch_prediction_job_id = (batch_prediction_job_id or '')
dataflow_options = (dataflow_options or {})
region = (region or '')
if (not re.match('^[a-zA-Z][-A-Za-z0-9]*$', task_prefix)):
raise AirflowException(('Malformed task_id for DataFlowPythonOperator (only alphanumeric and hyphens are allowed but got: ' + task_prefix))
(metric_fn, metric_keys) = metric_fn_and_keys
if (not callable(metric_fn)):
raise AirflowException('`metric_fn` param must be callable.')
if (not callable(validate_fn)):
raise AirflowException('`validate_fn` param must be callable.')
if ((dag is not None) and (dag.default_args is not None)):
default_args = dag.default_args
project_id = (project_id or default_args.get('project_id'))
region = (region or default_args['region'])
model_name = (model_name or default_args.get('model_name'))
version_name = (version_name or default_args.get('version_name'))
dataflow_options = (dataflow_options or default_args.get('dataflow_default_options'))
evaluate_prediction = MLEngineStartBatchPredictionJobOperator(task_id=(task_prefix + '-prediction'), project_id=project_id, job_id=batch_prediction_job_id, region=region, data_format=data_format, input_paths=input_paths, output_path=prediction_path, uri=model_uri, model_name=model_name, version_name=version_name, dag=dag)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True)).decode()
evaluate_summary = BeamRunPythonPipelineOperator(task_id=(task_prefix + '-summary'), py_file=os.path.join(os.path.dirname(__file__), 'mlengine_prediction_summary.py'), default_pipeline_options=dataflow_options, pipeline_options={'prediction_path': prediction_path, 'metric_fn_encoded': metric_fn_encoded, 'metric_keys': ','.join(metric_keys)}, py_interpreter=py_interpreter, py_requirements=['apache-beam[gcp]>=2.14.0'], dag=dag)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, templates_dict, **kwargs):
prediction_path = templates_dict['prediction_path']
(scheme, bucket, obj, _, _) = urlsplit(prediction_path)
if ((scheme != 'gs') or (not bucket) or (not obj)):
raise ValueError(f'Wrong format prediction_path: {prediction_path}')
summary = os.path.join(obj.strip('/'), 'prediction.summary.json')
gcs_hook = GCSHook()
summary = json.loads(gcs_hook.download(bucket, summary))
return validate_fn(summary)
evaluate_validation = PythonOperator(task_id=(task_prefix + '-validation'), python_callable=apply_validate_fn, templates_dict={'prediction_path': prediction_path}, dag=dag)
evaluate_validation.set_upstream(evaluate_summary)
return (evaluate_prediction, evaluate_summary, evaluate_validation) | Creates Operators needed for model evaluation and returns.
It gets prediction over inputs via Cloud ML Engine BatchPrediction API by
calling MLEngineBatchPredictionOperator, then summarize and validate
the result via Cloud Dataflow using DataFlowPythonOperator.
For details and pricing about Batch prediction, please refer to the website
https://cloud.google.com/ml-engine/docs/how-tos/batch-predict
and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/
It returns three chained operators for prediction, summary, and validation,
named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,
respectively.
(``<prefix>`` should contain only alphanumeric characters or hyphen.)
The upstream and downstream can be set accordingly like:
.. code-block:: python
pred, _, val = create_evaluate_ops(...)
pred.set_upstream(upstream_op)
...
downstream_op.set_upstream(val)
Callers will provide two python callables, metric_fn and validate_fn, in
order to customize the evaluation behavior as they wish.
- metric_fn receives a dictionary per instance derived from json in the
batch prediction result. The keys might vary depending on the model.
It should return a tuple of metrics.
- validation_fn receives a dictionary of the averaged metrics that metric_fn
generated over all instances.
The key/value of the dictionary matches to what's given by
metric_fn_and_keys arg.
The dictionary contains an additional metric, 'count' to represent the
total number of instances received for evaluation.
The function would raise an exception to mark the task as failed, in a
case the validation result is not okay to proceed (i.e. to set the trained
version as default).
Typical examples are like this:
.. code-block:: python
def get_metric_fn_and_keys():
import math # imports should be outside of the metric_fn below.
def error_and_squared_error(inst):
label = float(inst["input_label"])
classes = float(inst["classes"]) # 0 or 1
err = abs(classes - label)
squared_err = math.pow(classes - label, 2)
return (err, squared_err) # returns a tuple.
return error_and_squared_error, ["err", "mse"] # key order must match.
def validate_err_and_count(summary):
if summary["err"] > 0.2:
raise ValueError("Too high err>0.2; summary=%s" % summary)
if summary["mse"] > 0.05:
raise ValueError("Too high mse>0.05; summary=%s" % summary)
if summary["count"] < 1000:
raise ValueError("Too few instances<1000; summary=%s" % summary)
return summary
For the details on the other BatchPrediction-related arguments (project_id,
job_id, region, data_format, input_paths, prediction_path, model_uri),
please refer to MLEngineBatchPredictionOperator too.
:param task_prefix: a prefix for the tasks. Only alphanumeric characters and
hyphen are allowed (no underscores), since this will be used as dataflow
job name, which doesn't allow other characters.
:type task_prefix: str
:param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
:type data_format: str
:param input_paths: a list of input paths to be sent to BatchPrediction.
:type input_paths: list[str]
:param prediction_path: GCS path to put the prediction results in.
:type prediction_path: str
:param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
- metric_fn is a function that accepts a dictionary (for an instance),
and returns a tuple of metric(s) that it calculates.
- metric_keys is a list of strings to denote the key of each metric.
:type metric_fn_and_keys: tuple of a function and a list[str]
:param validate_fn: a function to validate whether the averaged metric(s) is
good enough to push the model.
:type validate_fn: function
:param batch_prediction_job_id: the id to use for the Cloud ML Batch
prediction job. Passed directly to the MLEngineBatchPredictionOperator as
the job_id argument.
:type batch_prediction_job_id: str
:param project_id: the Google Cloud project id in which to execute
Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['project_id']` will be used.
:type project_id: str
:param region: the Google Cloud region in which to execute Cloud ML
Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['region']` will be used.
:type region: str
:param dataflow_options: options to run Dataflow jobs. If None, then the
`dag`'s `default_args['dataflow_default_options']` will be used.
:type dataflow_options: dictionary
:param model_uri: GCS path of the model exported by Tensorflow using
``tensorflow.estimator.export_savedmodel()``. It cannot be used with
model_name or version_name below. See MLEngineBatchPredictionOperator for
more detail.
:type model_uri: str
:param model_name: Used to indicate a model to use for prediction. Can be
used in combination with version_name, but cannot be used together with
model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
then the `dag`'s `default_args['model_name']` will be used.
:type model_name: str
:param version_name: Used to indicate a model version to use for prediction,
in combination with model_name. Cannot be used together with model_uri.
See MLEngineBatchPredictionOperator for more detail. If None, then the
`dag`'s `default_args['version_name']` will be used.
:type version_name: str
:param dag: The `DAG` to use for all Operators.
:type dag: airflow.models.DAG
:param py_interpreter: Python version of the beam pipeline.
If None, this defaults to the python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:type py_interpreter: str
:returns: a tuple of three operators, (prediction, summary, validation)
:rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,
PythonOperator) | airflow/providers/google/cloud/utils/mlengine_operator_utils.py | create_evaluate_ops | jiantao01/airflow | 15,947 | python | def create_evaluate_ops(task_prefix: str, data_format: str, input_paths: List[str], prediction_path: str, metric_fn_and_keys: Tuple[(T, Iterable[str])], validate_fn: T, batch_prediction_job_id: Optional[str]=None, region: Optional[str]=None, project_id: Optional[str]=None, dataflow_options: Optional[Dict]=None, model_uri: Optional[str]=None, model_name: Optional[str]=None, version_name: Optional[str]=None, dag: Optional[DAG]=None, py_interpreter='python3'):
'\n Creates Operators needed for model evaluation and returns.\n\n It gets prediction over inputs via Cloud ML Engine BatchPrediction API by\n calling MLEngineBatchPredictionOperator, then summarize and validate\n the result via Cloud Dataflow using DataFlowPythonOperator.\n\n For details and pricing about Batch prediction, please refer to the website\n https://cloud.google.com/ml-engine/docs/how-tos/batch-predict\n and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/\n\n It returns three chained operators for prediction, summary, and validation,\n named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,\n respectively.\n (``<prefix>`` should contain only alphanumeric characters or hyphen.)\n\n The upstream and downstream can be set accordingly like:\n\n .. code-block:: python\n\n pred, _, val = create_evaluate_ops(...)\n pred.set_upstream(upstream_op)\n ...\n downstream_op.set_upstream(val)\n\n Callers will provide two python callables, metric_fn and validate_fn, in\n order to customize the evaluation behavior as they wish.\n\n - metric_fn receives a dictionary per instance derived from json in the\n batch prediction result. The keys might vary depending on the model.\n It should return a tuple of metrics.\n - validation_fn receives a dictionary of the averaged metrics that metric_fn\n generated over all instances.\n The key/value of the dictionary matches to what\'s given by\n metric_fn_and_keys arg.\n The dictionary contains an additional metric, \'count\' to represent the\n total number of instances received for evaluation.\n The function would raise an exception to mark the task as failed, in a\n case the validation result is not okay to proceed (i.e. to set the trained\n version as default).\n\n Typical examples are like this:\n\n .. code-block:: python\n\n def get_metric_fn_and_keys():\n import math # imports should be outside of the metric_fn below.\n\n def error_and_squared_error(inst):\n label = float(inst["input_label"])\n classes = float(inst["classes"]) # 0 or 1\n err = abs(classes - label)\n squared_err = math.pow(classes - label, 2)\n return (err, squared_err) # returns a tuple.\n\n return error_and_squared_error, ["err", "mse"] # key order must match.\n\n\n def validate_err_and_count(summary):\n if summary["err"] > 0.2:\n raise ValueError("Too high err>0.2; summary=%s" % summary)\n if summary["mse"] > 0.05:\n raise ValueError("Too high mse>0.05; summary=%s" % summary)\n if summary["count"] < 1000:\n raise ValueError("Too few instances<1000; summary=%s" % summary)\n return summary\n\n For the details on the other BatchPrediction-related arguments (project_id,\n job_id, region, data_format, input_paths, prediction_path, model_uri),\n please refer to MLEngineBatchPredictionOperator too.\n\n :param task_prefix: a prefix for the tasks. 
Only alphanumeric characters and\n hyphen are allowed (no underscores), since this will be used as dataflow\n job name, which doesn\'t allow other characters.\n :type task_prefix: str\n\n :param data_format: either of \'TEXT\', \'TF_RECORD\', \'TF_RECORD_GZIP\'\n :type data_format: str\n\n :param input_paths: a list of input paths to be sent to BatchPrediction.\n :type input_paths: list[str]\n\n :param prediction_path: GCS path to put the prediction results in.\n :type prediction_path: str\n\n :param metric_fn_and_keys: a tuple of metric_fn and metric_keys:\n\n - metric_fn is a function that accepts a dictionary (for an instance),\n and returns a tuple of metric(s) that it calculates.\n\n - metric_keys is a list of strings to denote the key of each metric.\n :type metric_fn_and_keys: tuple of a function and a list[str]\n\n :param validate_fn: a function to validate whether the averaged metric(s) is\n good enough to push the model.\n :type validate_fn: function\n\n :param batch_prediction_job_id: the id to use for the Cloud ML Batch\n prediction job. Passed directly to the MLEngineBatchPredictionOperator as\n the job_id argument.\n :type batch_prediction_job_id: str\n\n :param project_id: the Google Cloud project id in which to execute\n Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`\'s\n `default_args[\'project_id\']` will be used.\n :type project_id: str\n\n :param region: the Google Cloud region in which to execute Cloud ML\n Batch Prediction and Dataflow jobs. If None, then the `dag`\'s\n `default_args[\'region\']` will be used.\n :type region: str\n\n :param dataflow_options: options to run Dataflow jobs. If None, then the\n `dag`\'s `default_args[\'dataflow_default_options\']` will be used.\n :type dataflow_options: dictionary\n\n :param model_uri: GCS path of the model exported by Tensorflow using\n ``tensorflow.estimator.export_savedmodel()``. It cannot be used with\n model_name or version_name below. See MLEngineBatchPredictionOperator for\n more detail.\n :type model_uri: str\n\n :param model_name: Used to indicate a model to use for prediction. Can be\n used in combination with version_name, but cannot be used together with\n model_uri. See MLEngineBatchPredictionOperator for more detail. If None,\n then the `dag`\'s `default_args[\'model_name\']` will be used.\n :type model_name: str\n\n :param version_name: Used to indicate a model version to use for prediction,\n in combination with model_name. Cannot be used together with model_uri.\n See MLEngineBatchPredictionOperator for more detail. If None, then the\n `dag`\'s `default_args[\'version_name\']` will be used.\n :type version_name: str\n\n :param dag: The `DAG` to use for all Operators.\n :type dag: airflow.models.DAG\n\n :param py_interpreter: Python version of the beam pipeline.\n If None, this defaults to the python3.\n To track python versions supported by beam and related\n issues check: https://issues.apache.org/jira/browse/BEAM-1251\n :type py_interpreter: str\n\n :returns: a tuple of three operators, (prediction, summary, validation)\n :rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,\n PythonOperator)\n '
batch_prediction_job_id = (batch_prediction_job_id or '')
dataflow_options = (dataflow_options or {})
region = (region or '')
if (not re.match('^[a-zA-Z][-A-Za-z0-9]*$', task_prefix)):
raise AirflowException(('Malformed task_id for DataFlowPythonOperator (only alphanumeric and hyphens are allowed but got: ' + task_prefix))
(metric_fn, metric_keys) = metric_fn_and_keys
if (not callable(metric_fn)):
raise AirflowException('`metric_fn` param must be callable.')
if (not callable(validate_fn)):
raise AirflowException('`validate_fn` param must be callable.')
if ((dag is not None) and (dag.default_args is not None)):
default_args = dag.default_args
project_id = (project_id or default_args.get('project_id'))
region = (region or default_args['region'])
model_name = (model_name or default_args.get('model_name'))
version_name = (version_name or default_args.get('version_name'))
dataflow_options = (dataflow_options or default_args.get('dataflow_default_options'))
evaluate_prediction = MLEngineStartBatchPredictionJobOperator(task_id=(task_prefix + '-prediction'), project_id=project_id, job_id=batch_prediction_job_id, region=region, data_format=data_format, input_paths=input_paths, output_path=prediction_path, uri=model_uri, model_name=model_name, version_name=version_name, dag=dag)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True)).decode()
evaluate_summary = BeamRunPythonPipelineOperator(task_id=(task_prefix + '-summary'), py_file=os.path.join(os.path.dirname(__file__), 'mlengine_prediction_summary.py'), default_pipeline_options=dataflow_options, pipeline_options={'prediction_path': prediction_path, 'metric_fn_encoded': metric_fn_encoded, 'metric_keys': ','.join(metric_keys)}, py_interpreter=py_interpreter, py_requirements=['apache-beam[gcp]>=2.14.0'], dag=dag)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, templates_dict, **kwargs):
prediction_path = templates_dict['prediction_path']
(scheme, bucket, obj, _, _) = urlsplit(prediction_path)
if ((scheme != 'gs') or (not bucket) or (not obj)):
raise ValueError(f'Wrong format prediction_path: {prediction_path}')
summary = os.path.join(obj.strip('/'), 'prediction.summary.json')
gcs_hook = GCSHook()
summary = json.loads(gcs_hook.download(bucket, summary))
return validate_fn(summary)
evaluate_validation = PythonOperator(task_id=(task_prefix + '-validation'), python_callable=apply_validate_fn, templates_dict={'prediction_path': prediction_path}, dag=dag)
evaluate_validation.set_upstream(evaluate_summary)
return (evaluate_prediction, evaluate_summary, evaluate_validation) | def create_evaluate_ops(task_prefix: str, data_format: str, input_paths: List[str], prediction_path: str, metric_fn_and_keys: Tuple[(T, Iterable[str])], validate_fn: T, batch_prediction_job_id: Optional[str]=None, region: Optional[str]=None, project_id: Optional[str]=None, dataflow_options: Optional[Dict]=None, model_uri: Optional[str]=None, model_name: Optional[str]=None, version_name: Optional[str]=None, dag: Optional[DAG]=None, py_interpreter='python3'):
'\n Creates Operators needed for model evaluation and returns.\n\n It gets prediction over inputs via Cloud ML Engine BatchPrediction API by\n calling MLEngineBatchPredictionOperator, then summarize and validate\n the result via Cloud Dataflow using DataFlowPythonOperator.\n\n For details and pricing about Batch prediction, please refer to the website\n https://cloud.google.com/ml-engine/docs/how-tos/batch-predict\n and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/\n\n It returns three chained operators for prediction, summary, and validation,\n named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,\n respectively.\n (``<prefix>`` should contain only alphanumeric characters or hyphen.)\n\n The upstream and downstream can be set accordingly like:\n\n .. code-block:: python\n\n pred, _, val = create_evaluate_ops(...)\n pred.set_upstream(upstream_op)\n ...\n downstream_op.set_upstream(val)\n\n Callers will provide two python callables, metric_fn and validate_fn, in\n order to customize the evaluation behavior as they wish.\n\n - metric_fn receives a dictionary per instance derived from json in the\n batch prediction result. The keys might vary depending on the model.\n It should return a tuple of metrics.\n - validation_fn receives a dictionary of the averaged metrics that metric_fn\n generated over all instances.\n The key/value of the dictionary matches to what\'s given by\n metric_fn_and_keys arg.\n The dictionary contains an additional metric, \'count\' to represent the\n total number of instances received for evaluation.\n The function would raise an exception to mark the task as failed, in a\n case the validation result is not okay to proceed (i.e. to set the trained\n version as default).\n\n Typical examples are like this:\n\n .. code-block:: python\n\n def get_metric_fn_and_keys():\n import math # imports should be outside of the metric_fn below.\n\n def error_and_squared_error(inst):\n label = float(inst["input_label"])\n classes = float(inst["classes"]) # 0 or 1\n err = abs(classes - label)\n squared_err = math.pow(classes - label, 2)\n return (err, squared_err) # returns a tuple.\n\n return error_and_squared_error, ["err", "mse"] # key order must match.\n\n\n def validate_err_and_count(summary):\n if summary["err"] > 0.2:\n raise ValueError("Too high err>0.2; summary=%s" % summary)\n if summary["mse"] > 0.05:\n raise ValueError("Too high mse>0.05; summary=%s" % summary)\n if summary["count"] < 1000:\n raise ValueError("Too few instances<1000; summary=%s" % summary)\n return summary\n\n For the details on the other BatchPrediction-related arguments (project_id,\n job_id, region, data_format, input_paths, prediction_path, model_uri),\n please refer to MLEngineBatchPredictionOperator too.\n\n :param task_prefix: a prefix for the tasks. 
Only alphanumeric characters and\n hyphen are allowed (no underscores), since this will be used as dataflow\n job name, which doesn\'t allow other characters.\n :type task_prefix: str\n\n :param data_format: either of \'TEXT\', \'TF_RECORD\', \'TF_RECORD_GZIP\'\n :type data_format: str\n\n :param input_paths: a list of input paths to be sent to BatchPrediction.\n :type input_paths: list[str]\n\n :param prediction_path: GCS path to put the prediction results in.\n :type prediction_path: str\n\n :param metric_fn_and_keys: a tuple of metric_fn and metric_keys:\n\n - metric_fn is a function that accepts a dictionary (for an instance),\n and returns a tuple of metric(s) that it calculates.\n\n - metric_keys is a list of strings to denote the key of each metric.\n :type metric_fn_and_keys: tuple of a function and a list[str]\n\n :param validate_fn: a function to validate whether the averaged metric(s) is\n good enough to push the model.\n :type validate_fn: function\n\n :param batch_prediction_job_id: the id to use for the Cloud ML Batch\n prediction job. Passed directly to the MLEngineBatchPredictionOperator as\n the job_id argument.\n :type batch_prediction_job_id: str\n\n :param project_id: the Google Cloud project id in which to execute\n Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`\'s\n `default_args[\'project_id\']` will be used.\n :type project_id: str\n\n :param region: the Google Cloud region in which to execute Cloud ML\n Batch Prediction and Dataflow jobs. If None, then the `dag`\'s\n `default_args[\'region\']` will be used.\n :type region: str\n\n :param dataflow_options: options to run Dataflow jobs. If None, then the\n `dag`\'s `default_args[\'dataflow_default_options\']` will be used.\n :type dataflow_options: dictionary\n\n :param model_uri: GCS path of the model exported by Tensorflow using\n ``tensorflow.estimator.export_savedmodel()``. It cannot be used with\n model_name or version_name below. See MLEngineBatchPredictionOperator for\n more detail.\n :type model_uri: str\n\n :param model_name: Used to indicate a model to use for prediction. Can be\n used in combination with version_name, but cannot be used together with\n model_uri. See MLEngineBatchPredictionOperator for more detail. If None,\n then the `dag`\'s `default_args[\'model_name\']` will be used.\n :type model_name: str\n\n :param version_name: Used to indicate a model version to use for prediction,\n in combination with model_name. Cannot be used together with model_uri.\n See MLEngineBatchPredictionOperator for more detail. If None, then the\n `dag`\'s `default_args[\'version_name\']` will be used.\n :type version_name: str\n\n :param dag: The `DAG` to use for all Operators.\n :type dag: airflow.models.DAG\n\n :param py_interpreter: Python version of the beam pipeline.\n If None, this defaults to the python3.\n To track python versions supported by beam and related\n issues check: https://issues.apache.org/jira/browse/BEAM-1251\n :type py_interpreter: str\n\n :returns: a tuple of three operators, (prediction, summary, validation)\n :rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,\n PythonOperator)\n '
batch_prediction_job_id = (batch_prediction_job_id or '')
dataflow_options = (dataflow_options or {})
region = (region or '')
if (not re.match('^[a-zA-Z][-A-Za-z0-9]*$', task_prefix)):
raise AirflowException(('Malformed task_id for DataFlowPythonOperator (only alphanumeric and hyphens are allowed but got: ' + task_prefix))
(metric_fn, metric_keys) = metric_fn_and_keys
if (not callable(metric_fn)):
raise AirflowException('`metric_fn` param must be callable.')
if (not callable(validate_fn)):
raise AirflowException('`validate_fn` param must be callable.')
if ((dag is not None) and (dag.default_args is not None)):
default_args = dag.default_args
project_id = (project_id or default_args.get('project_id'))
region = (region or default_args['region'])
model_name = (model_name or default_args.get('model_name'))
version_name = (version_name or default_args.get('version_name'))
dataflow_options = (dataflow_options or default_args.get('dataflow_default_options'))
evaluate_prediction = MLEngineStartBatchPredictionJobOperator(task_id=(task_prefix + '-prediction'), project_id=project_id, job_id=batch_prediction_job_id, region=region, data_format=data_format, input_paths=input_paths, output_path=prediction_path, uri=model_uri, model_name=model_name, version_name=version_name, dag=dag)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True)).decode()
evaluate_summary = BeamRunPythonPipelineOperator(task_id=(task_prefix + '-summary'), py_file=os.path.join(os.path.dirname(__file__), 'mlengine_prediction_summary.py'), default_pipeline_options=dataflow_options, pipeline_options={'prediction_path': prediction_path, 'metric_fn_encoded': metric_fn_encoded, 'metric_keys': ','.join(metric_keys)}, py_interpreter=py_interpreter, py_requirements=['apache-beam[gcp]>=2.14.0'], dag=dag)
evaluate_summary.set_upstream(evaluate_prediction)
def apply_validate_fn(*args, templates_dict, **kwargs):
prediction_path = templates_dict['prediction_path']
(scheme, bucket, obj, _, _) = urlsplit(prediction_path)
if ((scheme != 'gs') or (not bucket) or (not obj)):
raise ValueError(f'Wrong format prediction_path: {prediction_path}')
summary = os.path.join(obj.strip('/'), 'prediction.summary.json')
gcs_hook = GCSHook()
summary = json.loads(gcs_hook.download(bucket, summary))
return validate_fn(summary)
evaluate_validation = PythonOperator(task_id=(task_prefix + '-validation'), python_callable=apply_validate_fn, templates_dict={'prediction_path': prediction_path}, dag=dag)
evaluate_validation.set_upstream(evaluate_summary)
return (evaluate_prediction, evaluate_summary, evaluate_validation)<|docstring|>Creates Operators needed for model evaluation and returns.
It gets prediction over inputs via Cloud ML Engine BatchPrediction API by
calling MLEngineBatchPredictionOperator, then summarize and validate
the result via Cloud Dataflow using DataFlowPythonOperator.
For details and pricing about Batch prediction, please refer to the website
https://cloud.google.com/ml-engine/docs/how-tos/batch-predict
and for Cloud Dataflow, https://cloud.google.com/dataflow/docs/
It returns three chained operators for prediction, summary, and validation,
named as ``<prefix>-prediction``, ``<prefix>-summary``, and ``<prefix>-validation``,
respectively.
(``<prefix>`` should contain only alphanumeric characters or hyphen.)
The upstream and downstream can be set accordingly like:
.. code-block:: python
pred, _, val = create_evaluate_ops(...)
pred.set_upstream(upstream_op)
...
downstream_op.set_upstream(val)
Callers will provide two python callables, metric_fn and validate_fn, in
order to customize the evaluation behavior as they wish.
- metric_fn receives a dictionary per instance derived from json in the
batch prediction result. The keys might vary depending on the model.
It should return a tuple of metrics.
- validation_fn receives a dictionary of the averaged metrics that metric_fn
generated over all instances.
The key/value of the dictionary matches to what's given by
metric_fn_and_keys arg.
The dictionary contains an additional metric, 'count' to represent the
total number of instances received for evaluation.
The function would raise an exception to mark the task as failed, in a
case the validation result is not okay to proceed (i.e. to set the trained
version as default).
Typical examples are like this:
.. code-block:: python
def get_metric_fn_and_keys():
import math # imports should be outside of the metric_fn below.
def error_and_squared_error(inst):
label = float(inst["input_label"])
classes = float(inst["classes"]) # 0 or 1
err = abs(classes - label)
squared_err = math.pow(classes - label, 2)
return (err, squared_err) # returns a tuple.
return error_and_squared_error, ["err", "mse"] # key order must match.
def validate_err_and_count(summary):
if summary["err"] > 0.2:
raise ValueError("Too high err>0.2; summary=%s" % summary)
if summary["mse"] > 0.05:
raise ValueError("Too high mse>0.05; summary=%s" % summary)
if summary["count"] < 1000:
raise ValueError("Too few instances<1000; summary=%s" % summary)
return summary
For the details on the other BatchPrediction-related arguments (project_id,
job_id, region, data_format, input_paths, prediction_path, model_uri),
please refer to MLEngineBatchPredictionOperator too.
:param task_prefix: a prefix for the tasks. Only alphanumeric characters and
hyphen are allowed (no underscores), since this will be used as dataflow
job name, which doesn't allow other characters.
:type task_prefix: str
:param data_format: either of 'TEXT', 'TF_RECORD', 'TF_RECORD_GZIP'
:type data_format: str
:param input_paths: a list of input paths to be sent to BatchPrediction.
:type input_paths: list[str]
:param prediction_path: GCS path to put the prediction results in.
:type prediction_path: str
:param metric_fn_and_keys: a tuple of metric_fn and metric_keys:
- metric_fn is a function that accepts a dictionary (for an instance),
and returns a tuple of metric(s) that it calculates.
- metric_keys is a list of strings to denote the key of each metric.
:type metric_fn_and_keys: tuple of a function and a list[str]
:param validate_fn: a function to validate whether the averaged metric(s) is
good enough to push the model.
:type validate_fn: function
:param batch_prediction_job_id: the id to use for the Cloud ML Batch
prediction job. Passed directly to the MLEngineBatchPredictionOperator as
the job_id argument.
:type batch_prediction_job_id: str
:param project_id: the Google Cloud project id in which to execute
Cloud ML Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['project_id']` will be used.
:type project_id: str
:param region: the Google Cloud region in which to execute Cloud ML
Batch Prediction and Dataflow jobs. If None, then the `dag`'s
`default_args['region']` will be used.
:type region: str
:param dataflow_options: options to run Dataflow jobs. If None, then the
`dag`'s `default_args['dataflow_default_options']` will be used.
:type dataflow_options: dictionary
:param model_uri: GCS path of the model exported by Tensorflow using
``tensorflow.estimator.export_savedmodel()``. It cannot be used with
model_name or version_name below. See MLEngineBatchPredictionOperator for
more detail.
:type model_uri: str
:param model_name: Used to indicate a model to use for prediction. Can be
used in combination with version_name, but cannot be used together with
model_uri. See MLEngineBatchPredictionOperator for more detail. If None,
then the `dag`'s `default_args['model_name']` will be used.
:type model_name: str
:param version_name: Used to indicate a model version to use for prediction,
in combination with model_name. Cannot be used together with model_uri.
See MLEngineBatchPredictionOperator for more detail. If None, then the
`dag`'s `default_args['version_name']` will be used.
:type version_name: str
:param dag: The `DAG` to use for all Operators.
:type dag: airflow.models.DAG
:param py_interpreter: Python version of the beam pipeline.
If None, this defaults to the python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:type py_interpreter: str
:returns: a tuple of three operators, (prediction, summary, validation)
:rtype: tuple(DataFlowPythonOperator, DataFlowPythonOperator,
PythonOperator)<|endoftext|> |
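To show where the three returned operators land in a pipeline, here is a hypothetical DAG wiring that reuses the metric and validation helpers from the docstring above; the DAG id, project id, region, bucket paths and model names are placeholders, not values from this record:

    from airflow import DAG
    from airflow.utils.dates import days_ago

    with DAG('example_mlengine_evaluate', schedule_interval=None, start_date=days_ago(1),
             default_args={'project_id': 'my-project', 'region': 'us-central1'}) as dag:
        prediction, summary, validation = create_evaluate_ops(
            task_prefix='eval',
            data_format='TEXT',
            input_paths=['gs://my-bucket/eval/instances.json'],
            prediction_path='gs://my-bucket/eval/output',
            metric_fn_and_keys=get_metric_fn_and_keys(),
            validate_fn=validate_err_and_count,
            model_name='my_model',
            version_name='v1',
            dag=dag,
        )

Prediction feeds summary, which feeds validation, so an upstream training task can be chained onto `prediction` and a deployment step onto `validation`.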
15bcfc2cc3821aea0e935d2e7e02de83dc20dbbe00680e988b03d026bf45e0b8 | def u_net(shape, nb_filters=64, conv_size=3, initialization='glorot_uniform', depth=4, inc_rate=2.0, activation='relu', dropout=0, output_channels=5, batchnorm=False, maxpool=True, upconv=True, pretrain=0, sigma_noise=0):
'U-Net model.\n\n Standard U-Net model, plus optional gaussian noise.\n Note that the dimensions of the input images should be\n multiples of 16.\n\n Arguments:\n shape: image shape, in the format (nb_channels, x_size, y_size).\n nb_filters : initial number of filters in the convolutional layer.\n depth : The depth of the U-net, i.e. the number of contracting steps before expansion begins\n inc_rate : the multiplier for number of filters per layer\n conv_size : size of convolution.\n initialization: initialization of the convolutional layers.\n activation: activation of the convolutional layers.\n sigma_noise: standard deviation of the gaussian noise layer. If equal to zero, this layer is deactivated.\n output_channels: number of output channels.\n drop: dropout rate\n\n Returns:\n U-Net model - it still needs to be compiled.\n\n Reference:\n U-Net: Convolutional Networks for Biomedical Image Segmentation\n Olaf Ronneberger, Philipp Fischer, Thomas Brox\n MICCAI 2015\n\n Credits:\n The starting point for the code of this function comes from:\n https://github.com/jocicmarko/ultrasound-nerve-segmentation\n by Marko Jocic\n '
i = Input(shape, name='input_layer')
o = level_block(i, nb_filters, conv_size, initialization, depth, inc_rate, activation, dropout, batchnorm, maxpool, upconv)
if (sigma_noise > 0):
o = GaussianNoise(sigma_noise, name='GaussianNoise_preout')(o)
o = Conv2D(output_channels, 1, activation='softmax', name='conv_out')(o)
if (pretrain > 0):
pretrained_model = keras.applications.vgg19.VGG19(include_top=False, weights='imagenet', input_tensor=None, input_shape=shape, pooling='max')
w = []
pretrain_layers = ['block{}_conv{}'.format(block, layer) for block in range(1, (pretrain + 1)) for layer in range(1, 3)]
for n in pretrain_layers:
w.append(pretrained_model.get_layer(name=n).get_weights())
del pretrained_model
new_model = Model(inputs=i, outputs=o)
for (i, n) in enumerate(pretrain_layers):
new_model.get_layer(name=n).set_weights(w[i])
return new_model
return Model(inputs=i, outputs=o) | U-Net model.
Standard U-Net model, plus optional gaussian noise.
Note that the dimensions of the input images should be
multiples of 16.
Arguments:
shape: image shape, in the format (nb_channels, x_size, y_size).
nb_filters : initial number of filters in the convolutional layer.
depth : The depth of the U-net, i.e. the number of contracting steps before expansion begins
inc_rate : the multiplier for number of filters per layer
conv_size : size of convolution.
initialization: initialization of the convolutional layers.
activation: activation of the convolutional layers.
sigma_noise: standard deviation of the gaussian noise layer. If equal to zero, this layer is deactivated.
output_channels: number of output channels.
drop: dropout rate
Returns:
U-Net model - it still needs to be compiled.
Reference:
U-Net: Convolutional Networks for Biomedical Image Segmentation
Olaf Ronneberger, Philipp Fischer, Thomas Brox
MICCAI 2015
Credits:
The starting point for the code of this function comes from:
https://github.com/jocicmarko/ultrasound-nerve-segmentation
by Marko Jocic | src/mmciad/utils/.ipynb_checkpoints/u_net-checkpoint.py | u_net | bjtho08/mmciad | 0 | python
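A minimal usage sketch for the u_net factory in the record above (not part of the original record): the input shape, class count, and compile settings are hypothetical, and per the docstring the spatial dimensions must be multiples of 16::

    # Hypothetical example: build and compile the U-Net defined above.
    input_shape = (256, 256, 3)   # spatial dims must be multiples of 16; channel layout follows the project's data format
    model = u_net(input_shape, nb_filters=64, depth=4, dropout=0.2, output_channels=5, batchnorm=True)
    model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    model.summary()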
6692f569c3dd07567059d4a1ab16f4cfad49898169ba646861c1620dbe38569c | def test_extra_tokens():
'Extra tokens should persist between multiple calls of the same renderer,\n but be reset if initiating a new renderer.\n '
output_nomath = {'type': 'Document', 'front_matter': None, 'link_definitions': {}, 'footnotes': {}, 'footref_order': [], 'children': [{'type': 'Paragraph', 'children': [{'type': 'RawText', 'content': '$b$', 'position': None}], 'position': {'line_start': 1, 'line_end': 1, 'uri': None, 'data': {}}}]}
output_math = {'type': 'Document', 'front_matter': None, 'link_definitions': {}, 'footnotes': {}, 'footref_order': [], 'children': [{'type': 'Paragraph', 'children': [{'type': 'Math', 'content': '$b$'}], 'position': {'line_start': 1, 'line_end': 1, 'uri': None, 'data': {}}}]}
with JsonRenderer() as render:
output = render.render(Document.read(['$b$']), as_string=False)
print(output)
assert (output == output_nomath)
renderer = JsonRenderer(parse_context=ParseContext(find_spans=LaTeXRenderer.default_span_tokens))
with renderer as render:
output = render.render(Document.read(['$b$']), as_string=False)
assert (output == output_math)
with renderer as render:
output = render.render(Document.read(['$b$']), as_string=False)
assert (output == output_math)
with JsonRenderer() as render:
output = render.render(Document.read(['$b$']), as_string=False)
assert (output == output_nomath) | Extra tokens should persist between multiple calls of the same renderer,
but be reset if initiating a new renderer. | test/test_renderers/test_json_renderer.py | test_extra_tokens | executablebooks/mistletoe-ebp | 2 | python
0a37bde166b5c31d2bd497149b373fb702fb03fb0b8c88c33aaafb15b6ff39e9 | def __init__(self):
'\n Normalizer constructor. Initializes constants that will be used for\n data transformation.\n '
self.train_min = 0
self.train_max = 0
self.centering_shift_constant = 0
self.zero_shift_constant = (10 ** (- 6)) | Normalizer constructor. Initializes constants that will be used for
data transformation. | emulator/normalization.py | __init__ | hutchresearch/deep_climate_emulator | 7 | python
c38ea8adce9e5ba9e12696e8c3f142c353dcb3fb79f59f659fec76b6469fd60f | def transform(self, data, train_len, copy=True):
'\n Applies log transformation and scales values b/t -1 and 1.\n\n Args:\n data (ndarray): Collection of data points\n train_len (int): Length of the training set\n copy (bool): If true, creates a copy of th data array\n\n Returns:\n (ndarray): Array of normalized data points\n '
if copy:
data = deepcopy(data)
data += self.zero_shift_constant
data = np.log2(data)
self.train_min = data[:train_len].min()
data -= self.train_min
self.train_max = data[:train_len].max()
data /= self.train_max
data *= 2
self.centering_shift_constant = ((data.max() - data.min()) / 2)
data -= self.centering_shift_constant
return data | Applies log transformation and scales values between -1 and 1.
Args:
data (ndarray): Collection of data points
train_len (int): Length of the training set
copy (bool): If True, creates a copy of the data array
Returns:
(ndarray): Array of normalized data points | emulator/normalization.py | transform | hutchresearch/deep_climate_emulator | 7 | python
c267337f56db49551d1c654b320b83eff71e8577a44f5d39bf4575ef77716ca0 | def inverse_transform(self, data):
'\n Applies the inverse transformation.\n\n Args:\n data (ndarray): Collection of data points\n\n Returns:\n (ndarray): Array of denormalized data points\n '
data += self.centering_shift_constant
data /= 2
data *= self.train_max
data += self.train_min
data = np.power(2, data)
data -= self.zero_shift_constant
return data | Applies the inverse transformation.
Args:
data (ndarray): Collection of data points
Returns:
(ndarray): Array of denormalized data points | emulator/normalization.py | inverse_transform | hutchresearch/deep_climate_emulator | 7 | python
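A small round-trip sketch for the two normalization methods above (not part of the original record); the import path comes from the record's file path, the class name Normalizer is assumed from its constructor docstring, and the input array is a random stand-in::

    import numpy as np
    from emulator.normalization import Normalizer  # path from the record; class name assumed from the docstring

    data = np.random.rand(100, 64, 128)            # hypothetical stand-in for precipitation fields
    norm = Normalizer()
    scaled = norm.transform(data, train_len=80)    # log2-transform, then scale to roughly [-1, 1] using the first 80 frames
    restored = norm.inverse_transform(scaled)      # note: operates in place on its argument
    print(np.allclose(restored, data))             # True up to floating-point error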
0052d0133402a3ea96564147cfcd63164f192e47880cb1379cfdde03f1f36491 | @build_hypothesis.command('glycopeptide-fa', short_help='Build glycopeptide search spaces with a FASTA file of proteins')
@click.pass_context
@glycopeptide_hypothesis_common_options
@click.argument('fasta-file', type=click.Path(exists=True), doc_help='A file containing protein sequences in FASTA format')
@database_connection
@click.option('-e', '--enzyme', default=['trypsin'], multiple=True, help=('The proteolytic enzyme to use during digestion. May be specified multiple times, generating a co-digestion. May specify an enzyme name or a regular expression describing the cleavage pattern. Recognized enzyme names are: ' + ', '.join(sorted(enzyme_rules))))
@click.option('-m', '--missed-cleavages', type=int, default=1, help='The number of missed proteolytic cleavage sites permitted')
@click.option('-c', '--constant-modification', multiple=True, help='Peptide modification rule which will be applied constantly')
@click.option('-v', '--variable-modification', multiple=True, help='Peptide modification rule which will be applied variablely')
@click.option('-V', '--max-variable-modifications', type=int, default=4, required=False, help='The maximum number of variable modifications that can be applied to a single peptide')
@click.option('-y', '--semispecific-digest', is_flag=True, help='Apply a semispecific enzyme digest permitting one peptide terminal to be non-specific')
@click.option('-R', '--reverse', default=False, is_flag=True, help='Reverse protein sequences')
@click.option('--dry-run', default=False, is_flag=True, help='Do not save glycopeptides', cls=HiddenOption)
@click.option('-F', '--not-full-crossproduct', is_flag=True, help='Do not produce full crossproduct. For when the search space is too large to enumerate, store, and load.')
@click.option('--retain-all-peptides', is_flag=True, default=False, help='Do not require a glycosylation site when saving base peptides')
def glycopeptide_fa(context, fasta_file, database_connection, enzyme, missed_cleavages, occupied_glycosites, name, constant_modification, variable_modification, processes, glycan_source, glycan_source_type, glycan_source_identifier=None, semispecific_digest=False, reverse=False, dry_run=False, peptide_length_range=(5, 60), not_full_crossproduct=False, max_variable_modifications=4, retain_all_peptides=False):
'Constructs a glycopeptide hypothesis from a FASTA file of proteins and a\n collection of glycans.\n '
if reverse:
task_type = ReversingMultipleProcessFastaGlycopeptideHypothesisSerializer
click.secho('Using ReversingMultipleProcessFastaGlycopeptideHypothesisSerializer', fg='yellow')
elif dry_run:
task_type = NonSavingMultipleProcessFastaGlycopeptideHypothesisSerializer
click.secho('Using NonSavingMultipleProcessFastaGlycopeptideHypothesisSerializer', fg='yellow')
else:
task_type = MultipleProcessFastaGlycopeptideHypothesisSerializer
validate_modifications(context, (constant_modification + variable_modification))
validate_glycan_source(context, database_connection, glycan_source, glycan_source_type, glycan_source_identifier)
processes = min(multiprocessing.cpu_count(), processes)
if (name is not None):
name = validate_glycopeptide_hypothesis_name(context, database_connection, name)
click.secho(('Building Glycopeptide Hypothesis %s' % name), fg='cyan')
mt = RestrictedModificationTable(None, constant_modification, variable_modification)
constant_modification = [mt[c] for c in constant_modification]
variable_modification = [mt[c] for c in variable_modification]
glycan_hypothesis_id = _glycan_hypothesis_builders[glycan_source_type](database_connection, glycan_source, name, glycan_source_identifier)
builder = task_type(fasta_file, database_connection, glycan_hypothesis_id=glycan_hypothesis_id, protease=enzyme, constant_modifications=constant_modification, variable_modifications=variable_modification, max_missed_cleavages=missed_cleavages, max_glycosylation_events=occupied_glycosites, hypothesis_name=name, semispecific=semispecific_digest, n_processes=processes, full_cross_product=(not not_full_crossproduct), max_variable_modifications=max_variable_modifications, peptide_length_range=peptide_length_range, require_glycosylation_sites=(not retain_all_peptides))
builder.display_header()
builder.start()
return builder.hypothesis_id | Constructs a glycopeptide hypothesis from a FASTA file of proteins and a
collection of glycans. | glycan_profiling/cli/build_db.py | glycopeptide_fa | mobiusklein/glycresoft | 4 | python
2b032f1394a2cc9b437b1028149203064c499d99f6f3beb5091ae68c01b0294d | @build_hypothesis.command('glycopeptide-mzid', short_help='Build a glycopeptide search space with an mzIdentML file')
@click.pass_context
@click.argument('mzid-file', type=click.Path(exists=True))
@database_connection
@glycopeptide_hypothesis_common_options
@click.option('-t', '--target-protein', multiple=True, help='Specifies the name of a protein to include in the hypothesis. May be used many times.')
@click.option('-r', '--target-protein-re', multiple=True, help='Specifies a regular expression to select proteins to be included by name. May be used many times.')
@click.option('-R', '--reference-fasta', default=None, required=False, help='When the full sequence for each protein is not embedded in the mzIdentML file and the FASTA file used is not local.')
def glycopeptide_mzid(context, mzid_file, database_connection, name, occupied_glycosites, target_protein, target_protein_re, processes, glycan_source, glycan_source_type, glycan_source_identifier, reference_fasta, peptide_length_range=(5, 60)):
'Constructs a glycopeptide hypothesis from a MzIdentML file of proteins and a\n collection of glycans.\n '
proteins = validate_mzid_proteins(context, mzid_file, target_protein, target_protein_re)
validate_glycan_source(context, database_connection, glycan_source, glycan_source_type, glycan_source_identifier)
processes = min(multiprocessing.cpu_count(), processes)
if (name is not None):
name = validate_glycopeptide_hypothesis_name(context, database_connection, name)
click.secho(('Building Glycopeptide Hypothesis %s' % name), fg='cyan')
glycan_hypothesis_id = _glycan_hypothesis_builders[glycan_source_type](database_connection, glycan_source, name, glycan_source_identifier)
builder = MultipleProcessMzIdentMLGlycopeptideHypothesisSerializer(mzid_file, database_connection, glycan_hypothesis_id=glycan_hypothesis_id, hypothesis_name=name, target_proteins=proteins, max_glycosylation_events=occupied_glycosites, reference_fasta=reference_fasta, n_processes=processes, peptide_length_range=peptide_length_range)
builder.display_header()
builder.start()
return builder.hypothesis_id | Constructs a glycopeptide hypothesis from a MzIdentML file of proteins and a
collection of glycans. | glycan_profiling/cli/build_db.py | glycopeptide_mzid | mobiusklein/glycresoft | 4 | python
b179783fc55d718ed19c7eb5291e200dd1c6d96441f237dc4b1a5c71a0b5bf3e | def __init__(self, module: str, count: Union[(int, str)]='25000', verbose: bool=True, lazy: bool=True, python: bool=True, jupyter: bool=True) -> None:
'Create a Module instance that can be used to find\n which sections of a Python module are most frequently used.\n\n This class exposes the following methods::\n\n usage()\n nested_usage()\n repositories()\n plot()\n n_uses()\n n_files()\n n_repositories()\n\n ..\n TODO: Alert users of `alert`, output `limitHit`\n TODO: Something with percentages?\n TODO: Info on just one object, e.g.\n >>> module.use("nltk.tokenize")\n "802 occurrences out of 83530 (0.96%)"\n TODO: Biggest repositories relying on some subsection.\n Perhaps an extension to `repositories()`?\n Add this to n_uses, n_files and n_repositories, too\n\n :param module: The name of a Python module of which to find\n the frequently used objects, e.g. `"nltk"`.\n :type module: str\n :param count: The maximum number of times an import of `module`\n should be fetched. Roughly equivalent to the number of fetched\n files. Either an integer, a string representing an integer,\n or "all", defaults to "25000".\n :type count: Union[int, str], optional\n :param verbose: If True, set the logging level to INFO, otherwise to\n WARNING. True implies that there is some data printed to sys.out,\n while False makes the class quiet. Defaults to True.\n :type verbose: bool, optional\n :param lazy: If True, waits with fetching and parsing the data to when\n the data is required. Defaults to True.\n :type lazy: bool, optional\n '
self.module = module
self.count = count
self.timeout = '10s'
self.verbose = verbose
languages = []
if python:
languages.append('Python')
if jupyter:
languages.append('Jupyter Notebook')
self.languages = tuple(languages)
if verbose:
logger.setLevel(logging.INFO)
else:
logger.setLevel(logging.WARNING)
if (not lazy):
self.data | Create a Module instance that can be used to find
which sections of a Python module are most frequently used.
This class exposes the following methods::
usage()
nested_usage()
repositories()
plot()
n_uses()
n_files()
n_repositories()
..
TODO: Alert users of `alert`, output `limitHit`
TODO: Something with percentages?
TODO: Info on just one object, e.g.
>>> module.use("nltk.tokenize")
"802 occurrences out of 83530 (0.96%)"
TODO: Biggest repositories relying on some subsection.
Perhaps an extension to `repositories()`?
Add this to n_uses, n_files and n_repositories, too
:param module: The name of a Python module of which to find
the frequently used objects, e.g. `"nltk"`.
:type module: str
:param count: The maximum number of times an import of `module`
should be fetched. Roughly equivalent to the number of fetched
files. Either an integer, a string representing an integer,
or "all", defaults to "25000".
:type count: Union[int, str], optional
:param verbose: If True, set the logging level to INFO, otherwise to
WARNING. True implies that there is some data printed to sys.out,
while False makes the class quiet. Defaults to True.
:type verbose: bool, optional
:param lazy: If True, waits with fetching and parsing the data to when
the data is required. Defaults to True.
:type lazy: bool, optional | module_dependencies/module/module.py | __init__ | tomaarsen/module_dependencies | 1 | python
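A condensed usage sketch for the Module class above, following the examples already embedded in its docstrings; the module name and count are hypothetical::

    from module_dependencies import Module

    module = Module("nltk", count=100, verbose=False)   # lazy by default; data is fetched on first use
    print(module.usage()[:10])                          # most frequently used nltk objects
    print(module.n_repositories())                      # how many repositories the matches came from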
95bc5b04e19937393e0c42106a56ba9ab30dff4d9d372ef4c943fad3ee0baac7 | @cached_property
def data(self) -> Dict:
'Cached property of a Module, containing the parsed data from\n the SourceGraph API. This property lazily loads the data once upon request,\n and then parses it using `Source(...).dependencies()`.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count=3)\n >>> pprint(module.data, depth=1)\n {\n \'alert\': None,\n \'cloning\': [],\n \'elapsedMilliseconds\': 573,\n \'limitHit\': True,\n \'matchCount\': 3,\n \'missing\': [],\n \'repositoriesCount\': 1,\n \'results\': [...],\n \'timedout\': []\n }\n\n :return: The cached, parsed SourceGraph API data.\n :rtype: Dict\n '
return ModuleSession().fetch_and_parse(self.module, self.count, self.timeout, self.verbose, self.languages) | Cached property of a Module, containing the parsed data from
the SourceGraph API. This property lazily loads the data once upon request,
and then parses it using `Source(...).dependencies()`.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count=3)
>>> pprint(module.data, depth=1)
{
'alert': None,
'cloning': [],
'elapsedMilliseconds': 573,
'limitHit': True,
'matchCount': 3,
'missing': [],
'repositoriesCount': 1,
'results': [...],
'timedout': []
}
:return: The cached, parsed SourceGraph API data.
:rtype: Dict | module_dependencies/module/module.py | data | tomaarsen/module_dependencies | 1 | python
557f3a71e5cd63f92ff75773117e182706d60f8e04f85119a7c097d9b63fa50f | @staticmethod
def is_subsection_of(var_one: Tuple[str], var_two: Tuple[str]) -> bool:
"Check whether `var_one` is a subsection of `var_two`. This means\n that `var_two` can be created by inserting strings into the tuple of\n `var_one`. For example, `var_two` as `('nltk', 'tokenize', 'word_tokenize')`\n can be created by inserting `'tokenize'` into a `var_one` as\n `('nltk', 'word_tokenize')`, so this function returns True.\n\n :param var_one: Tuple of strings representing the path to a Python\n object, e.g. `('nltk', 'word_tokenize')`.\n :type var_one: Tuple[str]\n :param var_two: Tuple of strings representing the path to a Python\n object, e.g. `('nltk', 'tokenize', 'word_tokenize')`.\n :type var_two: Tuple[str]\n :return: True if `var_one` is a subsection of `var_two`.\n :rtype: bool\n "
try:
i = 0
for section in var_two:
if (section == var_one[i]):
i += 1
except IndexError:
return True
return (i == len(var_one)) | Check whether `var_one` is a subsection of `var_two`. This means
that `var_two` can be created by inserting strings into the tuple of
`var_one`. For example, `var_two` as `('nltk', 'tokenize', 'word_tokenize')`
can be created by inserting `'tokenize'` into a `var_one` as
`('nltk', 'word_tokenize')`, so this function returns True.
:param var_one: Tuple of strings representing the path to a Python
object, e.g. `('nltk', 'word_tokenize')`.
:type var_one: Tuple[str]
:param var_two: Tuple of strings representing the path to a Python
object, e.g. `('nltk', 'tokenize', 'word_tokenize')`.
:type var_two: Tuple[str]
:return: True if `var_one` is a subsection of `var_two`.
:rtype: bool | module_dependencies/module/module.py | is_subsection_of | tomaarsen/module_dependencies | 1 | python
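A short worked example of the subsection test above, reusing the tuples from its docstring plus one hypothetical negative case::

    >>> Module.is_subsection_of(("nltk", "word_tokenize"), ("nltk", "tokenize", "word_tokenize"))
    True
    >>> Module.is_subsection_of(("nltk", "corpus"), ("nltk", "tokenize", "word_tokenize"))
    False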
cc9e6d2ef6404de0ed28d692b5c88962b7102e082eeb48eddf92795a8c45feaf | @lru_cache(maxsize=1)
def usage(self, merge: bool=True, cumulative: bool=False) -> List[Tuple[(str, int)]]:
'Get a list of object-occurrence tuples, sorted by most to least frequent.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.usage()\n [(\'nltk.metrics.distance.edit_distance\', 2),\n (\'nltk.tokenize.sent_tokenize\', 1),\n (\'nltk.tokenize.treebank.TreebankWordDetokenizer\', 1)]\n\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :return: A list of object-occurrence tuples, sorted by most to least frequent.\n :rtype: List[Tuple[str, int]]\n '
def merge_one(usage: List[Tuple[(Tuple[str], int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of similar tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially\n combinable objects.\n\n :param usage: A list of tuples, where the first element is a tuple\n of strings that represent a path to a Python object, e.g.\n `(\'nltk\', \'word_tokenize\')`, and the second element is how\n often that Python object occurs in a large collection of code.\n Each path in the tuple ends in the same token, and thus could\n refer to the same object.\n :type usage: List[Tuple[Tuple[str], int]]\n :return: `usage`, but the first element of each tuple is detokenized,\n i.e. converted back to a string, and paths that refer to the\n same element are merged.\n :rtype: List[Tuple[str, int]]\n '
merged = {}
for (obj, occ) in sorted(usage, key=(lambda x: len(x[0])), reverse=True):
options = [(o_key, o_occ) for (o_key, o_occ) in merged.items() if (Module.is_subsection_of(obj, o_key) and (o_occ > 1))]
if options:
key = max(options, key=(lambda x: x[1]))[0]
merged[key] += occ
else:
merged[obj] = occ
return [(detokenize(obj), occ) for (obj, occ) in merged.items()]
def merge_all(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`.\n\n :param usage: A list of tuples, where the first element of\n each tuple is a string representing a path to a Python object,\n e.g. `"nltk.word_tokenize"`, and the second element of each\n tuple is the occurrence of that object in a large collection\n of code.\n :type usage: List[Tuple[str, int]]\n :return: `usage`, but with some merged tuples.\n :rtype: List[Tuple[str, int]]\n '
grouped = defaultdict(list)
for (obj, occ) in usage:
obj_tok = tokenize(obj)
grouped[obj_tok[(- 1)]].append((obj_tok, occ))
merged = []
for group in grouped.values():
merged.extend(merge_one(group))
return sorted(merged, key=(lambda x: x[1]), reverse=True)
def cumulate(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
usage = defaultdict((lambda : 0), {tokenize(obj): occ for (obj, occ) in usage})
for (tok_obj, occ) in usage.copy().items():
for i in range(1, len(tok_obj)):
usage[tok_obj[:i]] += occ
usage = [(detokenize(tok_obj), occ) for (tok_obj, occ) in usage.items()]
return sorted(usage, key=(lambda x: x[1]), reverse=True)
counter = Counter((use for result in self.data['results'] for use in result['file']['dependencies']))
usage = counter.most_common()
if merge:
usage = merge_all(usage)
if cumulative:
usage = cumulate(usage)
return usage | Get a list of object-occurrence tuples, sorted by most to least frequent.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="3")
>>> module.usage()
[('nltk.metrics.distance.edit_distance', 2),
('nltk.tokenize.sent_tokenize', 1),
('nltk.tokenize.treebank.TreebankWordDetokenizer', 1)]
:param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`
into `"nltk.tokenize.word_tokenize"`. May give incorrect results
for projects with "compat" folders, as the merging tends to prefer
longer paths, e.g. `"tensorflow.float32"` will become
`"tensorflow.compat.v1.dtypes.float32"` as opposed to just
`"tensorflow.dtypes.float32"`. Defaults to True.
:type merge: bool
:return: A list of object-occurrence tuples, sorted by most to least frequent.
:rtype: List[Tuple[str, int]] | module_dependencies/module/module.py | usage | tomaarsen/module_dependencies | 1 | python
def merge_one(usage: List[Tuple[(Tuple[str], int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of similar tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially\n combinable objects.\n\n :param usage: A list of tuples, where the first element is a tuple\n of strings that represent a path to a Python object, e.g.\n `(\'nltk\', \'word_tokenize\')`, and the second element is how\n often that Python object occurs in a large collection of code.\n Each path in the tuple ends in the same token, and thus could\n refer to the same object.\n :type usage: List[Tuple[Tuple[str], int]]\n :return: `usage`, but the first element of each tuple is detokenized,\n i.e. converted back to a string, and paths that refer to the\n same element are merged.\n :rtype: List[Tuple[str, int]]\n '
merged = {}
for (obj, occ) in sorted(usage, key=(lambda x: len(x[0])), reverse=True):
options = [(o_key, o_occ) for (o_key, o_occ) in merged.items() if (Module.is_subsection_of(obj, o_key) and (o_occ > 1))]
if options:
key = max(options, key=(lambda x: x[1]))[0]
merged[key] += occ
else:
merged[obj] = occ
return [(detokenize(obj), occ) for (obj, occ) in merged.items()]
def merge_all(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`.\n\n :param usage: A list of tuples, where the first element of\n each tuple is a string representing a path to a Python object,\n e.g. `"nltk.word_tokenize"`, and the second element of each\n tuple is the occurrence of that object in a large collection\n of code.\n :type usage: List[Tuple[str, int]]\n :return: `usage`, but with some merged tuples.\n :rtype: List[Tuple[str, int]]\n '
grouped = defaultdict(list)
for (obj, occ) in usage:
obj_tok = tokenize(obj)
grouped[obj_tok[(- 1)]].append((obj_tok, occ))
merged = []
for group in grouped.values():
merged.extend(merge_one(group))
return sorted(merged, key=(lambda x: x[1]), reverse=True)
def cumulate(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
usage = defaultdict((lambda : 0), {tokenize(obj): occ for (obj, occ) in usage})
for (tok_obj, occ) in usage.copy().items():
for i in range(1, len(tok_obj)):
usage[tok_obj[:i]] += occ
usage = [(detokenize(tok_obj), occ) for (tok_obj, occ) in usage.items()]
return sorted(usage, key=(lambda x: x[1]), reverse=True)
counter = Counter((use for result in self.data['results'] for use in result['file']['dependencies']))
usage = counter.most_common()
if merge:
usage = merge_all(usage)
if cumulative:
usage = cumulate(usage)
return usage<|docstring|>Get a list of object-occurrence tuples, sorted by most to least frequent.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="3")
>>> module.usage()
[('nltk.metrics.distance.edit_distance', 2),
('nltk.tokenize.sent_tokenize', 1),
('nltk.tokenize.treebank.TreebankWordDetokenizer', 1)]
:param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`
into `"nltk.tokenize.word_tokenize"`. May give incorrect results
for projects with "compat" folders, as the merging tends to prefer
longer paths, e.g. `"tensorflow.float32"` will become
`"tensorflow.compat.v1.dtypes.float32"` as opposed to just
`"tensorflow.dtypes.float32"`. Defaults to True.
:type merge: bool
:return: A list of object-occurrence tuples, sorted by most to least frequent.
:rtype: List[Tuple[str, int]]<|endoftext|> |
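The cumulate() helper above folds every object's count into each prefix of its dotted path. A minimal, self-contained sketch of that idea; the tokenize/detokenize helpers and the sample counts are illustrative stand-ins, not code taken from the repository:

from collections import defaultdict
from typing import Dict, List, Tuple

def tokenize(obj: str) -> Tuple[str, ...]:
    # Stand-in: split a dotted path into its components.
    return tuple(obj.split("."))

def detokenize(tok_obj: Tuple[str, ...]) -> str:
    # Stand-in: join components back into a dotted path.
    return ".".join(tok_obj)

def cumulate(usage: List[Tuple[str, int]]) -> List[Tuple[str, int]]:
    # Fold each count into every proper prefix, as usage(cumulative=True) does.
    counts: Dict[Tuple[str, ...], int] = defaultdict(int)
    for obj, occ in usage:
        counts[tokenize(obj)] += occ
    for tok_obj, occ in list(counts.items()):
        for i in range(1, len(tok_obj)):
            counts[tok_obj[:i]] += occ
    return sorted(((detokenize(t), o) for t, o in counts.items()),
                  key=lambda x: x[1], reverse=True)

print(cumulate([("nltk.tokenize.word_tokenize", 3), ("nltk.corpus.stopwords.words", 2)]))
# 'nltk' accumulates 5; each intermediate package inherits its children's counts.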
69e3e7c46fe1df772ddbd0b6615e962a289b29e8ed65beff796dce6f3582c119 | @lru_cache(maxsize=1)
def nested_usage(self, full_name: bool=False, merge: bool=True, cumulative: bool=True) -> Dict[(str, Union[(Dict, int)])]:
'Get a (recursive) dictionary of objects mapped to occurrence of that object,\n and the object\'s children.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.nested_usage()\n {\n "nltk": {\n "occurrences": 4,\n "corpus": {\n "occurrences": 2,\n "stopwords": {\n "occurrences": 2,\n "words": {\n "occurrences": 2\n }\n }\n },\n "tokenize": {\n "occurrences": 2,\n "sent_tokenize": {\n "occurrences": 1\n },\n "treebank": {\n "occurrences": 1,\n "TreebankWordDetokenizer": {\n "occurrences": 1\n }\n }\n }\n }\n }\n\n TODO: Optimize this by relying on usage() better for cumulative\n\n :param full_name: Whether each dictionary key should be the full path,\n e.g. `"nltk.tokenize"`, rather than just the right-most section.\n Defaults to False.\n :type full_name: bool\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :param cumulative: Whether to include usage counts of e.g.\n `"nltk.tokenize.word_tokenize"` into `"nltk.tokenize"` and\n `"nltk"` as well. Defaults to True.\n :param cumulative: bool\n :return: A dictionary mapping objects to how often that object occurred\n in the parsed source code.\n :rtype: Dict[str, Union[Dict, int]]\n '
def recursive_add(nested: Dict, obj_tup: List[str], occurrence: int, prefix: str=''):
if (not obj_tup):
return
head = obj_tup[0]
if (full_name and prefix):
head = ((prefix + '.') + head)
if (head not in nested):
nested[head] = {'occurrences': (occurrence if (cumulative or (len(obj_tup) == 1)) else 0)}
elif (cumulative or (len(obj_tup) == 1)):
nested[head]['occurrences'] += occurrence
recursive_add(nested[head], obj_tup[1:], occurrence, prefix=head)
nested = {}
for (obj, occurrence) in self.usage(merge=merge):
obj_tup = tokenize(obj)
recursive_add(nested, obj_tup, occurrence)
return nested | Get a (recursive) dictionary of objects mapped to occurrence of that object,
and the object's children.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="3")
>>> module.nested_usage()
{
"nltk": {
"occurrences": 4,
"corpus": {
"occurrences": 2,
"stopwords": {
"occurrences": 2,
"words": {
"occurrences": 2
}
}
},
"tokenize": {
"occurrences": 2,
"sent_tokenize": {
"occurrences": 1
},
"treebank": {
"occurrences": 1,
"TreebankWordDetokenizer": {
"occurrences": 1
}
}
}
}
}
TODO: Optimize this by relying on usage() better for cumulative
:param full_name: Whether each dictionary key should be the full path,
e.g. `"nltk.tokenize"`, rather than just the right-most section.
Defaults to False.
:type full_name: bool
:param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`
into `"nltk.tokenize.word_tokenize"`. May give incorrect results
for projects with "compat" folders, as the merging tends to prefer
longer paths, e.g. `"tensorflow.float32"` will become
`"tensorflow.compat.v1.dtypes.float32"` as opposed to just
`"tensorflow.dtypes.float32"`. Defaults to True.
:type merge: bool
:param cumulative: Whether to include usage counts of e.g.
`"nltk.tokenize.word_tokenize"` into `"nltk.tokenize"` and
`"nltk"` as well. Defaults to True.
:param cumulative: bool
:return: A dictionary mapping objects to how often that object occurred
in the parsed source code.
:rtype: Dict[str, Union[Dict, int]] | module_dependencies/module/module.py | nested_usage | tomaarsen/module_dependencies | 1 | python | @lru_cache(maxsize=1)
def nested_usage(self, full_name: bool=False, merge: bool=True, cumulative: bool=True) -> Dict[(str, Union[(Dict, int)])]:
'Get a (recursive) dictionary of objects mapped to occurrence of that object,\n and the object\'s children.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.nested_usage()\n {\n "nltk": {\n "occurrences": 4,\n "corpus": {\n "occurrences": 2,\n "stopwords": {\n "occurrences": 2,\n "words": {\n "occurrences": 2\n }\n }\n },\n "tokenize": {\n "occurrences": 2,\n "sent_tokenize": {\n "occurrences": 1\n },\n "treebank": {\n "occurrences": 1,\n "TreebankWordDetokenizer": {\n "occurrences": 1\n }\n }\n }\n }\n }\n\n TODO: Optimize this by relying on usage() better for cumulative\n\n :param full_name: Whether each dictionary key should be the full path,\n e.g. `"nltk.tokenize"`, rather than just the right-most section.\n Defaults to False.\n :type full_name: bool\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :param cumulative: Whether to include usage counts of e.g.\n `"nltk.tokenize.word_tokenize"` into `"nltk.tokenize"` and\n `"nltk"` as well. Defaults to True.\n :param cumulative: bool\n :return: A dictionary mapping objects to how often that object occurred\n in the parsed source code.\n :rtype: Dict[str, Union[Dict, int]]\n '
def recursive_add(nested: Dict, obj_tup: List[str], occurrence: int, prefix: str=):
if (not obj_tup):
return
head = obj_tup[0]
if (full_name and prefix):
head = ((prefix + '.') + head)
if (head not in nested):
nested[head] = {'occurrences': (occurrence if (cumulative or (len(obj_tup) == 1)) else 0)}
elif (cumulative or (len(obj_tup) == 1)):
nested[head]['occurrences'] += occurrence
recursive_add(nested[head], obj_tup[1:], occurrence, prefix=head)
nested = {}
for (obj, occurrence) in self.usage(merge=merge):
obj_tup = tokenize(obj)
recursive_add(nested, obj_tup, occurrence)
return nested | @lru_cache(maxsize=1)
def nested_usage(self, full_name: bool=False, merge: bool=True, cumulative: bool=True) -> Dict[(str, Union[(Dict, int)])]:
'Get a (recursive) dictionary of objects mapped to occurrence of that object,\n and the object\'s children.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.nested_usage()\n {\n "nltk": {\n "occurrences": 4,\n "corpus": {\n "occurrences": 2,\n "stopwords": {\n "occurrences": 2,\n "words": {\n "occurrences": 2\n }\n }\n },\n "tokenize": {\n "occurrences": 2,\n "sent_tokenize": {\n "occurrences": 1\n },\n "treebank": {\n "occurrences": 1,\n "TreebankWordDetokenizer": {\n "occurrences": 1\n }\n }\n }\n }\n }\n\n TODO: Optimize this by relying on usage() better for cumulative\n\n :param full_name: Whether each dictionary key should be the full path,\n e.g. `"nltk.tokenize"`, rather than just the right-most section.\n Defaults to False.\n :type full_name: bool\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :param cumulative: Whether to include usage counts of e.g.\n `"nltk.tokenize.word_tokenize"` into `"nltk.tokenize"` and\n `"nltk"` as well. Defaults to True.\n :param cumulative: bool\n :return: A dictionary mapping objects to how often that object occurred\n in the parsed source code.\n :rtype: Dict[str, Union[Dict, int]]\n '
def recursive_add(nested: Dict, obj_tup: List[str], occurrence: int, prefix: str=):
if (not obj_tup):
return
head = obj_tup[0]
if (full_name and prefix):
head = ((prefix + '.') + head)
if (head not in nested):
nested[head] = {'occurrences': (occurrence if (cumulative or (len(obj_tup) == 1)) else 0)}
elif (cumulative or (len(obj_tup) == 1)):
nested[head]['occurrences'] += occurrence
recursive_add(nested[head], obj_tup[1:], occurrence, prefix=head)
nested = {}
for (obj, occurrence) in self.usage(merge=merge):
obj_tup = tokenize(obj)
recursive_add(nested, obj_tup, occurrence)
return nested<|docstring|>Get a (recursive) dictionary of objects mapped to occurrence of that object,
and the object's children.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="3")
>>> module.nested_usage()
{
"nltk": {
"occurrences": 4,
"corpus": {
"occurrences": 2,
"stopwords": {
"occurrences": 2,
"words": {
"occurrences": 2
}
}
},
"tokenize": {
"occurrences": 2,
"sent_tokenize": {
"occurrences": 1
},
"treebank": {
"occurrences": 1,
"TreebankWordDetokenizer": {
"occurrences": 1
}
}
}
}
}
TODO: Optimize this by relying on usage() better for cumulative
:param full_name: Whether each dictionary key should be the full path,
e.g. `"nltk.tokenize"`, rather than just the right-most section.
Defaults to False.
:type full_name: bool
:param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`
into `"nltk.tokenize.word_tokenize"`. May give incorrect results
for projects with "compat" folders, as the merging tends to prefer
longer paths, e.g. `"tensorflow.float32"` will become
`"tensorflow.compat.v1.dtypes.float32"` as opposed to just
`"tensorflow.dtypes.float32"`. Defaults to True.
:type merge: bool
:param cumulative: Whether to include usage counts of e.g.
`"nltk.tokenize.word_tokenize"` into `"nltk.tokenize"` and
`"nltk"` as well. Defaults to True.
:param cumulative: bool
:return: A dictionary mapping objects to how often that object occurred
in the parsed source code.
:rtype: Dict[str, Union[Dict, int]]<|endoftext|> |
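A small stand-alone illustration of how recursive_add builds the nested occurrence dictionary; the build_nested helper and its sample counts are invented for the example and are not the repository's own code:

from typing import Dict, List, Tuple

def build_nested(usage: List[Tuple[str, int]], cumulative: bool = True) -> Dict:
    # Walk each dotted path and record counts, mirroring recursive_add.
    nested: Dict = {}
    for obj, occ in usage:
        node = nested
        parts = obj.split(".")
        for depth, part in enumerate(parts, start=1):
            leaf = depth == len(parts)
            if part not in node:
                node[part] = {"occurrences": occ if (cumulative or leaf) else 0}
            elif cumulative or leaf:
                node[part]["occurrences"] += occ
            node = node[part]
    return nested

print(build_nested([("nltk.tokenize.sent_tokenize", 1), ("nltk.corpus.stopwords.words", 2)]))
# {'nltk': {'occurrences': 3, 'tokenize': {...}, 'corpus': {...}}}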
db056e542814a6f1172378cbf40fd1912bc1d57d0e89a8d1f1ea506001bab1d5 | @lru_cache(maxsize=1)
def repositories(self, obj: str='') -> Dict[(str, Dict[(str, Any)])]:
'Return a mapping of repository names to repository information\n that were fetched and parsed. Contains "description", "stars", "isFork" keys,\n plus a list of "files" with "name", "path", "url", "dependencies" and\n "parse_error" fields. The "parse_error" field lists the error that was\n encountered when attempting to parse the file, e.g. "SyntaxError".\n This might happen when a Python 2 file was fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.repositories()\n {\n "github.com/codelucas/newspaper": {\n "description": "News, full-text, and article metadata extraction in Python 3. Advanced docs:",\n "stars": 11224,\n "isFork": false,\n "files": [\n {\n "name": "download_corpora.py",\n "path": "download_corpora.py",\n "url": "/github.com/codelucas/newspaper/-/blob/download_corpora.py",\n "dependencies": [\n "nltk.download"\n ],\n "parse_error": null\n },\n {\n "name": "nlp.py",\n "path": "newspaper/nlp.py",\n "url": "/github.com/codelucas/newspaper/-/blob/newspaper/nlp.py",\n "dependencies": [\n "nltk.data.load"\n ],\n "parse_error": null\n },\n {\n "name": "text.py",\n "path": "newspaper/text.py",\n "url": "/github.com/codelucas/newspaper/-/blob/newspaper/text.py",\n "dependencies": [\n "nltk.stem.isri.ISRIStemmer",\n "nltk.tokenize.wordpunct_tokenize"\n ],\n "parse_error": null\n }\n ]\n }\n }\n\n :return: A mapping of repositories\n :rtype: Dict[str, Dict[str, Any]]\n '
if obj:
tok_obj = tokenize(obj)
objects = {potential_obj for (potential_obj, _) in self.usage(merge=False, cumulative=True) if Module.is_subsection_of(tok_obj, tokenize(potential_obj))}
if (not objects):
warnings.warn(f'No instance of {obj!r} was found in the fetched files!', stacklevel=2)
projects = {}
for result in self.data['results']:
if ((not obj) or set(result['file']['dependencies']).intersection(objects)):
name = result['repository']['name']
del result['repository']['name']
if (name in projects):
projects[name]['files'].append(result['file'])
else:
projects[name] = {**result['repository'], 'files': [result['file']]}
return dict(sorted(projects.items(), key=(lambda project: project[1]['stars']), reverse=True)) | Return a mapping of repository names to repository information
that were fetched and parsed. Contains "description", "stars", "isFork" keys,
plus a list of "files" with "name", "path", "url", "dependencies" and
"parse_error" fields. The "parse_error" field lists the error that was
encountered when attempting to parse the file, e.g. "SyntaxError".
This might happen when a Python 2 file was fetched.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="3")
>>> module.repositories()
{
"github.com/codelucas/newspaper": {
"description": "News, full-text, and article metadata extraction in Python 3. Advanced docs:",
"stars": 11224,
"isFork": false,
"files": [
{
"name": "download_corpora.py",
"path": "download_corpora.py",
"url": "/github.com/codelucas/newspaper/-/blob/download_corpora.py",
"dependencies": [
"nltk.download"
],
"parse_error": null
},
{
"name": "nlp.py",
"path": "newspaper/nlp.py",
"url": "/github.com/codelucas/newspaper/-/blob/newspaper/nlp.py",
"dependencies": [
"nltk.data.load"
],
"parse_error": null
},
{
"name": "text.py",
"path": "newspaper/text.py",
"url": "/github.com/codelucas/newspaper/-/blob/newspaper/text.py",
"dependencies": [
"nltk.stem.isri.ISRIStemmer",
"nltk.tokenize.wordpunct_tokenize"
],
"parse_error": null
}
]
}
}
:return: A mapping of repository names to repository information, sorted by stars.
:rtype: Dict[str, Dict[str, Any]] | module_dependencies/module/module.py | repositories | tomaarsen/module_dependencies | 1 | python | @lru_cache(maxsize=1)
def repositories(self, obj: str=) -> Dict[(str, Dict[(str, Any)])]:
'Return a mapping of repository names to repository information\n that were fetched and parsed. Contains "description", "stars", "isFork" keys,\n plus a list of "files" with "name", "path", "url", "dependencies" and\n "parse_error" fields. The "parse_error" field lists the error that was\n encountered when attempting to parse the file, e.g. "SyntaxError".\n This might happen when a Python 2 file was fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.repositories()\n {\n "github.com/codelucas/newspaper": {\n "description": "News, full-text, and article metadata extraction in Python 3. Advanced docs:",\n "stars": 11224,\n "isFork": false,\n "files": [\n {\n "name": "download_corpora.py",\n "path": "download_corpora.py",\n "url": "/github.com/codelucas/newspaper/-/blob/download_corpora.py",\n "dependencies": [\n "nltk.download"\n ],\n "parse_error": null\n },\n {\n "name": "nlp.py",\n "path": "newspaper/nlp.py",\n "url": "/github.com/codelucas/newspaper/-/blob/newspaper/nlp.py",\n "dependencies": [\n "nltk.data.load"\n ],\n "parse_error": null\n },\n {\n "name": "text.py",\n "path": "newspaper/text.py",\n "url": "/github.com/codelucas/newspaper/-/blob/newspaper/text.py",\n "dependencies": [\n "nltk.stem.isri.ISRIStemmer",\n "nltk.tokenize.wordpunct_tokenize"\n ],\n "parse_error": null\n }\n ]\n }\n }\n\n :return: A mapping of repositories\n :rtype: Dict[str, Dict[str, Any]]\n '
if obj:
tok_obj = tokenize(obj)
objects = {potential_obj for (potential_obj, _) in self.usage(merge=False, cumulative=True) if Module.is_subsection_of(tok_obj, tokenize(potential_obj))}
if (not objects):
warnings.warn(f'No instance of {obj!r} was found in the fetched files!', stacklevel=2)
projects = {}
for result in self.data['results']:
if ((not obj) or set(result['file']['dependencies']).intersection(objects)):
name = result['repository']['name']
del result['repository']['name']
if (name in projects):
projects[name]['files'].append(result['file'])
else:
projects[name] = {**result['repository'], 'files': [result['file']]}
return dict(sorted(projects.items(), key=(lambda project: project[1]['stars']), reverse=True)) | @lru_cache(maxsize=1)
def repositories(self, obj: str=) -> Dict[(str, Dict[(str, Any)])]:
'Return a mapping of repository names to repository information\n that were fetched and parsed. Contains "description", "stars", "isFork" keys,\n plus a list of "files" with "name", "path", "url", "dependencies" and\n "parse_error" fields. The "parse_error" field lists the error that was\n encountered when attempting to parse the file, e.g. "SyntaxError".\n This might happen when a Python 2 file was fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="3")\n >>> module.repositories()\n {\n "github.com/codelucas/newspaper": {\n "description": "News, full-text, and article metadata extraction in Python 3. Advanced docs:",\n "stars": 11224,\n "isFork": false,\n "files": [\n {\n "name": "download_corpora.py",\n "path": "download_corpora.py",\n "url": "/github.com/codelucas/newspaper/-/blob/download_corpora.py",\n "dependencies": [\n "nltk.download"\n ],\n "parse_error": null\n },\n {\n "name": "nlp.py",\n "path": "newspaper/nlp.py",\n "url": "/github.com/codelucas/newspaper/-/blob/newspaper/nlp.py",\n "dependencies": [\n "nltk.data.load"\n ],\n "parse_error": null\n },\n {\n "name": "text.py",\n "path": "newspaper/text.py",\n "url": "/github.com/codelucas/newspaper/-/blob/newspaper/text.py",\n "dependencies": [\n "nltk.stem.isri.ISRIStemmer",\n "nltk.tokenize.wordpunct_tokenize"\n ],\n "parse_error": null\n }\n ]\n }\n }\n\n :return: A mapping of repositories\n :rtype: Dict[str, Dict[str, Any]]\n '
if obj:
tok_obj = tokenize(obj)
objects = {potential_obj for (potential_obj, _) in self.usage(merge=False, cumulative=True) if Module.is_subsection_of(tok_obj, tokenize(potential_obj))}
if (not objects):
warnings.warn(f'No instance of {obj!r} was found in the fetched files!', stacklevel=2)
projects = {}
for result in self.data['results']:
if ((not obj) or set(result['file']['dependencies']).intersection(objects)):
name = result['repository']['name']
del result['repository']['name']
if (name in projects):
projects[name]['files'].append(result['file'])
else:
projects[name] = {**result['repository'], 'files': [result['file']]}
return dict(sorted(projects.items(), key=(lambda project: project[1]['stars']), reverse=True))<|docstring|>Return a mapping of repository names to repository information
that were fetched and parsed. Contains "description", "stars", "isFork" keys,
plus a list of "files" with "name", "path", "url", "dependencies" and
"parse_error" fields. The "parse_error" field lists the error that was
encountered when attempting to parse the file, e.g. "SyntaxError".
This might happen when a Python 2 file was fetched.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="3")
>>> module.repositories()
{
"github.com/codelucas/newspaper": {
"description": "News, full-text, and article metadata extraction in Python 3. Advanced docs:",
"stars": 11224,
"isFork": false,
"files": [
{
"name": "download_corpora.py",
"path": "download_corpora.py",
"url": "/github.com/codelucas/newspaper/-/blob/download_corpora.py",
"dependencies": [
"nltk.download"
],
"parse_error": null
},
{
"name": "nlp.py",
"path": "newspaper/nlp.py",
"url": "/github.com/codelucas/newspaper/-/blob/newspaper/nlp.py",
"dependencies": [
"nltk.data.load"
],
"parse_error": null
},
{
"name": "text.py",
"path": "newspaper/text.py",
"url": "/github.com/codelucas/newspaper/-/blob/newspaper/text.py",
"dependencies": [
"nltk.stem.isri.ISRIStemmer",
"nltk.tokenize.wordpunct_tokenize"
],
"parse_error": null
}
]
}
}
:return: A mapping of repository names to repository information, sorted by stars.
:rtype: Dict[str, Dict[str, Any]]<|endoftext|> |
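The core of repositories() is a group-by step: flat search results become one entry per repository, each carrying its files, sorted by stars. A rough sketch with fabricated result records (the field names mirror the docstring above; nothing here comes from an actual Sourcegraph response):

from typing import Any, Dict, List

def group_by_repository(results: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    projects: Dict[str, Dict[str, Any]] = {}
    for result in results:
        repo = dict(result["repository"])   # copy so the name key can be dropped
        name = repo.pop("name")
        if name in projects:
            projects[name]["files"].append(result["file"])
        else:
            projects[name] = {**repo, "files": [result["file"]]}
    return dict(sorted(projects.items(), key=lambda p: p[1]["stars"], reverse=True))

fake_results = [
    {"repository": {"name": "github.com/a/x", "stars": 10}, "file": {"path": "a.py", "dependencies": ["nltk.download"]}},
    {"repository": {"name": "github.com/b/y", "stars": 99}, "file": {"path": "b.py", "dependencies": ["nltk.data.load"]}},
    {"repository": {"name": "github.com/a/x", "stars": 10}, "file": {"path": "c.py", "dependencies": ["nltk.corpus"]}},
]
print(list(group_by_repository(fake_results)))   # ['github.com/b/y', 'github.com/a/x']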
0e55608a60109cd5488d9df978a7ef158a828deebfdcc494e877929a84ce9d82 | def plot(self, merge: bool=True, threshold: int=0, limit: int=(- 1), max_depth: int=4, transparant: bool=False, show: bool=True) -> None:
'Display a plotly Sunburst plot showing the frequency of use\n of different sections of this module.\n\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :rtype: None\n '
import plotly.graph_objects as go
def get_value(nested_dict: Dict, tok_obj: Tuple[str]) -> int:
'Recursively apply elements from `tok_obj` as keys in `nested_dict`,\n and then gather the `occurrences`.\n\n :param nested_dict: A dictionary with nested usages, generally taken\n from the `nested_usage` method.\n :type nested_dict: Dict\n :param tok_obj: A tuple of strings representing a path to a Python path.\n :type tok_obj: Tuple[str]\n :return: The occurrence of the object represented by `tok_obj`\n in `nested_dict`.\n :rtype: int\n '
if (not tok_obj):
return nested_dict['occurrences']
return get_value(nested_dict[tok_obj[0]], tok_obj[1:])
usage = self.usage(merge=merge)
nested_usage = self.nested_usage(merge=merge)
objects = set()
for (obj, _) in usage:
tok_obj = tokenize(obj)
objects |= {(detokenize(tok_obj[:i]), tok_obj[:i]) for i in range(1, (len(tok_obj) + 1))}
full_objects = [{'obj': obj, 'tok': tok_obj, 'val': get_value(nested_usage, tok_obj)} for (obj, tok_obj) in objects]
if threshold:
full_objects = [fobj for fobj in full_objects if (fobj['val'] > threshold)]
if (limit > 0):
sorted_fobjs = sorted(full_objects, key=(lambda fobj: fobj['val']), reverse=True)
limit_value = sorted_fobjs[limit]['val']
full_objects = [fobj for fobj in full_objects if (fobj['val'] >= limit_value)]
parameters = {'ids': [fobj['obj'] for fobj in full_objects], 'labels': [fobj['tok'][(- 1)] for fobj in full_objects], 'parents': [detokenize(fobj['tok'][:(- 1)]) for fobj in full_objects], 'values': [fobj['val'] for fobj in full_objects]}
if show:
fig = go.Figure(go.Sunburst(**parameters, branchvalues='total', insidetextorientation='radial', maxdepth=max_depth), layout=go.Layout(paper_bgcolor=('rgba(0,0,0,0)' if transparant else None), margin={'t': 0, 'l': 0, 'r': 0, 'b': 0}))
fig.show()
else:
return parameters | Display a plotly Sunburst plot showing the frequency of use
of different sections of this module.
:param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`
into `"nltk.tokenize.word_tokenize"`. May give incorrect results
for projects with "compat" folders, as the merging tends to prefer
longer paths, e.g. `"tensorflow.float32"` will become
`"tensorflow.compat.v1.dtypes.float32"` as opposed to just
`"tensorflow.dtypes.float32"`. Defaults to True.
:type merge: bool
:rtype: None | module_dependencies/module/module.py | plot | tomaarsen/module_dependencies | 1 | python | def plot(self, merge: bool=True, threshold: int=0, limit: int=(- 1), max_depth: int=4, transparant: bool=False, show: bool=True) -> None:
'Display a plotly Sunburst plot showing the frequency of use\n of different sections of this module.\n\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :rtype: None\n '
import plotly.graph_objects as go
def get_value(nested_dict: Dict, tok_obj: Tuple[str]) -> int:
'Recursively apply elements from `tok_obj` as keys in `nested_dict`,\n and then gather the `occurrences`.\n\n :param nested_dict: A dictionary with nested usages, generally taken\n from the `nested_usage` method.\n :type nested_dict: Dict\n :param tok_obj: A tuple of strings representing a path to a Python path.\n :type tok_obj: Tuple[str]\n :return: The occurrence of the object represented by `tok_obj`\n in `nested_dict`.\n :rtype: int\n '
if (not tok_obj):
return nested_dict['occurrences']
return get_value(nested_dict[tok_obj[0]], tok_obj[1:])
usage = self.usage(merge=merge)
nested_usage = self.nested_usage(merge=merge)
objects = set()
for (obj, _) in usage:
tok_obj = tokenize(obj)
objects |= {(detokenize(tok_obj[:i]), tok_obj[:i]) for i in range(1, (len(tok_obj) + 1))}
full_objects = [{'obj': obj, 'tok': tok_obj, 'val': get_value(nested_usage, tok_obj)} for (obj, tok_obj) in objects]
if threshold:
full_objects = [fobj for fobj in full_objects if (fobj['val'] > threshold)]
if (limit > 0):
sorted_fobjs = sorted(full_objects, key=(lambda fobj: fobj['val']), reverse=True)
limit_value = sorted_fobjs[limit]['val']
full_objects = [fobj for fobj in full_objects if (fobj['val'] >= limit_value)]
parameters = {'ids': [fobj['obj'] for fobj in full_objects], 'labels': [fobj['tok'][(- 1)] for fobj in full_objects], 'parents': [detokenize(fobj['tok'][:(- 1)]) for fobj in full_objects], 'values': [fobj['val'] for fobj in full_objects]}
if show:
fig = go.Figure(go.Sunburst(**parameters, branchvalues='total', insidetextorientation='radial', maxdepth=max_depth), layout=go.Layout(paper_bgcolor=('rgba(0,0,0,0)' if transparant else None), margin={'t': 0, 'l': 0, 'r': 0, 'b': 0}))
fig.show()
else:
return parameters | def plot(self, merge: bool=True, threshold: int=0, limit: int=(- 1), max_depth: int=4, transparant: bool=False, show: bool=True) -> None:
'Display a plotly Sunburst plot showing the frequency of use\n of different sections of this module.\n\n :param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`\n into `"nltk.tokenize.word_tokenize"`. May give incorrect results\n for projects with "compat" folders, as the merging tends to prefer\n longer paths, e.g. `"tensorflow.float32"` will become\n `"tensorflow.compat.v1.dtypes.float32"` as opposed to just\n `"tensorflow.dtypes.float32"`. Defaults to True.\n :type merge: bool\n :rtype: None\n '
import plotly.graph_objects as go
def get_value(nested_dict: Dict, tok_obj: Tuple[str]) -> int:
'Recursively apply elements from `tok_obj` as keys in `nested_dict`,\n and then gather the `occurrences`.\n\n :param nested_dict: A dictionary with nested usages, generally taken\n from the `nested_usage` method.\n :type nested_dict: Dict\n :param tok_obj: A tuple of strings representing a path to a Python path.\n :type tok_obj: Tuple[str]\n :return: The occurrence of the object represented by `tok_obj`\n in `nested_dict`.\n :rtype: int\n '
if (not tok_obj):
return nested_dict['occurrences']
return get_value(nested_dict[tok_obj[0]], tok_obj[1:])
usage = self.usage(merge=merge)
nested_usage = self.nested_usage(merge=merge)
objects = set()
for (obj, _) in usage:
tok_obj = tokenize(obj)
objects |= {(detokenize(tok_obj[:i]), tok_obj[:i]) for i in range(1, (len(tok_obj) + 1))}
full_objects = [{'obj': obj, 'tok': tok_obj, 'val': get_value(nested_usage, tok_obj)} for (obj, tok_obj) in objects]
if threshold:
full_objects = [fobj for fobj in full_objects if (fobj['val'] > threshold)]
if (limit > 0):
sorted_fobjs = sorted(full_objects, key=(lambda fobj: fobj['val']), reverse=True)
limit_value = sorted_fobjs[limit]['val']
full_objects = [fobj for fobj in full_objects if (fobj['val'] >= limit_value)]
parameters = {'ids': [fobj['obj'] for fobj in full_objects], 'labels': [fobj['tok'][(- 1)] for fobj in full_objects], 'parents': [detokenize(fobj['tok'][:(- 1)]) for fobj in full_objects], 'values': [fobj['val'] for fobj in full_objects]}
if show:
fig = go.Figure(go.Sunburst(**parameters, branchvalues='total', insidetextorientation='radial', maxdepth=max_depth), layout=go.Layout(paper_bgcolor=('rgba(0,0,0,0)' if transparant else None), margin={'t': 0, 'l': 0, 'r': 0, 'b': 0}))
fig.show()
else:
return parameters<|docstring|>Display a plotly Sunburst plot showing the frequency of use
of different sections of this module.
:param merge: Whether to attempt to merge e.g. `"nltk.word_tokenize"`
into `"nltk.tokenize.word_tokenize"`. May give incorrect results
for projects with "compat" folders, as the merging tends to prefer
longer paths, e.g. `"tensorflow.float32"` will become
`"tensorflow.compat.v1.dtypes.float32"` as opposed to just
`"tensorflow.dtypes.float32"`. Defaults to True.
:type merge: bool
:rtype: None<|endoftext|> |
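plot() ultimately reduces to building the ids/labels/parents/values arrays that a sunburst chart expects, where every path prefix becomes a node whose value is the total of the counts underneath it. A dependency-free sketch of just that transformation, with made-up counts (the real method additionally applies the threshold and limit filters and hands the result to plotly):

from typing import Dict, List, Tuple

def sunburst_parameters(usage: List[Tuple[str, int]]) -> Dict[str, list]:
    # branchvalues='total' semantics: a node's value sums the leaves below it.
    values: Dict[Tuple[str, ...], int] = {}
    for obj, occ in usage:
        parts = tuple(obj.split("."))
        for i in range(1, len(parts) + 1):
            values[parts[:i]] = values.get(parts[:i], 0) + occ
    nodes = sorted(values)
    return {
        "ids": [".".join(n) for n in nodes],
        "labels": [n[-1] for n in nodes],
        "parents": [".".join(n[:-1]) for n in nodes],
        "values": [values[n] for n in nodes],
    }

params = sunburst_parameters([("nltk.tokenize.word_tokenize", 3), ("nltk.corpus.stopwords", 1)])
print(params["ids"])      # ['nltk', 'nltk.corpus', 'nltk.corpus.stopwords', 'nltk.tokenize', 'nltk.tokenize.word_tokenize']
print(params["values"])   # [4, 1, 1, 3, 3]
# If plotly is installed, these lists can be passed to plotly.graph_objects.Sunburst.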
a48510399b575dcd5f622909af873a5e6bdbcc1109f8512c66572067202a3786 | def n_uses(self, obj: str='') -> int:
'Return the number of uses of the module.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_uses()\n 137\n\n :return: The number of uses, i.e. the number of times\n `self.module` was used in the fetched files.\n :rtype: int\n '
if obj:
tok_obj = tokenize(obj)
objects = {potential_obj for (potential_obj, _) in self.usage(merge=False, cumulative=True) if Module.is_subsection_of(tok_obj, tokenize(potential_obj))}
usages = defaultdict((lambda : 0), self.usage(merge=False, cumulative=False))
return sum((usages[potential_obj] for potential_obj in objects))
return sum((occ for (_, occ) in self.usage(merge=False, cumulative=False))) | Return the number of uses of the module.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="100")
>>> module.n_uses()
137
:return: The number of uses, i.e. the number of times
`self.module` was used in the fetched files.
:rtype: int | module_dependencies/module/module.py | n_uses | tomaarsen/module_dependencies | 1 | python | def n_uses(self, obj: str=) -> int:
'Return the number of uses of the module.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_uses()\n 137\n\n :return: The number of uses, i.e. the number of times\n `self.module` was used in the fetched files.\n :rtype: int\n '
if obj:
tok_obj = tokenize(obj)
objects = {potential_obj for (potential_obj, _) in self.usage(merge=False, cumulative=True) if Module.is_subsection_of(tok_obj, tokenize(potential_obj))}
usages = defaultdict((lambda : 0), self.usage(merge=False, cumulative=False))
return sum((usages[potential_obj] for potential_obj in objects))
return sum((occ for (_, occ) in self.usage(merge=False, cumulative=False))) | def n_uses(self, obj: str=) -> int:
'Return the number of uses of the module.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_uses()\n 137\n\n :return: The number of uses, i.e. the number of times\n `self.module` was used in the fetched files.\n :rtype: int\n '
if obj:
tok_obj = tokenize(obj)
objects = {potential_obj for (potential_obj, _) in self.usage(merge=False, cumulative=True) if Module.is_subsection_of(tok_obj, tokenize(potential_obj))}
usages = defaultdict((lambda : 0), self.usage(merge=False, cumulative=False))
return sum((usages[potential_obj] for potential_obj in objects))
return sum((occ for (_, occ) in self.usage(merge=False, cumulative=False)))<|docstring|>Return the number of uses of the module.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="100")
>>> module.n_uses()
137
:return: The number of uses, i.e. the number of times
`self.module` was used in the fetched files.
:rtype: int<|endoftext|> |
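n_uses(obj) counts only occurrences whose path matches obj. The sketch below assumes Module.is_subsection_of tests ordered containment of one token path inside another (consistent with how it is used in merge_one and n_uses); that assumption, the helper names, and the toy counts are all illustrative:

from typing import List, Sequence, Tuple

def is_subsequence(short: Sequence[str], long: Sequence[str]) -> bool:
    # Assumed semantics: every token of `short` appears in `long`, in order,
    # so ('nltk', 'word_tokenize') matches ('nltk', 'tokenize', 'word_tokenize').
    it = iter(long)
    return all(tok in it for tok in short)

def count_uses(usage: List[Tuple[str, int]], obj: str) -> int:
    target = tuple(obj.split("."))
    return sum(occ for path, occ in usage
               if is_subsequence(target, tuple(path.split("."))))

toy_usage = [("nltk.tokenize.word_tokenize", 3), ("nltk.corpus.stopwords.words", 2)]
print(count_uses(toy_usage, "nltk.tokenize"))   # 3
print(count_uses(toy_usage, "nltk"))            # 5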
dfbc89bdb9602e97f0d427b7a63ece89132d5a4210a1c029afee6cb1d0d25cf6 | def n_files(self) -> int:
'Return the number of files fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_files()\n 100\n\n :return: The number of fetched files in which `self.module` was\n imported. Generally equivalent or similar to `count` if it\n was provided.\n :rtype: int\n '
return len(self.data['results']) | Return the number of files fetched.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="100")
>>> module.n_files()
100
:return: The number of fetched files in which `self.module` was
imported. Generally equivalent or similar to `count` if it
was provided.
:rtype: int | module_dependencies/module/module.py | n_files | tomaarsen/module_dependencies | 1 | python | def n_files(self) -> int:
'Return the number of files fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_files()\n 100\n\n :return: The number of fetched files in which `self.module` was\n imported. Generally equivalent or similar to `count` if it\n was provided.\n :rtype: int\n '
return len(self.data['results']) | def n_files(self) -> int:
'Return the number of files fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_files()\n 100\n\n :return: The number of fetched files in which `self.module` was\n imported. Generally equivalent or similar to `count` if it\n was provided.\n :rtype: int\n '
return len(self.data['results'])<|docstring|>Return the number of files fetched.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="100")
>>> module.n_files()
100
:return: The number of fetched files in which `self.module` was
imported. Generally equivalent or similar to `count` if it
was provided.
:rtype: int<|endoftext|> |
bc241538afb8265833c153c9a0127d7a4196482719cfedc7d99b5c23f5906d6c | def n_repositories(self) -> int:
'Return the number of repositories fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_repositories()\n 52\n\n TODO: Exclude errorred code\n\n :return: The number of fetched repositories in which `self.module`\n was imported.\n :rtype: int\n '
return self.data['repositoriesCount'] | Return the number of repositories fetched.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="100")
>>> module.n_repositories()
52
TODO: Exclude errored code
:return: The number of fetched repositories in which `self.module`
was imported.
:rtype: int | module_dependencies/module/module.py | n_repositories | tomaarsen/module_dependencies | 1 | python | def n_repositories(self) -> int:
'Return the number of repositories fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_repositories()\n 52\n\n TODO: Exclude errorred code\n\n :return: The number of fetched repositories in which `self.module`\n was imported.\n :rtype: int\n '
return self.data['repositoriesCount'] | def n_repositories(self) -> int:
'Return the number of repositories fetched.\n\n Example usage::\n\n >>> from module_dependencies import Module\n >>> module = Module("nltk", count="100")\n >>> module.n_repositories()\n 52\n\n TODO: Exclude errorred code\n\n :return: The number of fetched repositories in which `self.module`\n was imported.\n :rtype: int\n '
return self.data['repositoriesCount']<|docstring|>Return the number of repositories fetched.
Example usage::
>>> from module_dependencies import Module
>>> module = Module("nltk", count="100")
>>> module.n_repositories()
52
TODO: Exclude errored code
:return: The number of fetched repositories in which `self.module`
was imported.
:rtype: int<|endoftext|> |
54be0f20c0f26950da46f1be257835128adcd4ec3be5f15352e1e2e6aa28e8d9 | def merge_one(usage: List[Tuple[(Tuple[str], int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of similar tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially\n combinable objects.\n\n :param usage: A list of tuples, where the first element is a tuple\n of strings that represent a path to a Python object, e.g.\n `(\'nltk\', \'word_tokenize\')`, and the second element is how\n often that Python object occurs in a large collection of code.\n Each path in the tuple ends in the same token, and thus could\n refer to the same object.\n :type usage: List[Tuple[Tuple[str], int]]\n :return: `usage`, but the first element of each tuple is detokenized,\n i.e. converted back to a string, and paths that refer to the\n same element are merged.\n :rtype: List[Tuple[str, int]]\n '
merged = {}
for (obj, occ) in sorted(usage, key=(lambda x: len(x[0])), reverse=True):
options = [(o_key, o_occ) for (o_key, o_occ) in merged.items() if (Module.is_subsection_of(obj, o_key) and (o_occ > 1))]
if options:
key = max(options, key=(lambda x: x[1]))[0]
merged[key] += occ
else:
merged[obj] = occ
return [(detokenize(obj), occ) for (obj, occ) in merged.items()] | Merge a list of similar tuples, combining on "paths" that likely
refer to the same object, e.g. `"nltk.word_tokenize"` and
`"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially
combinable objects.
:param usage: A list of tuples, where the first element is a tuple
of strings that represent a path to a Python object, e.g.
`('nltk', 'word_tokenize')`, and the second element is how
often that Python object occurs in a large collection of code.
Each path in the tuple ends in the same token, and thus could
refer to the same object.
:type usage: List[Tuple[Tuple[str], int]]
:return: `usage`, but the first element of each tuple is detokenized,
i.e. converted back to a string, and paths that refer to the
same element are merged.
:rtype: List[Tuple[str, int]] | module_dependencies/module/module.py | merge_one | tomaarsen/module_dependencies | 1 | python | def merge_one(usage: List[Tuple[(Tuple[str], int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of similar tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially\n combinable objects.\n\n :param usage: A list of tuples, where the first element is a tuple\n of strings that represent a path to a Python object, e.g.\n `(\'nltk\', \'word_tokenize\')`, and the second element is how\n often that Python object occurs in a large collection of code.\n Each path in the tuple ends in the same token, and thus could\n refer to the same object.\n :type usage: List[Tuple[Tuple[str], int]]\n :return: `usage`, but the first element of each tuple is detokenized,\n i.e. converted back to a string, and paths that refer to the\n same element are merged.\n :rtype: List[Tuple[str, int]]\n '
merged = {}
for (obj, occ) in sorted(usage, key=(lambda x: len(x[0])), reverse=True):
options = [(o_key, o_occ) for (o_key, o_occ) in merged.items() if (Module.is_subsection_of(obj, o_key) and (o_occ > 1))]
if options:
key = max(options, key=(lambda x: x[1]))[0]
merged[key] += occ
else:
merged[obj] = occ
return [(detokenize(obj), occ) for (obj, occ) in merged.items()] | def merge_one(usage: List[Tuple[(Tuple[str], int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of similar tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially\n combinable objects.\n\n :param usage: A list of tuples, where the first element is a tuple\n of strings that represent a path to a Python object, e.g.\n `(\'nltk\', \'word_tokenize\')`, and the second element is how\n often that Python object occurs in a large collection of code.\n Each path in the tuple ends in the same token, and thus could\n refer to the same object.\n :type usage: List[Tuple[Tuple[str], int]]\n :return: `usage`, but the first element of each tuple is detokenized,\n i.e. converted back to a string, and paths that refer to the\n same element are merged.\n :rtype: List[Tuple[str, int]]\n '
merged = {}
for (obj, occ) in sorted(usage, key=(lambda x: len(x[0])), reverse=True):
options = [(o_key, o_occ) for (o_key, o_occ) in merged.items() if (Module.is_subsection_of(obj, o_key) and (o_occ > 1))]
if options:
key = max(options, key=(lambda x: x[1]))[0]
merged[key] += occ
else:
merged[obj] = occ
return [(detokenize(obj), occ) for (obj, occ) in merged.items()]<|docstring|>Merge a list of similar tuples, combining on "paths" that likely
refer to the same object, e.g. `"nltk.word_tokenize"` and
`"nltk.tokenize.word_tokenize"`. `usage` is a list of potentially
combinable objects.
:param usage: A list of tuples, where the first element is a tuple
of strings that represent a path to a Python object, e.g.
`('nltk', 'word_tokenize')`, and the second element is how
often that Python object occurs in a large collection of code.
Each path in the tuple ends in the same token, and thus could
refer to the same object.
:type usage: List[Tuple[Tuple[str], int]]
:return: `usage`, but the first element of each tuple is detokenized,
i.e. converted back to a string, and paths that refer to the
same element are merged.
:rtype: List[Tuple[str, int]]<|endoftext|> |
d041e8bb052a2c85d51cf5eb2bc0c8e0348fac0dc37ed88ba88708f039857959 | def merge_all(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`.\n\n :param usage: A list of tuples, where the first element of\n each tuple is a string representing a path to a Python object,\n e.g. `"nltk.word_tokenize"`, and the second element of each\n tuple is the occurrence of that object in a large collection\n of code.\n :type usage: List[Tuple[str, int]]\n :return: `usage`, but with some merged tuples.\n :rtype: List[Tuple[str, int]]\n '
grouped = defaultdict(list)
for (obj, occ) in usage:
obj_tok = tokenize(obj)
grouped[obj_tok[(- 1)]].append((obj_tok, occ))
merged = []
for group in grouped.values():
merged.extend(merge_one(group))
return sorted(merged, key=(lambda x: x[1]), reverse=True) | Merge a list of tuples, combining on "paths" that likely
refer to the same object, e.g. `"nltk.word_tokenize"` and
`"nltk.tokenize.word_tokenize"`.
:param usage: A list of tuples, where the first element of
each tuple is a string representing a path to a Python object,
e.g. `"nltk.word_tokenize"`, and the second element of each
tuple is the occurrence of that object in a large collection
of code.
:type usage: List[Tuple[str, int]]
:return: `usage`, but with some merged tuples.
:rtype: List[Tuple[str, int]] | module_dependencies/module/module.py | merge_all | tomaarsen/module_dependencies | 1 | python | def merge_all(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`.\n\n :param usage: A list of tuples, where the first element of\n each tuple is a string representing a path to a Python object,\n e.g. `"nltk.word_tokenize"`, and the second element of each\n tuple is the occurrence of that object in a large collection\n of code.\n :type usage: List[Tuple[str, int]]\n :return: `usage`, but with some merged tuples.\n :rtype: List[Tuple[str, int]]\n '
grouped = defaultdict(list)
for (obj, occ) in usage:
obj_tok = tokenize(obj)
grouped[obj_tok[(- 1)]].append((obj_tok, occ))
merged = []
for group in grouped.values():
merged.extend(merge_one(group))
return sorted(merged, key=(lambda x: x[1]), reverse=True) | def merge_all(usage: List[Tuple[(str, int)]]) -> List[Tuple[(str, int)]]:
'Merge a list of tuples, combining on "paths" that likely\n refer to the same object, e.g. `"nltk.word_tokenize"` and\n `"nltk.tokenize.word_tokenize"`.\n\n :param usage: A list of tuples, where the first element of\n each tuple is a string representing a path to a Python object,\n e.g. `"nltk.word_tokenize"`, and the second element of each\n tuple is the occurrence of that object in a large collection\n of code.\n :type usage: List[Tuple[str, int]]\n :return: `usage`, but with some merged tuples.\n :rtype: List[Tuple[str, int]]\n '
grouped = defaultdict(list)
for (obj, occ) in usage:
obj_tok = tokenize(obj)
grouped[obj_tok[(- 1)]].append((obj_tok, occ))
merged = []
for group in grouped.values():
merged.extend(merge_one(group))
return sorted(merged, key=(lambda x: x[1]), reverse=True)<|docstring|>Merge a list of tuples, combining on "paths" that likely
refer to the same object, e.g. `"nltk.word_tokenize"` and
`"nltk.tokenize.word_tokenize"`.
:param usage: A list of tuples, where the first element of
each tuple is a string representing a path to a Python object,
e.g. `"nltk.word_tokenize"`, and the second element of each
tuple is the occurrence of that object in a large collection
of code.
:type usage: List[Tuple[str, int]]
:return: `usage`, but with some merged tuples.
:rtype: List[Tuple[str, int]]<|endoftext|> |
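merge_all/merge_one fold aliases of the same object (nltk.word_tokenize vs nltk.tokenize.word_tokenize) into the longest plausible path: group by the final token, then absorb shorter paths into a longer path that contains them. A condensed, self-contained sketch; the subsequence test stands in for Module.is_subsection_of, the tie-breaking is simplified, and the counts are invented:

from collections import defaultdict
from typing import Dict, List, Tuple

def is_subsequence(short: Tuple[str, ...], long: Tuple[str, ...]) -> bool:
    it = iter(long)
    return all(tok in it for tok in short)

def merge_usage(usage: List[Tuple[str, int]]) -> List[Tuple[str, int]]:
    groups: Dict[str, List[Tuple[Tuple[str, ...], int]]] = defaultdict(list)
    for obj, occ in usage:
        tok = tuple(obj.split("."))
        groups[tok[-1]].append((tok, occ))          # candidates share their last token
    result: List[Tuple[str, int]] = []
    for group in groups.values():
        merged: Dict[Tuple[str, ...], int] = {}
        for tok, occ in sorted(group, key=lambda x: len(x[0]), reverse=True):
            hosts = [k for k in merged if is_subsequence(tok, k)]
            if hosts:
                merged[hosts[0]] += occ             # fold the alias into the longer path
            else:
                merged[tok] = occ
        result.extend((".".join(t), o) for t, o in merged.items())
    return sorted(result, key=lambda x: x[1], reverse=True)

print(merge_usage([("nltk.word_tokenize", 2), ("nltk.tokenize.word_tokenize", 3)]))
# [('nltk.tokenize.word_tokenize', 5)]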
0131b89d52a9f99db3c305f28ec94b05fea9841c218ff6d737d83c896b16471c | def get_value(nested_dict: Dict, tok_obj: Tuple[str]) -> int:
'Recursively apply elements from `tok_obj` as keys in `nested_dict`,\n and then gather the `occurrences`.\n\n :param nested_dict: A dictionary with nested usages, generally taken\n from the `nested_usage` method.\n :type nested_dict: Dict\n :param tok_obj: A tuple of strings representing a path to a Python path.\n :type tok_obj: Tuple[str]\n :return: The occurrence of the object represented by `tok_obj`\n in `nested_dict`.\n :rtype: int\n '
if (not tok_obj):
return nested_dict['occurrences']
return get_value(nested_dict[tok_obj[0]], tok_obj[1:]) | Recursively apply elements from `tok_obj` as keys in `nested_dict`,
and then gather the `occurrences`.
:param nested_dict: A dictionary with nested usages, generally taken
from the `nested_usage` method.
:type nested_dict: Dict
:param tok_obj: A tuple of strings representing a path to a Python object.
:type tok_obj: Tuple[str]
:return: The occurrence of the object represented by `tok_obj`
in `nested_dict`.
:rtype: int | module_dependencies/module/module.py | get_value | tomaarsen/module_dependencies | 1 | python | def get_value(nested_dict: Dict, tok_obj: Tuple[str]) -> int:
'Recursively apply elements from `tok_obj` as keys in `nested_dict`,\n and then gather the `occurrences`.\n\n :param nested_dict: A dictionary with nested usages, generally taken\n from the `nested_usage` method.\n :type nested_dict: Dict\n :param tok_obj: A tuple of strings representing a path to a Python path.\n :type tok_obj: Tuple[str]\n :return: The occurrence of the object represented by `tok_obj`\n in `nested_dict`.\n :rtype: int\n '
if (not tok_obj):
return nested_dict['occurrences']
return get_value(nested_dict[tok_obj[0]], tok_obj[1:]) | def get_value(nested_dict: Dict, tok_obj: Tuple[str]) -> int:
'Recursively apply elements from `tok_obj` as keys in `nested_dict`,\n and then gather the `occurrences`.\n\n :param nested_dict: A dictionary with nested usages, generally taken\n from the `nested_usage` method.\n :type nested_dict: Dict\n :param tok_obj: A tuple of strings representing a path to a Python path.\n :type tok_obj: Tuple[str]\n :return: The occurrence of the object represented by `tok_obj`\n in `nested_dict`.\n :rtype: int\n '
if (not tok_obj):
return nested_dict['occurrences']
return get_value(nested_dict[tok_obj[0]], tok_obj[1:])<|docstring|>Recursively apply elements from `tok_obj` as keys in `nested_dict`,
and then gather the `occurrences`.
:param nested_dict: A dictionary with nested usages, generally taken
from the `nested_usage` method.
:type nested_dict: Dict
:param tok_obj: A tuple of strings representing a path to a Python object.
:type tok_obj: Tuple[str]
:return: The occurrence of the object represented by `tok_obj`
in `nested_dict`.
:rtype: int<|endoftext|> |
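get_value is a plain recursive lookup into the nested-usage dictionary; an equivalent iterative version, shown on invented data:

from typing import Dict, Sequence

def lookup_occurrences(nested: Dict, tok_obj: Sequence[str]) -> int:
    # Walk the nested dict along the token path, then read 'occurrences'.
    node = nested
    for key in tok_obj:
        node = node[key]
    return node["occurrences"]

toy = {"nltk": {"occurrences": 4, "tokenize": {"occurrences": 2}}}
print(lookup_occurrences(toy, ("nltk", "tokenize")))   # 2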
d7fb2cb6eec4c7540dd8af04b6f5c445f86e765a64973fe9be3d5b449e5d33e4 | def test_tddft_iter_lda(self):
' Compute polarization with LDA TDDFT '
from timeit import default_timer as timer
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname, jcutoff=7, iter_broadening=0.01, xc_code='LDA,PZ', level=0)
omegas = (np.linspace(0.0, 2.0, 150) + (1j * td.eps))
pxx = (- td.comp_polariz_inter_xx(omegas).imag)
data = np.array([(omegas.real * 27.2114), pxx])
np.savetxt('water.tddft_iter_lda.omega.inter.pxx.txt', data.T, fmt=['%f', '%f'])
data_ref = np.loadtxt((dname + '/water.tddft_iter_lda.omega.inter.pxx.txt-ref'))
self.assertTrue(np.allclose(data_ref, data.T, rtol=1.0, atol=1e-05)) | Compute polarization with LDA TDDFT | pyscf/nao/test/test_0034_tddft_iter_lda_nao.py | test_tddft_iter_lda | mfkasim1/pyscf | 3 | python | def test_tddft_iter_lda(self):
' '
from timeit import default_timer as timer
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname, jcutoff=7, iter_broadening=0.01, xc_code='LDA,PZ', level=0)
omegas = (np.linspace(0.0, 2.0, 150) + (1j * td.eps))
pxx = (- td.comp_polariz_inter_xx(omegas).imag)
data = np.array([(omegas.real * 27.2114), pxx])
np.savetxt('water.tddft_iter_lda.omega.inter.pxx.txt', data.T, fmt=['%f', '%f'])
data_ref = np.loadtxt((dname + '/water.tddft_iter_lda.omega.inter.pxx.txt-ref'))
self.assertTrue(np.allclose(data_ref, data.T, rtol=1.0, atol=1e-05)) | def test_tddft_iter_lda(self):
' '
from timeit import default_timer as timer
dname = os.path.dirname(os.path.abspath(__file__))
td = tddft_iter(label='water', cd=dname, jcutoff=7, iter_broadening=0.01, xc_code='LDA,PZ', level=0)
omegas = (np.linspace(0.0, 2.0, 150) + (1j * td.eps))
pxx = (- td.comp_polariz_inter_xx(omegas).imag)
data = np.array([(omegas.real * 27.2114), pxx])
np.savetxt('water.tddft_iter_lda.omega.inter.pxx.txt', data.T, fmt=['%f', '%f'])
data_ref = np.loadtxt((dname + '/water.tddft_iter_lda.omega.inter.pxx.txt-ref'))
self.assertTrue(np.allclose(data_ref, data.T, rtol=1.0, atol=1e-05))<|docstring|>Compute polarization with LDA TDDFT<|endoftext|> |
f7ba3fc467b684fb2e3be9d235e7c369344f541761096e7c24f519f53baaafda | @staticmethod
def random_agent(observation, configuration):
'Agent for taking a random action.'
del observation
return random.randrange(configuration.banditCount) | Agent for taking a random action. | idea01/bots.py | random_agent | RobRomijnders/santa20 | 0 | python | @staticmethod
def random_agent(observation, configuration):
del observation
return random.randrange(configuration.banditCount) | @staticmethod
def random_agent(observation, configuration):
del observation
return random.randrange(configuration.banditCount)<|docstring|>Agent for taking a random action.<|endoftext|> |
202474d18428954253c129c202689a53f423387176694943bbedf5d7f2274f47 | def random_agent_limit(self, observation, configuration):
'Agent for taking a random action within a limit.'
del observation
return random.randrange(int((configuration.banditCount * self.limit))) | Agent for taking a random action within a limit. | idea01/bots.py | random_agent_limit | RobRomijnders/santa20 | 0 | python | def random_agent_limit(self, observation, configuration):
del observation
return random.randrange(int((configuration.banditCount * self.limit))) | def random_agent_limit(self, observation, configuration):
del observation
return random.randrange(int((configuration.banditCount * self.limit)))<|docstring|>Agent for taking a random action within a limit.<|endoftext|> |
6c9af06b89fb54720528298a5d5e2f639b6cd7fa9cf7d0e87b51f794450234b0 | def random_agent_constant(self, observation, configuration):
'Just returns the same value over and over again.'
del observation
return int((configuration.banditCount * self.limit)) | Just returns the same value over and over again. | idea01/bots.py | random_agent_constant | RobRomijnders/santa20 | 0 | python | def random_agent_constant(self, observation, configuration):
del observation
return int((configuration.banditCount * self.limit)) | def random_agent_constant(self, observation, configuration):
del observation
return int((configuration.banditCount * self.limit))<|docstring|>Just returns the same value over and over again.<|endoftext|> |
9452a3deabbb5cfcd2c6ec682856c7144429f5f545bab06a0817c23853984c10 | def thompson_sampling_agent(self, observation, configuration):
'Agent that uses Thompson sampling.'
if (len(self.counts) == 0):
for i in range(configuration.banditCount):
self.counts[i] = self.prior
if (len(observation.lastActions) > 0):
self.rewards.append(observation.reward)
self.opponent_picks.append(oppo_action(observation.lastActions, self.actions[(- 1)]))
reward_t2 = (self.rewards[(- 2)] if (len(self.rewards) >= 2) else 0)
reward_t1 = (self.rewards[(- 1)] if (len(self.rewards) > 0) else 0)
self.counts[self.actions[(- 1)]] = {'n': (self.counts[self.actions[(- 1)]]['n'] + 1), 'h': (self.counts[self.actions[(- 1)]]['h'] + (reward_t1 - reward_t2))}
action = random.randrange(configuration.banditCount)
if (observation.step > 1):
action = oppo_action(observation.lastActions, self.actions[(- 1)])
if (observation.step > 10):
pvals = np.array([np.random.beta(d['n'], max(0, d['h'])) for d in self.counts.values()])
pvals = (pvals / pvals.sum())
action = int(np.random.choice(list(range(len(self.counts))), p=(pvals / pvals.sum())))
self.actions.append(action)
return action | Agent that uses Thompson sampling. | idea01/bots.py | thompson_sampling_agent | RobRomijnders/santa20 | 0 | python | def thompson_sampling_agent(self, observation, configuration):
if (len(self.counts) == 0):
for i in range(configuration.banditCount):
self.counts[i] = self.prior
if (len(observation.lastActions) > 0):
self.rewards.append(observation.reward)
self.opponent_picks.append(oppo_action(observation.lastActions, self.actions[(- 1)]))
reward_t2 = (self.rewards[(- 2)] if (len(self.rewards) >= 2) else 0)
reward_t1 = (self.rewards[(- 1)] if (len(self.rewards) > 0) else 0)
self.counts[self.actions[(- 1)]] = {'n': (self.counts[self.actions[(- 1)]]['n'] + 1), 'h': (self.counts[self.actions[(- 1)]]['h'] + (reward_t1 - reward_t2))}
action = random.randrange(configuration.banditCount)
if (observation.step > 1):
action = oppo_action(observation.lastActions, self.actions[(- 1)])
if (observation.step > 10):
pvals = np.array([np.random.beta(d['n'], max(0, d['h'])) for d in self.counts.values()])
pvals = (pvals / pvals.sum())
action = int(np.random.choice(list(range(len(self.counts))), p=(pvals / pvals.sum())))
self.actions.append(action)
return action | def thompson_sampling_agent(self, observation, configuration):
if (len(self.counts) == 0):
for i in range(configuration.banditCount):
self.counts[i] = self.prior
if (len(observation.lastActions) > 0):
self.rewards.append(observation.reward)
self.opponent_picks.append(oppo_action(observation.lastActions, self.actions[(- 1)]))
reward_t2 = (self.rewards[(- 2)] if (len(self.rewards) >= 2) else 0)
reward_t1 = (self.rewards[(- 1)] if (len(self.rewards) > 0) else 0)
self.counts[self.actions[(- 1)]] = {'n': (self.counts[self.actions[(- 1)]]['n'] + 1), 'h': (self.counts[self.actions[(- 1)]]['h'] + (reward_t1 - reward_t2))}
action = random.randrange(configuration.banditCount)
if (observation.step > 1):
action = oppo_action(observation.lastActions, self.actions[(- 1)])
if (observation.step > 10):
pvals = np.array([np.random.beta(d['n'], max(0, d['h'])) for d in self.counts.values()])
pvals = (pvals / pvals.sum())
action = int(np.random.choice(list(range(len(self.counts))), p=(pvals / pvals.sum())))
self.actions.append(action)
return action<|docstring|>Agent that uses Thompson sampling.<|endoftext|> |
3af7e3729f51a7e8dead6fe31ad236229c6e779ebd4abe688410ca7929b3c014 | def init_markets(self, markets):
'Initialize markets by importing public market classes.'
self.market_names = markets
for market_name in markets:
exec(('import public_markets.' + market_name.lower()))
market = eval((((('public_markets.' + market_name.lower()) + '.') + market_name) + '()'))
self.markets[market_name] = market | Initialize markets by importing public market classes. | arbitrage/arbitrer.py | init_markets | acontry/altcoin-arbitrage | 7 | python | def init_markets(self, markets):
self.market_names = markets
for market_name in markets:
exec(('import public_markets.' + market_name.lower()))
market = eval((((('public_markets.' + market_name.lower()) + '.') + market_name) + '()'))
self.markets[market_name] = market | def init_markets(self, markets):
self.market_names = markets
for market_name in markets:
exec(('import public_markets.' + market_name.lower()))
market = eval((((('public_markets.' + market_name.lower()) + '.') + market_name) + '()'))
self.markets[market_name] = market<|docstring|>Initialize markets by importing public market classes.<|endoftext|> |
12c3461c077e117f361afd5a518db468bea6367895a86d87be6fa2e7a5365478 | def init_observers(self, _observers):
'Initialize observers by importing observer classes.'
self.observer_names = _observers
for observer_name in _observers:
exec(('import observers.' + observer_name.lower()))
observer = eval((((('observers.' + observer_name.lower()) + '.') + observer_name) + '()'))
self.observers.append(observer) | Initialize observers by importing observer classes. | arbitrage/arbitrer.py | init_observers | acontry/altcoin-arbitrage | 7 | python | def init_observers(self, _observers):
self.observer_names = _observers
for observer_name in _observers:
exec(('import observers.' + observer_name.lower()))
observer = eval((((('observers.' + observer_name.lower()) + '.') + observer_name) + '()'))
self.observers.append(observer) | def init_observers(self, _observers):
self.observer_names = _observers
for observer_name in _observers:
exec(('import observers.' + observer_name.lower()))
observer = eval((((('observers.' + observer_name.lower()) + '.') + observer_name) + '()'))
self.observers.append(observer)<|docstring|>Initialize observers by importing observer classes.<|endoftext|> |
c5d5ced5b3aa13ba2eb79bdc5fa8a692730d15c2a5a33055f770bd33296b0bfd | def check_opportunity(self, kask, kbid):
'Replacement for arbitrage_depth_opportunity machinery. Returns the\n profit, volume, buy price, sell price, weighted buy/sell prices for a\n potential arbitrage opportunity. Only considers the best bid/ask prices\n and does not go into the depth like the more complicated method.'
buy_price = self.depths[kask]['asks'][0]['price']
sell_price = self.depths[kbid]['bids'][0]['price']
ask_vol = self.depths[kask]['asks'][0]['amount']
bid_vol = self.depths[kbid]['bids'][0]['amount']
trade_vol = min(ask_vol, bid_vol)
buy_fee = self.markets[kask].fees['buy']['fee']
sell_fee = self.markets[kbid].fees['sell']['fee']
profit = (trade_vol * (((1 - sell_fee) * sell_price) - ((1 + buy_fee) * buy_price)))
return (profit, trade_vol, buy_price, sell_price, buy_price, sell_price) | Replacement for arbitrage_depth_opportunity machinery. Returns the
profit, volume, buy price, sell price, weighted buy/sell prices for a
potential arbitrage opportunity. Only considers the best bid/ask prices
and does not go into the depth like the more complicated method. | arbitrage/arbitrer.py | check_opportunity | acontry/altcoin-arbitrage | 7 | python | def check_opportunity(self, kask, kbid):
'Replacement for arbitrage_depth_opportunity machinery. Returns the\n profit, volume, buy price, sell price, weighted buy/sell prices for a\n potential arbitrage opportunity. Only considers the best bid/ask prices\n and does not go into the depth like the more complicated method.'
buy_price = self.depths[kask]['asks'][0]['price']
sell_price = self.depths[kbid]['bids'][0]['price']
ask_vol = self.depths[kask]['asks'][0]['amount']
bid_vol = self.depths[kbid]['bids'][0]['amount']
trade_vol = min(ask_vol, bid_vol)
buy_fee = self.markets[kask].fees['buy']['fee']
sell_fee = self.markets[kbid].fees['sell']['fee']
profit = (trade_vol * (((1 - sell_fee) * sell_price) - ((1 + buy_fee) * buy_price)))
return (profit, trade_vol, buy_price, sell_price, buy_price, sell_price) | def check_opportunity(self, kask, kbid):
'Replacement for arbitrage_depth_opportunity machinery. Returns the\n profit, volume, buy price, sell price, weighted buy/sell prices for a\n potential arbitrage opportunity. Only considers the best bid/ask prices\n and does not go into the depth like the more complicated method.'
buy_price = self.depths[kask]['asks'][0]['price']
sell_price = self.depths[kbid]['bids'][0]['price']
ask_vol = self.depths[kask]['asks'][0]['amount']
bid_vol = self.depths[kbid]['bids'][0]['amount']
trade_vol = min(ask_vol, bid_vol)
buy_fee = self.markets[kask].fees['buy']['fee']
sell_fee = self.markets[kbid].fees['sell']['fee']
profit = (trade_vol * (((1 - sell_fee) * sell_price) - ((1 + buy_fee) * buy_price)))
return (profit, trade_vol, buy_price, sell_price, buy_price, sell_price)<|docstring|>Replacement for arbitrage_depth_opportunity machinery. Returns the
profit, volume, buy price, sell price, weighted buy/sell prices for a
potential arbitrage opportunity. Only considers the best bid/ask prices
and does not go into the depth like the more complicated method.<|endoftext|> |